From ed71f2c9726539d00db6df91234e21ccf52dce52 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 13 May 2025 18:04:31 +0200 Subject: [PATCH 001/509] export from google3 --- .allstar/BUILD.bazel | 17 ++ ortools/lp_data/README.md | 0 ortools/pdlp/primal_dual_hybrid_gradient.cc | 219 ++++++++++++++---- .../pdlp/primal_dual_hybrid_gradient_test.cc | 149 +++++++++++- ortools/pdlp/solvers.proto | 13 ++ ortools/util/time_limit.h | 12 +- 6 files changed, 347 insertions(+), 63 deletions(-) create mode 100644 .allstar/BUILD.bazel create mode 100644 ortools/lp_data/README.md diff --git a/.allstar/BUILD.bazel b/.allstar/BUILD.bazel new file mode 100644 index 0000000000..c6f182d9ae --- /dev/null +++ b/.allstar/BUILD.bazel @@ -0,0 +1,17 @@ +# Copyright 2010-2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +exports_files( + glob(["**"]), + visibility = ["//ortools/open_source:__subpackages__"], +) diff --git a/ortools/lp_data/README.md b/ortools/lp_data/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ortools/pdlp/primal_dual_hybrid_gradient.cc b/ortools/pdlp/primal_dual_hybrid_gradient.cc index 2aaf59241e..d166ef7700 100644 --- a/ortools/pdlp/primal_dual_hybrid_gradient.cc +++ b/ortools/pdlp/primal_dual_hybrid_gradient.cc @@ -11,6 +11,31 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+// We solve a QP, which we call the "original QP", by applying preprocessing +// including presolve and rescaling, which produces a new QP that we call the +// "working QP". We then solve the working QP using the Primal Dual Hybrid +// Gradient algorithm (PDHG). The optimality criteria are evaluated using the +// original QP. There are three main modules in this file: +// * The free function `PrimalDualHybridGradient()`, which is the user API, and +// is responsible for input validation that doesn't use +// ShardedQuadraticProgram, creating a `PreprocessSolver`, and calling +// `PreprocessSolver::PreprocessAndSolve()`. +// * The class `PreprocessSolver`, which is responsible for everything that +// touches the original QP, including input validation that uses +// ShardedQuadraticProgram, the preprocessing, converting solutions to the +// working QP back to solutions to the original QP, and termination checks. It +// also creates a `Solver` object and calls `Solver::Solve()`. +// * The class `Solver`, which is responsible for everything that only touches +// the working QP. It keeps a pointer to `PreprocessSolver` and calls methods +// on it when it needs access to the original QP, e.g. termination checks. +// When feasibility polishing is enabled the main solve's `Solver` object +// creates additional `Solver` objects periodically to do the feasibility +// polishing (in `Solver::TryPrimalPolishing()` and +// `Solver::TryDualPolishing()`). +// The main reason for having two separate classes `PreprocessSolver` and +// `Solver` is the fact that feasibility polishing mode uses a single +// `PreprocessSolver` object with multiple `Solver` objects. + #include "ortools/pdlp/primal_dual_hybrid_gradient.h" #include @@ -313,6 +338,7 @@ SolverResult ConstructSolverResult(VectorXd primal_solution, .solve_log = std::move(solve_log)}; } +// See comment at top of file. class PreprocessSolver { public: // Assumes that `qp` and `params` are valid. 
@@ -505,6 +531,7 @@ class PreprocessSolver { IterationStatsCallback iteration_stats_callback_; }; +// See comment at top of file. class Solver { public: // `preprocess_solver` should not be nullptr, and the `PreprocessSolver` @@ -556,6 +583,15 @@ class Solver { // cause infinite and NaN values. constexpr static double kDivergentMovement = 1.0e100; + // The total number of iterations in feasibility polishing is at most + // `4 * iterations_completed_ / kFeasibilityIterationFraction`. + // One factor of two is because there are both primal and dual feasibility + // polishing phases, and the other factor of two is because + // `next_feasibility_polishing_iteration` increases by a factor of 2 each + // feasibility polishing phase, so the sum of iteration limits is at most + // twice the last value. + constexpr static int kFeasibilityIterationFraction = 8; + // Attempts to solve primal and dual feasibility subproblems starting at the // average iterate, for at most `iteration_limit` iterations each. If // successful, returns a `SolverResult`, otherwise nullopt. Appends @@ -2255,6 +2291,36 @@ IterationStats WorkFromFeasibilityPolishing(const SolveLog& solve_log) { return result; } +bool TerminationReasonIsInterrupted(const TerminationReason reason) { + return reason == TERMINATION_REASON_INTERRUPTED_BY_USER; +} + +bool TerminationReasonIsWorkLimitNotInterrupted( + const TerminationReason reason) { + return reason == TERMINATION_REASON_ITERATION_LIMIT || + reason == TERMINATION_REASON_TIME_LIMIT || + reason == TERMINATION_REASON_KKT_MATRIX_PASS_LIMIT; +} + +// Note: `TERMINATION_REASON_INTERRUPTED_BY_USER` is treated as a work limit +// (that was determined in real-time by the user). 
+bool TerminationReasonIsWorkLimit(const TerminationReason reason) { + return TerminationReasonIsWorkLimitNotInterrupted(reason) || + TerminationReasonIsInterrupted(reason); +} + +bool DoFeasibilityPolishingAfterLimitsReached( + const PrimalDualHybridGradientParams& params, + const TerminationReason reason) { + if (TerminationReasonIsWorkLimitNotInterrupted(reason)) { + return params.apply_feasibility_polishing_after_limits_reached(); + } + if (TerminationReasonIsInterrupted(reason)) { + return params.apply_feasibility_polishing_if_solver_is_interrupted(); + } + return false; +} + std::optional Solver::MajorIterationAndTerminationCheck( const IterationType iteration_type, const bool force_numerical_termination, const std::atomic* interrupt_solve, @@ -2272,12 +2338,12 @@ std::optional Solver::MajorIterationAndTerminationCheck( IterationStats stats = CreateSimpleIterationStats(restart); IterationStats full_work_stats = AddWorkStats(stats, work_from_feasibility_polishing); + std::optional simple_termination_reason = + CheckSimpleTerminationCriteria(params_.termination_criteria(), + full_work_stats, interrupt_solve); const bool check_termination = major_iteration_cycle % params_.termination_check_frequency() == 0 || - CheckSimpleTerminationCriteria(params_.termination_criteria(), - full_work_stats, interrupt_solve) - .has_value() || - force_numerical_termination; + simple_termination_reason.has_value() || force_numerical_termination; // We check termination on every major iteration. DCHECK(!is_major_iteration || check_termination); if (check_termination) { @@ -2304,6 +2370,19 @@ std::optional Solver::MajorIterationAndTerminationCheck( } // We've terminated. 
if (maybe_termination_reason.has_value()) { + if (iteration_type == IterationType::kNormal && + DoFeasibilityPolishingAfterLimitsReached( + params_, maybe_termination_reason->reason)) { + const std::optional feasibility_result = + TryFeasibilityPolishing( + iterations_completed_ / kFeasibilityIterationFraction, + interrupt_solve, solve_log); + if (feasibility_result.has_value()) { + LOG(INFO) << "Returning result from feasibility polishing after " + "limits reached"; + return *feasibility_result; + } + } IterationStats terminating_full_stats = AddWorkStats(stats, work_from_feasibility_polishing); return PickSolutionAndConstructSolverResult( @@ -2573,15 +2652,6 @@ FeasibilityPolishingDetails BuildFeasibilityPolishingDetails( return details; } -// Note: `TERMINATION_REASON_INTERRUPTED_BY_USER` is treated as a work limit -// (that was determined in real-time by the user). -bool TerminationReasonIsWorkLimit(const TerminationReason reason) { - return reason == TERMINATION_REASON_ITERATION_LIMIT || - reason == TERMINATION_REASON_TIME_LIMIT || - reason == TERMINATION_REASON_KKT_MATRIX_PASS_LIMIT || - reason == TERMINATION_REASON_INTERRUPTED_BY_USER; -} - std::optional Solver::TryFeasibilityPolishing( const int iteration_limit, const std::atomic* interrupt_solve, SolveLog& solve_log) { @@ -2600,12 +2670,20 @@ std::optional Solver::TryFeasibilityPolishing( // polishing, it is usually increased, and an experiment (on MIPLIB2017) // shows that this test reduces the iteration count by 3-4% on average. 
if (!ObjectiveGapMet(optimality_criteria, first_convergence_info)) { - if (params_.verbosity_level() >= 2) { - SOLVER_LOG(&preprocess_solver_->Logger(), - "Skipping feasibility polishing because the objective gap " - "is too large."); + std::optional simple_termination_reason = + CheckSimpleTerminationCriteria(params_.termination_criteria(), + TotalWorkSoFar(solve_log), + interrupt_solve); + if (!(simple_termination_reason.has_value() && + DoFeasibilityPolishingAfterLimitsReached( + params_, simple_termination_reason->reason))) { + if (params_.verbosity_level() >= 2) { + SOLVER_LOG(&preprocess_solver_->Logger(), + "Skipping feasibility polishing because the objective gap " + "is too large."); + } + return std::nullopt; } - return std::nullopt; } if (params_.verbosity_level() >= 2) { @@ -2623,7 +2701,17 @@ std::optional Solver::TryFeasibilityPolishing( } if (TerminationReasonIsWorkLimit( primal_result.solve_log.termination_reason())) { - return std::nullopt; + // Have we also reached the overall work limit? If so, consider finishing + // the final polishing phase. 
+ std::optional simple_termination_reason = + CheckSimpleTerminationCriteria(params_.termination_criteria(), + TotalWorkSoFar(solve_log), + interrupt_solve); + if (!(simple_termination_reason.has_value() && + DoFeasibilityPolishingAfterLimitsReached( + params_, simple_termination_reason->reason))) { + return std::nullopt; + } } else if (primal_result.solve_log.termination_reason() != TERMINATION_REASON_OPTIMAL) { // Note: `TERMINATION_REASON_PRIMAL_INFEASIBLE` could happen normally, but @@ -2651,9 +2739,29 @@ std::optional Solver::TryFeasibilityPolishing( TerminationReason_Name(dual_result.solve_log.termination_reason())); } + IterationStats full_stats = TotalWorkSoFar(solve_log); + std::optional simple_termination_reason = + CheckSimpleTerminationCriteria(params_.termination_criteria(), full_stats, + interrupt_solve); if (TerminationReasonIsWorkLimit( dual_result.solve_log.termination_reason())) { - return std::nullopt; + // Have we also reached the overall work limit? If so, consider falling out + // of the "if" test and returning the polishing solution anyway. 
+ if (simple_termination_reason.has_value() && + DoFeasibilityPolishingAfterLimitsReached( + params_, simple_termination_reason->reason)) { + preprocess_solver_->ComputeConvergenceAndInfeasibilityFromWorkingSolution( + params_, primal_result.primal_solution, dual_result.dual_solution, + POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION, + full_stats.add_convergence_information(), nullptr); + return ConstructSolverResult( + std::move(primal_result.primal_solution), + std::move(dual_result.dual_solution), full_stats, + simple_termination_reason->reason, + POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION, solve_log); + } else { + return std::nullopt; + } } else if (dual_result.solve_log.termination_reason() != TERMINATION_REASON_OPTIMAL) { // Note: The comment in the corresponding location when checking the @@ -2665,7 +2773,6 @@ std::optional Solver::TryFeasibilityPolishing( return std::nullopt; } - IterationStats full_stats = TotalWorkSoFar(solve_log); preprocess_solver_->ComputeConvergenceAndInfeasibilityFromWorkingSolution( params_, primal_result.primal_solution, dual_result.dual_solution, POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION, @@ -2689,12 +2796,16 @@ std::optional Solver::TryFeasibilityPolishing( full_stats, preprocess_solver_->OriginalBoundNorms(), /*force_numerical_termination=*/false); - if (earned_termination.has_value()) { - return ConstructSolverResult(std::move(primal_result.primal_solution), - std::move(dual_result.dual_solution), - full_stats, earned_termination->reason, - POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION, - solve_log); + if (earned_termination.has_value() || + (simple_termination_reason.has_value() && + DoFeasibilityPolishingAfterLimitsReached( + params_, simple_termination_reason->reason))) { + return ConstructSolverResult( + std::move(primal_result.primal_solution), + std::move(dual_result.dual_solution), full_stats, + earned_termination.has_value() ? 
earned_termination->reason + : simple_termination_reason->reason, + POINT_TYPE_FEASIBILITY_POLISHING_SOLUTION, solve_log); } // Note: A typical termination check would now call // `CheckSimpleTerminationCriteria`. However, there is no obvious iterate to @@ -2708,15 +2819,22 @@ std::optional Solver::TryFeasibilityPolishing( TerminationCriteria ReduceWorkLimitsByPreviousWork( TerminationCriteria criteria, const int iteration_limit, - const IterationStats& previous_work) { - criteria.set_iteration_limit(std::max( - 0, std::min(iteration_limit, criteria.iteration_limit() - - previous_work.iteration_number()))); - criteria.set_kkt_matrix_pass_limit( - std::max(0.0, criteria.kkt_matrix_pass_limit() - - previous_work.cumulative_kkt_matrix_passes())); - criteria.set_time_sec_limit(std::max( - 0.0, criteria.time_sec_limit() - previous_work.cumulative_time_sec())); + const IterationStats& previous_work, + bool apply_feasibility_polishing_after_limits_reached) { + if (apply_feasibility_polishing_after_limits_reached) { + criteria.set_iteration_limit(iteration_limit); + criteria.set_kkt_matrix_pass_limit(std::numeric_limits::infinity()); + criteria.set_time_sec_limit(std::numeric_limits::infinity()); + } else { + criteria.set_iteration_limit(std::max( + 0, std::min(iteration_limit, criteria.iteration_limit() - + previous_work.iteration_number()))); + criteria.set_kkt_matrix_pass_limit( + std::max(0.0, criteria.kkt_matrix_pass_limit() - + previous_work.cumulative_kkt_matrix_passes())); + criteria.set_time_sec_limit(std::max( + 0.0, criteria.time_sec_limit() - previous_work.cumulative_time_sec())); + } return criteria; } @@ -2725,9 +2843,13 @@ SolverResult Solver::TryPrimalPolishing( const std::atomic* interrupt_solve, SolveLog& solve_log) { PrimalDualHybridGradientParams primal_feasibility_params = params_; *primal_feasibility_params.mutable_termination_criteria() = - ReduceWorkLimitsByPreviousWork(params_.termination_criteria(), - iteration_limit, - TotalWorkSoFar(solve_log)); 
+ ReduceWorkLimitsByPreviousWork( + params_.termination_criteria(), iteration_limit, + TotalWorkSoFar(solve_log), + params_.apply_feasibility_polishing_after_limits_reached()); + if (params_.apply_feasibility_polishing_if_solver_is_interrupted()) { + interrupt_solve = nullptr; + } // This will save the original objective after the swap. VectorXd objective; @@ -2785,9 +2907,13 @@ SolverResult Solver::TryDualPolishing(VectorXd starting_dual_solution, SolveLog& solve_log) { PrimalDualHybridGradientParams dual_feasibility_params = params_; *dual_feasibility_params.mutable_termination_criteria() = - ReduceWorkLimitsByPreviousWork(params_.termination_criteria(), - iteration_limit, - TotalWorkSoFar(solve_log)); + ReduceWorkLimitsByPreviousWork( + params_.termination_criteria(), iteration_limit, + TotalWorkSoFar(solve_log), + params_.apply_feasibility_polishing_after_limits_reached()); + if (params_.apply_feasibility_polishing_if_solver_is_interrupted()) { + interrupt_solve = nullptr; + } // These will initially contain the homogenous variable and constraint // bounds, but will contain the original variable and constraint bounds @@ -2883,14 +3009,6 @@ SolverResult Solver::Solve(const IterationType iteration_type, if (params_.use_feasibility_polishing() && iteration_type == IterationType::kNormal && iterations_completed_ >= next_feasibility_polishing_iteration) { - // The total number of iterations in feasibility polishing is at most - // `4 * iterations_completed_ / kFeasibilityIterationFraction`. - // One factor of two is because there are both primal and dual feasibility - // polishing phases, and the other factor of two is because - // `next_feasibility_polishing_iteration` increases by a factor of 2 each - // feasibility polishing phase, so the sum of iteration limits is at most - // twice the last value. 
- const int kFeasibilityIterationFraction = 8; const std::optional feasibility_result = TryFeasibilityPolishing( iterations_completed_ / kFeasibilityIterationFraction, @@ -2940,6 +3058,7 @@ SolverResult PrimalDualHybridGradient( std::move(iteration_stats_callback)); } +// See comment at top of file. SolverResult PrimalDualHybridGradient( QuadraticProgram qp, const PrimalDualHybridGradientParams& params, std::optional initial_solution, diff --git a/ortools/pdlp/primal_dual_hybrid_gradient_test.cc b/ortools/pdlp/primal_dual_hybrid_gradient_test.cc index 122ab5fd25..ef1c0e6b8b 100644 --- a/ortools/pdlp/primal_dual_hybrid_gradient_test.cc +++ b/ortools/pdlp/primal_dual_hybrid_gradient_test.cc @@ -13,13 +13,11 @@ #include "ortools/pdlp/primal_dual_hybrid_gradient.h" -#include #include #include #include #include #include -#include #include #include #include @@ -30,7 +28,6 @@ #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" -#include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "gtest/gtest.h" @@ -1520,7 +1517,6 @@ TEST(PrimalDualHybridGradientTest, EmptyQp) { } TEST(PrimalDualHybridGradientTest, RespectsInterrupt) { - std::atomic interrupt_solve; PrimalDualHybridGradientParams params; params.mutable_termination_criteria() ->mutable_simple_optimality_criteria() @@ -1529,7 +1525,7 @@ TEST(PrimalDualHybridGradientTest, RespectsInterrupt) { ->mutable_simple_optimality_criteria() ->set_eps_optimal_relative(0.0); - interrupt_solve.store(true); + std::atomic interrupt_solve = true; const SolverResult output = PrimalDualHybridGradient(TestLp(), params, &interrupt_solve); EXPECT_EQ(output.solve_log.termination_reason(), @@ -1537,7 +1533,6 @@ TEST(PrimalDualHybridGradientTest, RespectsInterrupt) { } TEST(PrimalDualHybridGradientTest, RespectsInterruptFromCallback) { - std::atomic interrupt_solve; PrimalDualHybridGradientParams params; params.mutable_termination_criteria() 
->mutable_simple_optimality_criteria() @@ -1546,7 +1541,7 @@ TEST(PrimalDualHybridGradientTest, RespectsInterruptFromCallback) { ->mutable_simple_optimality_criteria() ->set_eps_optimal_relative(0.0); - interrupt_solve.store(false); + std::atomic interrupt_solve = false; auto callback = [&](const IterationCallbackInfo& info) { if (info.iteration_stats.iteration_number() >= 10) { interrupt_solve.store(true); @@ -1562,7 +1557,6 @@ TEST(PrimalDualHybridGradientTest, RespectsInterruptFromCallback) { } TEST(PrimalDualHybridGradientTest, IgnoresFalseInterrupt) { - std::atomic interrupt_solve; PrimalDualHybridGradientParams params; params.mutable_termination_criteria() ->mutable_simple_optimality_criteria() @@ -1572,7 +1566,7 @@ TEST(PrimalDualHybridGradientTest, IgnoresFalseInterrupt) { ->set_eps_optimal_relative(0.0); params.mutable_termination_criteria()->set_kkt_matrix_pass_limit(1); - interrupt_solve.store(false); + std::atomic interrupt_solve = false; const SolverResult output = PrimalDualHybridGradient(TestLp(), params, &interrupt_solve); EXPECT_EQ(output.solve_log.termination_reason(), @@ -1793,6 +1787,143 @@ TEST_F(FeasibilityPolishingPrimalTest, FeasibilityPolishingFindsValidSolution) { 1.0e-12)); } +TEST_F(FeasibilityPolishingPrimalTest, + NoPolishingAfterIterationLimitWhenPolishingAfterLimitsDisabled) { + // Feasibility polishing would solve the problem the first time it is + // attempted, which would be at iteration 100. 
+ params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_after_limits_reached(false); + params_.mutable_termination_criteria()->set_iteration_limit(50); + SolverResult output = PrimalDualHybridGradient(lp_, params_); + EXPECT_EQ(output.solve_log.termination_reason(), + TERMINATION_REASON_ITERATION_LIMIT); +} + +TEST_F(FeasibilityPolishingPrimalTest, + PolishingAfterIterationLimitWhenPolishingAfterLimitsEnabled) { + params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_after_limits_reached(true); + params_.mutable_termination_criteria()->set_iteration_limit(50); + SolverResult output = PrimalDualHybridGradient(lp_, params_); + EXPECT_EQ(output.solve_log.termination_reason(), TERMINATION_REASON_OPTIMAL); +} + +TEST_F(FeasibilityPolishingPrimalTest, + PolishingTerminatesAfterIterationLimitWhenPolishingAfterLimitsDisabled) { + // Feasibility polishing will be triggered at iteration 100. The iteration + // limit prevents primal polishing from completing. 
+ params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_after_limits_reached(false); + params_.mutable_termination_criteria()->set_iteration_limit(101); + SolverResult output = PrimalDualHybridGradient(lp_, params_); + EXPECT_EQ(output.solve_log.termination_reason(), + TERMINATION_REASON_ITERATION_LIMIT); +} + +TEST_F(FeasibilityPolishingPrimalTest, + PolishingContinuesAfterIterationLimitWhenPolishingAfterLimitsEnabled) { + params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_after_limits_reached(true); + params_.mutable_termination_criteria()->set_iteration_limit(101); + SolverResult output = PrimalDualHybridGradient(lp_, params_); + EXPECT_EQ(output.solve_log.termination_reason(), TERMINATION_REASON_OPTIMAL); +} + +TEST_F(FeasibilityPolishingPrimalTest, + PolishingStopsAfterContinuingAfterIterationLimitWhenNotOptimal) { + params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_after_limits_reached(true); + params_.mutable_termination_criteria()->set_iteration_limit(101); + auto* opt_criteria = params_.mutable_termination_criteria() + ->mutable_detailed_optimality_criteria(); + opt_criteria->set_eps_optimal_primal_residual_absolute(1.0e-16); + opt_criteria->set_eps_optimal_primal_residual_relative(0.0); + opt_criteria->set_eps_optimal_dual_residual_absolute(1.0e-16); + opt_criteria->set_eps_optimal_dual_residual_relative(0.0); + opt_criteria->set_eps_optimal_objective_gap_absolute(1.0e-16); + opt_criteria->set_eps_optimal_objective_gap_relative(0.0); + SolverResult output = PrimalDualHybridGradient(lp_, params_); + EXPECT_EQ(output.solve_log.termination_reason(), + TERMINATION_REASON_ITERATION_LIMIT); + // 100 main iterations + at most 12 primal feasibility polishing iterations + // + at most 12 dual feasibility polishing iterations. 
+ EXPECT_LE(output.solve_log.iteration_count(), 124); +} + +TEST_F(FeasibilityPolishingPrimalTest, + NoPolishingAfterInterruptWhenPolishingAfterInterruptDisabled) { + // Feasibility polishing would solve the problem the first time it is + // attempted, which would be at iteration 100. + params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_if_solver_is_interrupted(false); + std::atomic interrupt_solve = false; + auto callback = [&](const IterationCallbackInfo& info) { + if (info.iteration_stats.iteration_number() >= 50) { + interrupt_solve.store(true); + } + }; + SolverResult output = + PrimalDualHybridGradient(lp_, params_, &interrupt_solve, + /*message_callback=*/nullptr, callback); + EXPECT_EQ(output.solve_log.termination_reason(), + TERMINATION_REASON_INTERRUPTED_BY_USER); +} + +TEST_F(FeasibilityPolishingPrimalTest, + PolishingAfterInterruptWhenPolishingAfterInterruptEnabled) { + // Feasibility polishing would solve the problem the first time it is + // attempted, which would be at iteration 100. + params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_if_solver_is_interrupted(true); + std::atomic interrupt_solve = false; + auto callback = [&](const IterationCallbackInfo& info) { + if (info.iteration_stats.iteration_number() >= 50) { + interrupt_solve.store(true); + } + }; + SolverResult output = + PrimalDualHybridGradient(lp_, params_, &interrupt_solve, + /*message_callback=*/nullptr, callback); + EXPECT_EQ(output.solve_log.termination_reason(), TERMINATION_REASON_OPTIMAL); +} + +TEST_F(FeasibilityPolishingPrimalTest, + PolishingTerminatesAfterInterruptWhenPolishingAfterInterruptDisabled) { + // Feasibility polishing would solve the problem the first time it is + // attempted, which would be at iteration 100. 
+ params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_if_solver_is_interrupted(false); + std::atomic interrupt_solve = false; + auto callback = [&](const IterationCallbackInfo& info) { + if (info.iteration_type == IterationType::kPrimalFeasibility) { + interrupt_solve.store(true); + } + }; + SolverResult output = + PrimalDualHybridGradient(lp_, params_, &interrupt_solve, + /*message_callback=*/nullptr, callback); + EXPECT_EQ(output.solve_log.termination_reason(), + TERMINATION_REASON_INTERRUPTED_BY_USER); +} + +TEST_F(FeasibilityPolishingPrimalTest, + PolishingContinuesAfterInterruptWhenPolishingAfterInterruptEnabled) { + // Feasibility polishing would solve the problem the first time it is + // attempted, which would be at iteration 100. + params_.set_use_feasibility_polishing(true); + params_.set_apply_feasibility_polishing_if_solver_is_interrupted(true); + std::atomic interrupt_solve = false; + auto callback = [&](const IterationCallbackInfo& info) { + if (info.iteration_type == IterationType::kPrimalFeasibility) { + interrupt_solve.store(true); + } + }; + SolverResult output = + PrimalDualHybridGradient(lp_, params_, &interrupt_solve, + /*message_callback=*/nullptr, callback); + EXPECT_EQ(output.solve_log.termination_reason(), TERMINATION_REASON_OPTIMAL); +} + TEST_F(FeasibilityPolishingPrimalTest, FeasibilityPolishingDetailsInLog) { SolverResult output = PrimalDualHybridGradient(lp_, params_); diff --git a/ortools/pdlp/solvers.proto b/ortools/pdlp/solvers.proto index c87a5a90d1..f21869e9a0 100644 --- a/ortools/pdlp/solvers.proto +++ b/ortools/pdlp/solvers.proto @@ -480,5 +480,18 @@ message PrimalDualHybridGradientParams { // optional bool use_feasibility_polishing = 30 [default = false]; + // If true, feasibility polishing will be applied after the iteration limit, + // kkt limit, or time limit is reached. 
This can result in a solution that is + // closer to feasibility, at the expense of violating the limit by a moderate + // amount. + optional bool apply_feasibility_polishing_after_limits_reached = 33 + [default = false]; + + // If true, feasibility polishing will be applied after the solver is + // interrupted. This can result in a solution that is closer to feasibility, + // at the expense of not stopping as promptly when interrupted. + optional bool apply_feasibility_polishing_if_solver_is_interrupted = 34 + [default = false]; + reserved 13, 14, 15, 20, 21; } diff --git a/ortools/util/time_limit.h b/ortools/util/time_limit.h index cf8fbca432..2fd664b182 100644 --- a/ortools/util/time_limit.h +++ b/ortools/util/time_limit.h @@ -465,19 +465,23 @@ class NestedTimeLimit { class TimeLimitCheckEveryNCalls { public: TimeLimitCheckEveryNCalls(int N, TimeLimit* time_limit) - : time_limit_(time_limit), count_(0), frequency_(N) {} + : time_limit_(time_limit), frequency_(N) {} bool LimitReached() { if (count_++ == frequency_) { - if (time_limit_->LimitReached()) return true; + if (time_limit_->LimitReached()) { + stopped_ = true; + return true; + } count_ = 0; } - return false; + return stopped_; } private: TimeLimit* time_limit_; - int count_; + bool stopped_ = false; + int count_ = 0; const int frequency_; }; From 640aaeb8e0e7a135fa043c9439a257182b3bd76a Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 14 May 2025 13:33:20 +0200 Subject: [PATCH 002/509] [CP-SAT] lots of bugfixes, improvement to primary variables heuristics; cleanup scheduling cuts code --- ortools/sat/BUILD.bazel | 12 +- ortools/sat/cp_model_solver.cc | 159 ++-- ortools/sat/cp_model_solver.h | 2 - ortools/sat/cp_model_solver_helpers.cc | 8 + ortools/sat/diffn.cc | 32 +- ortools/sat/drat_writer.h | 6 +- ortools/sat/integer_base.cc | 49 +- ortools/sat/integer_base.h | 12 +- ortools/sat/integer_search.cc | 14 +- ortools/sat/intervals.cc | 77 +- ortools/sat/intervals.h | 28 +- 
ortools/sat/linear_constraint.cc | 11 + ortools/sat/linear_constraint.h | 14 +- ortools/sat/linear_constraint_manager.cc | 8 +- ortools/sat/linear_constraint_test.cc | 9 +- ortools/sat/precedences.cc | 148 ++++ ortools/sat/precedences.h | 59 ++ ortools/sat/primary_variables.cc | 20 + ortools/sat/primary_variables_test.cc | 21 + ortools/sat/sat_solver.cc | 3 +- ortools/sat/scheduling_cuts.cc | 934 ++++++++++++----------- ortools/sat/scheduling_cuts.h | 88 +-- ortools/sat/scheduling_cuts_test.cc | 105 +-- ortools/sat/scheduling_helpers.cc | 2 - ortools/sat/simplification.h | 13 +- 25 files changed, 1088 insertions(+), 746 deletions(-) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index a4708c1830..b1eb60e362 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -726,6 +726,7 @@ cc_library( ":util", ":work_assignment", "//ortools/base", + "//ortools/base:file", "//ortools/base:status_macros", "//ortools/base:strong_vector", "//ortools/base:threadpool", @@ -1809,7 +1810,6 @@ cc_library( "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:vlog_is_on", - "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", @@ -1935,6 +1935,7 @@ cc_library( ":linear_constraint", ":model", ":no_overlap_2d_helper", + ":precedences", ":sat_base", ":sat_solver", ":scheduling_helpers", @@ -1942,7 +1943,6 @@ cc_library( "//ortools/util:strong_integers", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/log:check", - "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/types:span", ], ) @@ -1976,13 +1976,11 @@ cc_library( ":sat_base", ":sat_solver", "//ortools/base", - "//ortools/base:strong_vector", "//ortools/util:bitset", "//ortools/util:sort", "//ortools/util:strong_integers", "@abseil-cpp//absl/base:core_headers", "@abseil-cpp//absl/log:check", - "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/strings", 
"@abseil-cpp//absl/types:span", ], @@ -2891,10 +2889,12 @@ cc_library( ":cuts", ":integer", ":integer_base", + ":intervals", ":linear_constraint", ":linear_constraint_manager", ":model", ":sat_base", + ":sat_solver", ":scheduling_helpers", ":util", "//ortools/base", @@ -2906,7 +2906,6 @@ cc_library( "@abseil-cpp//absl/base:core_headers", "@abseil-cpp//absl/container:btree", "@abseil-cpp//absl/container:flat_hash_map", - "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", @@ -2929,6 +2928,7 @@ cc_test( ":model", ":sat_base", ":scheduling_cuts", + ":scheduling_helpers", "//ortools/base:gmock_main", "//ortools/base:strong_vector", "//ortools/util:strong_integers", @@ -3542,13 +3542,11 @@ cc_library( ":synchronization", ":timetable", ":util", - "//ortools/base:stl_util", "//ortools/util:bitset", "//ortools/util:saturated_arithmetic", "//ortools/util:strong_integers", "//ortools/util:time_limit", "@abseil-cpp//absl/container:flat_hash_set", - "@abseil-cpp//absl/container:inlined_vector", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:vlog_is_on", diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 2d81c362f1..680555185a 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -27,15 +27,10 @@ #include #include #include +#include #include #include -#include "ortools/base/logging.h" -#include "ortools/base/timer.h" -#if !defined(__PORTABLE_PLATFORM__) -#include "ortools/base/helpers.h" -#include "ortools/base/options.h" -#endif // __PORTABLE_PLATFORM__ #include "absl/base/thread_annotations.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" @@ -54,6 +49,10 @@ #include "absl/types/span.h" #include "google/protobuf/arena.h" #include "google/protobuf/text_format.h" +#include "ortools/base/helpers.h" +#include "ortools/base/logging.h" +#include "ortools/base/options.h" +#include 
"ortools/base/timer.h" #include "ortools/port/proto_utils.h" #include "ortools/sat/combine_solutions.h" #include "ortools/sat/cp_model.pb.h" @@ -92,9 +91,9 @@ #include "ortools/sat/work_assignment.h" #include "ortools/util/logging.h" #include "ortools/util/random_engine.h" -#if !defined(__PORTABLE_PLATFORM__) +#if !defined(__EMBEDDED_PLATFORM__) #include "ortools/util/sigint.h" -#endif // __PORTABLE_PLATFORM__ +#endif // __EMBEDDED_PLATFORM__ #include "ortools/base/version.h" #include "ortools/util/sorted_interval_list.h" #include "ortools/util/time_limit.h" @@ -1208,7 +1207,7 @@ class FullProblemSolver : public SubSolver { bool previous_task_is_completed_ ABSL_GUARDED_BY(mutex_) = true; }; -#if !defined(__PORTABLE_PLATFORM__) +#if !defined(__EMBEDDED_PLATFORM__) class FeasibilityPumpSolver : public SubSolver { public: @@ -1398,7 +1397,7 @@ class LnsSolver : public SubSolver { break; } const std::string_view search_info = - absl::StripPrefix(std::string_view(local_params.name()), "lns_"); + absl::StripPrefix(absl::string_view(local_params.name()), "lns_"); local_params.set_max_deterministic_time(data.deterministic_limit); std::string source_info = @@ -2218,7 +2217,7 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { LaunchSubsolvers(params, shared, subsolvers, name_filter.AllIgnored()); } -#endif // __PORTABLE_PLATFORM__ +#endif // !defined(__EMBEDDED_PLATFORM__) // If the option use_sat_inprocessing is true, then before post-solving a // solution, we need to make sure we add any new clause required for postsolving @@ -2263,18 +2262,27 @@ std::function NewBestBoundCallback( }; } -#if !defined(__PORTABLE_PLATFORM__) +namespace { +template +void ParseFromStringOrDie(absl::string_view str, T* proto) { + if constexpr (std::is_base_of_v) { + CHECK(google::protobuf::TextFormat::ParseFromString(str, proto)) << str; + } else { + LOG(FATAL) << "Calling NewSatParameters() with a textual proto is not " + "supported when using Lite Protobuf."; + } +} +} 
// namespace + // TODO(user): Support it on android. std::function NewSatParameters( const std::string& params) { sat::SatParameters parameters; if (!params.empty()) { - CHECK(google::protobuf::TextFormat::ParseFromString(params, ¶meters)) - << params; + ParseFromStringOrDie(params, ¶meters); } return NewSatParameters(parameters); } -#endif // __PORTABLE_PLATFORM__ std::function NewSatParameters( const sat::SatParameters& parameters) { @@ -2337,15 +2345,15 @@ void RegisterSearchStatisticCallback(Model* global_model) { } void MergeParamsWithFlagsAndDefaults(SatParameters* params) { -#if !defined(__PORTABLE_PLATFORM__) - // Override parameters? - if (!absl::GetFlag(FLAGS_cp_model_params).empty()) { - SatParameters flag_params; - CHECK(google::protobuf::TextFormat::ParseFromString( - absl::GetFlag(FLAGS_cp_model_params), &flag_params)); - params->MergeFrom(flag_params); + if constexpr (std::is_base_of_v) { + // Override parameters? + if (!absl::GetFlag(FLAGS_cp_model_params).empty()) { + SatParameters flag_params; + ParseFromStringOrDie(absl::GetFlag(FLAGS_cp_model_params), + &flag_params); + params->MergeFrom(flag_params); + } } -#endif // __PORTABLE_PLATFORM__ } } // namespace @@ -2356,19 +2364,19 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { wall_timer->Start(); user_timer->Start(); -#if !defined(__PORTABLE_PLATFORM__) - // Dump initial model? - if (absl::GetFlag(FLAGS_cp_model_dump_models)) { - DumpModelProto(model_proto, "model"); - } - if (absl::GetFlag(FLAGS_cp_model_export_model)) { - if (model_proto.name().empty()) { - DumpModelProto(model_proto, "unnamed_model"); - } else { - DumpModelProto(model_proto, model_proto.name()); + if constexpr (std::is_base_of_v) { + // Dump initial model? 
+ if (absl::GetFlag(FLAGS_cp_model_dump_models)) { + DumpModelProto(model_proto, "model"); + } + if (absl::GetFlag(FLAGS_cp_model_export_model)) { + if (model_proto.name().empty()) { + DumpModelProto(model_proto, "unnamed_model"); + } else { + DumpModelProto(model_proto, model_proto.name()); + } } } -#endif // __PORTABLE_PLATFORM__ MergeParamsWithFlagsAndDefaults(model->GetOrCreate()); const SatParameters& params = *model->GetOrCreate(); @@ -2389,20 +2397,21 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { absl::GetFlag(FLAGS_cp_model_dump_prefix)); RegisterSearchStatisticCallback(model); -#if !defined(__PORTABLE_PLATFORM__) - // Note that the postprocessors are executed in reverse order, so this - // will always dump the response just before it is returned since it is - // the first one we register. - if (absl::GetFlag(FLAGS_cp_model_dump_response)) { - shared_response_manager->AddFinalResponsePostprocessor( - [](CpSolverResponse* response) { - const std::string file = absl::StrCat( - absl::GetFlag(FLAGS_cp_model_dump_prefix), "response.pb.txt"); - LOG(INFO) << "Dumping response proto to '" << file << "'."; - CHECK(WriteModelProtoToFile(*response, file)); - }); + if constexpr (std::is_base_of_v) { + // Note that the postprocessors are executed in reverse order, so this + // will always dump the response just before it is returned since it is + // the first one we register. + if (absl::GetFlag(FLAGS_cp_model_dump_response)) { + shared_response_manager->AddFinalResponsePostprocessor( + [](CpSolverResponse* response) { + const std::string file = absl::StrCat( + absl::GetFlag(FLAGS_cp_model_dump_prefix), "response.pb.txt"); + LOG(INFO) << "Dumping response proto to '" << file << "'."; + CHECK(WriteModelProtoToFile(*response, file)); + }); + } } -#endif // __PORTABLE_PLATFORM__ // Always display the final response stats if requested. // This also copy the logs to the response if requested. 
@@ -2456,13 +2465,13 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { // Initialize the time limit from the parameters. model->GetOrCreate()->ResetLimitFromParameters(params); -#if !defined(__PORTABLE_PLATFORM__) +#if !defined(__EMBEDDED_PLATFORM__) // Register SIGINT handler if requested by the parameters. if (params.catch_sigint_signal()) { model->GetOrCreate()->Register( [shared_time_limit]() { shared_time_limit->Stop(); }); } -#endif // __PORTABLE_PLATFORM__ +#endif // __EMBEDDED_PLATFORM__ SOLVER_LOG(logger, ""); SOLVER_LOG(logger, "Starting ", CpSatSolverVersion()); @@ -2868,31 +2877,33 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { }); } -#if !defined(__PORTABLE_PLATFORM__) - if (absl::GetFlag(FLAGS_cp_model_dump_models)) { - DumpModelProto(*new_cp_model_proto, "presolved_model"); - DumpModelProto(*mapping_proto, "mapping_model"); + if constexpr (std::is_base_of_v && + std::is_base_of_v) { + if (absl::GetFlag(FLAGS_cp_model_dump_models)) { + DumpModelProto(*new_cp_model_proto, "presolved_model"); + DumpModelProto(*mapping_proto, "mapping_model"); - // If the model is convertible to a MIP, we dump it too. - // - // TODO(user): We could try to dump our linear relaxation too. - MPModelProto mip_model; - if (ConvertCpModelProtoToMPModelProto(*new_cp_model_proto, &mip_model)) { - DumpModelProto(mip_model, "presolved_mp_model"); - } + // If the model is convertible to a MIP, we dump it too. + // + // TODO(user): We could try to dump our linear relaxation too. + MPModelProto mip_model; + if (ConvertCpModelProtoToMPModelProto(*new_cp_model_proto, &mip_model)) { + DumpModelProto(mip_model, "presolved_mp_model"); + } - // If the model is convertible to a pure SAT one, we dump it too. 
- std::string cnf_string; - if (ConvertCpModelProtoToCnf(*new_cp_model_proto, &cnf_string)) { - const std::string filename = absl::StrCat( - absl::GetFlag(FLAGS_cp_model_dump_prefix), "presolved_cnf_model.cnf"); - LOG(INFO) << "Dumping cnf model to '" << filename << "'."; - const absl::Status status = - file::SetContents(filename, cnf_string, file::Defaults()); - if (!status.ok()) LOG(ERROR) << status; + // If the model is convertible to a pure SAT one, we dump it too. + std::string cnf_string; + if (ConvertCpModelProtoToCnf(*new_cp_model_proto, &cnf_string)) { + const std::string filename = + absl::StrCat(absl::GetFlag(FLAGS_cp_model_dump_prefix), + "presolved_cnf_model.cnf"); + LOG(INFO) << "Dumping cnf model to '" << filename << "'."; + const absl::Status status = + file::SetContents(filename, cnf_string, file::Defaults()); + if (!status.ok()) LOG(ERROR) << status; + } } } -#endif // __PORTABLE_PLATFORM__ if (params.stop_after_presolve() || shared_time_limit->LimitReached()) { int64_t num_terms = 0; @@ -2955,15 +2966,15 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { LoadDebugSolution(*new_cp_model_proto, model); if (!model->GetOrCreate()->LimitReached()) { -#if defined(__PORTABLE_PLATFORM__) +#if defined(__EMBEDDED_PLATFORM__) if (/* DISABLES CODE */ (false)) { // We ignore the multithreading parameter in this case. 
-#else // __PORTABLE_PLATFORM__ +#else // __EMBEDDED_PLATFORM__ if (params.num_workers() > 1 || params.interleave_search() || !params.subsolvers().empty() || !params.filter_subsolvers().empty() || params.use_ls_only()) { SolveCpModelParallel(&shared, model); -#endif // __PORTABLE_PLATFORM__ +#endif // __EMBEDDED_PLATFORM__ } else { shared_response_manager->SetUpdateGapIntegralOnEachChange(true); @@ -2991,14 +3002,12 @@ CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, return SolveCpModel(model_proto, &model); } -#if !defined(__PORTABLE_PLATFORM__) CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, const std::string& params) { Model model; model.Add(NewSatParameters(params)); return SolveCpModel(model_proto, &model); } -#endif // !__PORTABLE_PLATFORM__ } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_solver.h b/ortools/sat/cp_model_solver.h index 149b801ced..2afcac1ec6 100644 --- a/ortools/sat/cp_model_solver.h +++ b/ortools/sat/cp_model_solver.h @@ -123,10 +123,8 @@ std::function NewBestBoundCallback( \endcode * before calling \c SolveCpModel(). */ -#if !defined(__PORTABLE_PLATFORM__) std::function NewSatParameters( const std::string& params); -#endif // !__PORTABLE_PLATFORM__ std::function NewSatParameters( const SatParameters& parameters); diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 3946a878cb..d889246bfe 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -1070,6 +1070,7 @@ void FillBinaryRelationRepository(const CpModelProto& model_proto, auto* encoder = model->GetOrCreate(); auto* mapping = model->GetOrCreate(); auto* repository = model->GetOrCreate(); + auto* relations_maps = model->GetOrCreate(); for (const ConstraintProto& ct : model_proto.constraints()) { // Load conditional precedences and always true binary relations. 
@@ -1135,6 +1136,13 @@ void FillBinaryRelationRepository(const CpModelProto& model_proto, if (vars.size() == 2) { repository->Add(Literal(kNoLiteralIndex), {vars[0], coeffs[0]}, {vars[1], coeffs[1]}, rhs_min, rhs_max); + + LinearExpression2 expr; + expr.vars[0] = vars[0]; + expr.vars[1] = vars[1]; + expr.coeffs[0] = coeffs[0]; + expr.coeffs[1] = coeffs[1]; + relations_maps->AddRelationBounds(expr, rhs_min, rhs_max); } } else { const Literal lit = mapping->Literal(ct.enforcement_literal(0)); diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 853e1b643d..1e86f0cd4f 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -265,29 +265,9 @@ void AddNonOverlappingRectangles(const std::vector& x, if (num_boxes < params.no_overlap_2d_boolean_relations_limit()) { auto* implications = model->GetOrCreate(); auto* sat_solver = model->GetOrCreate(); - auto* encoder = model->GetOrCreate(); auto* integer_trail = model->GetOrCreate(); DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0); - // Creates and returns the Boolean equivalent to a <= b. - const auto f = [repository, integer_trail, encoder]( - const AffineExpression& a, const AffineExpression& b) { - if (a.var == b.var && a.coeff == b.coeff) { - return (a.constant <= b.constant) ? 
encoder->GetTrueLiteral() - : encoder->GetFalseLiteral(); - } - if (integer_trail->UpperBound(a) <= integer_trail->LowerBound(b)) { - return encoder->GetTrueLiteral(); - } - if (integer_trail->LowerBound(a) > integer_trail->UpperBound(b)) { - return encoder->GetFalseLiteral(); - } - repository->CreatePrecedenceLiteral(a, b); - const LiteralIndex index = repository->GetPrecedenceLiteral(a, b); - CHECK(index != kNoLiteralIndex); - return Literal(index); - }; - for (int i = 0; i < num_boxes; ++i) { if (repository->IsAbsent(x[i])) continue; if (repository->IsAbsent(y[i])) continue; @@ -296,8 +276,10 @@ void AddNonOverlappingRectangles(const std::vector& x, if (repository->IsAbsent(y[j])) continue; // At most one of these two x options is true. - const Literal x_ij = f(repository->End(x[i]), repository->Start(x[j])); - const Literal x_ji = f(repository->End(x[j]), repository->Start(x[i])); + const Literal x_ij = repository->GetOrCreatePrecedenceLiteral( + repository->End(x[i]), repository->Start(x[j])); + const Literal x_ji = repository->GetOrCreatePrecedenceLiteral( + repository->End(x[j]), repository->Start(x[i])); if ((integer_trail->LowerBound(repository->Size(x[i])) > 0 || integer_trail->LowerBound(repository->Size(x[j])) > 0) && !implications->AddAtMostOne({x_ij, x_ji})) { @@ -306,8 +288,10 @@ void AddNonOverlappingRectangles(const std::vector& x, } // At most one of these two y options is true. 
- const Literal y_ij = f(repository->End(y[i]), repository->Start(y[j])); - const Literal y_ji = f(repository->End(y[j]), repository->Start(y[i])); + const Literal y_ij = repository->GetOrCreatePrecedenceLiteral( + repository->End(y[i]), repository->Start(y[j])); + const Literal y_ji = repository->GetOrCreatePrecedenceLiteral( + repository->End(y[j]), repository->Start(y[i])); if ((integer_trail->LowerBound(repository->Size(y[i])) > 0 || integer_trail->LowerBound(repository->Size(y[j])) > 0) && !implications->AddAtMostOne({y_ij, y_ji})) { diff --git a/ortools/sat/drat_writer.h b/ortools/sat/drat_writer.h index 209a81d0ae..e5e3a7dff6 100644 --- a/ortools/sat/drat_writer.h +++ b/ortools/sat/drat_writer.h @@ -16,12 +16,8 @@ #include -#if !defined(__PORTABLE_PLATFORM__) -#include "ortools/base/file.h" -#else -class File {}; -#endif // !__PORTABLE_PLATFORM__ #include "absl/types/span.h" +#include "ortools/base/file.h" #include "ortools/sat/sat_base.h" namespace operations_research { diff --git a/ortools/sat/integer_base.cc b/ortools/sat/integer_base.cc index a3f8b84130..4f3f7e70e7 100644 --- a/ortools/sat/integer_base.cc +++ b/ortools/sat/integer_base.cc @@ -26,7 +26,15 @@ void LinearExpression2::SimpleCanonicalization() { if (coeffs[1] == 0) vars[1] = kNoIntegerVariable; // Corner case when the underlying variable is the same. - if (vars[0] == vars[1]) { + if (PositiveVariable(vars[0]) == PositiveVariable(vars[1])) { + // Make sure variable are positive before merging. + for (int i = 0; i < 2; ++i) { + if (!VariableIsPositive(vars[i])) { + coeffs[i] = -coeffs[i]; + vars[i] = NegationOf(vars[i]); + } + } + coeffs[0] += coeffs[1]; coeffs[1] = 0; vars[1] = kNoIntegerVariable; @@ -49,27 +57,30 @@ void LinearExpression2::SimpleCanonicalization() { } void LinearExpression2::CanonicalizeAndUpdateBounds(IntegerValue& lb, - IntegerValue& ub) { - // We need to be able to negate without overflow. 
- CHECK_GE(lb, kMinIntegerValue); - CHECK_LE(ub, kMaxIntegerValue); - + IntegerValue& ub, + bool allow_negation) { SimpleCanonicalization(); if (coeffs[0] == 0 || coeffs[1] == 0) return; // abort. - bool negate = false; - if (coeffs[0] == 0) { - if (coeffs[1] != 0) { - negate = !VariableIsPositive(vars[1]); + if (allow_negation) { + bool negate = false; + if (coeffs[0] == 0) { + if (coeffs[1] != 0) { + negate = !VariableIsPositive(vars[1]); + } + } else { + negate = !VariableIsPositive(vars[0]); + } + if (negate) { + Negate(); + + // We need to be able to negate without overflow. + CHECK_GE(lb, kMinIntegerValue); + CHECK_LE(ub, kMaxIntegerValue); + std::swap(lb, ub); + lb = -lb; + ub = -ub; } - } else { - negate = !VariableIsPositive(vars[0]); - } - if (negate) { - Negate(); - std::swap(lb, ub); - lb = -lb; - ub = -ub; } // Do gcd division. @@ -108,7 +119,7 @@ bool BestBinaryRelationBounds::Add(LinearExpression2 expr, IntegerValue lb, RelationStatus BestBinaryRelationBounds::GetStatus(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub) { + IntegerValue ub) const { expr.CanonicalizeAndUpdateBounds(lb, ub); if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { return RelationStatus::IS_UNKNOWN; diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index 398403e375..3ae647f744 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -358,7 +358,8 @@ struct LinearExpression2 { void SimpleCanonicalization(); // This fully canonicalize this, and update the given bounds accordingly. 
- void CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub); + void CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub, + bool allow_negation = false); bool operator==(const LinearExpression2& o) const { return vars[0] == o.vars[0] && vars[1] == o.vars[1] && @@ -369,6 +370,13 @@ struct LinearExpression2 { IntegerVariable vars[2]; }; +inline std::ostream& operator<<(std::ostream& os, + const LinearExpression2& expr) { + os << absl::StrCat(expr.coeffs[0], " X", expr.vars[0], " + ", expr.coeffs[1], + " X", expr.vars[1]); + return os; +} + template H AbslHashValue(H h, const LinearExpression2& e) { h = H::combine(std::move(h), e.vars[0]); @@ -390,7 +398,7 @@ class BestBinaryRelationBounds { // Returns the known status of expr <= bound. RelationStatus GetStatus(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub); + IntegerValue ub) const; private: // The best bound on the given "canonicalized" expression. diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index 0d5de45924..7e095a2eba 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -759,7 +759,7 @@ std::function DisjunctivePrecedenceSearchHeuristic( const auto a = best_helper->GetIntervalDefinition(best_before); const auto b = best_helper->GetIntervalDefinition(best_after); return BooleanOrIntegerLiteral( - repo->GetOrCreateDisjunctivePrecedenceLiteral(a, b)); + repo->GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial(a, b)); } return BooleanOrIntegerLiteral(); @@ -867,7 +867,7 @@ std::function CumulativePrecedenceSearchHeuristic( open_tasks.push_back(first_skipped_task); // TODO(user): If the two box cannot overlap because of high demand, use - // repo.CreateDisjunctivePrecedenceLiteral() instead. + // repo.CreateDisjunctivePrecedenceLiteralIfNonTrivial() instead. // // TODO(user): Add heuristic ordering for creating interesting precedence // first. 
@@ -908,8 +908,8 @@ std::function CumulativePrecedenceSearchHeuristic( } // It shouldn't be able to fail since s can be before t. - CHECK(repo->CreatePrecedenceLiteral(helper->Ends()[s], - helper->Starts()[t])); + CHECK(repo->CreatePrecedenceLiteralIfNonTrivial( + helper->Ends()[s], helper->Starts()[t])); } // Branch on that precedence. @@ -962,7 +962,7 @@ std::function CumulativePrecedenceSearchHeuristic( << " " << best_helper->TaskDebugString(best_after); const AffineExpression end_a = best_helper->Ends()[best_before]; const AffineExpression start_b = best_helper->Starts()[best_after]; - repo->CreatePrecedenceLiteral(end_a, start_b); + repo->CreatePrecedenceLiteralIfNonTrivial(end_a, start_b); return BooleanOrIntegerLiteral( repo->GetPrecedenceLiteral(end_a, start_b)); } @@ -1421,7 +1421,7 @@ LiteralIndex IntegerSearchHelper::GetDecisionLiteral( bool IntegerSearchHelper::GetDecision( const std::function& f, LiteralIndex* decision) { *decision = kNoLiteralIndex; - while (!time_limit_->LimitReached()) { + do { BooleanOrIntegerLiteral new_decision; if (integer_trail_->InPropagationLoop()) { const IntegerVariable var = @@ -1451,7 +1451,7 @@ bool IntegerSearchHelper::GetDecision( *decision = GetDecisionLiteral(new_decision); if (*decision != kNoLiteralIndex) break; - } + } while (!time_limit_->LimitReached()); return true; } diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index 43cf294098..bc5ad0bde2 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -18,15 +18,17 @@ #include #include "absl/container/flat_hash_map.h" -#include "absl/meta/type_traits.h" +#include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/strong_vector.h" +#include "ortools/sat/clause.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/integer_expr.h" #include "ortools/sat/linear_constraint.h" #include "ortools/sat/model.h" #include "ortools/sat/no_overlap_2d_helper.h" +#include 
"ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_solver.h" #include "ortools/sat/scheduling_helpers.h" @@ -35,6 +37,14 @@ namespace operations_research { namespace sat { +IntervalsRepository::IntervalsRepository(Model* model) + : model_(model), + assignment_(model->GetOrCreate()->Assignment()), + sat_solver_(model->GetOrCreate()), + implications_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()), + relations_maps_(model->GetOrCreate()) {} + IntervalVariable IntervalsRepository::CreateInterval(IntegerVariable start, IntegerVariable end, IntegerVariable size, @@ -78,7 +88,7 @@ IntervalVariable IntervalsRepository::CreateInterval(AffineExpression start, void IntervalsRepository::CreateDisjunctivePrecedenceLiteral( IntervalVariable a, IntervalVariable b) { - GetOrCreateDisjunctivePrecedenceLiteral( + GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial( IntervalDefinition{.start = Start(a), .end = End(a), .size = Size(a), @@ -93,7 +103,8 @@ void IntervalsRepository::CreateDisjunctivePrecedenceLiteral( : std::nullopt}); } -LiteralIndex IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteral( +LiteralIndex +IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial( const IntervalDefinition& a, const IntervalDefinition& b) { auto it = disjunctive_precedences_.find({a, b}); if (it != disjunctive_precedences_.end()) return it->second.Index(); @@ -143,7 +154,26 @@ LiteralIndex IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteral( return kNoLiteralIndex; } + // Abort if the relation is already known. + if (relations_maps_->GetPrecedenceStatus(a.end, b.start) == + RelationStatus::IS_TRUE || + relations_maps_->GetPrecedenceStatus(b.end, a.start) == + RelationStatus::IS_TRUE) { + return kNoLiteralIndex; + } + // Create a new literal. 
+ // + // TODO(user): If there are no enforcement and we already have at one of: + // - s <=> a.end <= b.start + // - t <=> b.end <= a.start + // We could use (s, not(s)) or (not(t), t) and make sure s = not(t) if both + // exists. + // + // TODO(user): Otherwise, an alternative solution is to create s and t (can be + // one more Boolean though), and have enforcement => s + t == 1. The later + // might not even be needed though, since interval equation should already + // enforce it. const BooleanVariable boolean_var = sat_solver_->NewBooleanVariable(); const Literal a_before_b = Literal(boolean_var, true); disjunctive_precedences_.insert({{a, b}, a_before_b}); @@ -151,9 +181,10 @@ LiteralIndex IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteral( // Also insert it in precedences. if (enforcement_literals.empty()) { - // TODO(user): also add the reverse like start_b + 1 <= end_a if negated? - precedences_.insert({{a.end, b.start}, a_before_b}); - precedences_.insert({{b.end, a.start}, a_before_b.Negated()}); + relations_maps_->AddReifiedPrecedenceIfNonTrivial(a_before_b, a.end, + b.start); + relations_maps_->AddReifiedPrecedenceIfNonTrivial(a_before_b.Negated(), + b.end, a.start); } enforcement_literals.push_back(a_before_b); @@ -179,25 +210,22 @@ LiteralIndex IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteral( return a_before_b; } -bool IntervalsRepository::CreatePrecedenceLiteral(AffineExpression x, - AffineExpression y) { - if (precedences_.contains({x, y})) return false; +bool IntervalsRepository::CreatePrecedenceLiteralIfNonTrivial( + AffineExpression x, AffineExpression y) { + const LiteralIndex index = relations_maps_->GetReifiedPrecedence(x, y); + if (index != kNoLiteralIndex) return false; // We want l => x <= y and not(l) => x > y <=> y + 1 <= x // Do not create l if the relation is always true or false. 
- if (integer_trail_->UpperBound(x) <= integer_trail_->LowerBound(y)) { - return false; - } - if (integer_trail_->LowerBound(x) > integer_trail_->UpperBound(y)) { + if (relations_maps_->GetPrecedenceStatus(x, y) != + RelationStatus::IS_UNKNOWN) { return false; } // Create a new literal. const BooleanVariable boolean_var = sat_solver_->NewBooleanVariable(); const Literal x_before_y = Literal(boolean_var, true); - - // TODO(user): Also add {{y_plus_one, x}, x_before_y.Negated()} ? - precedences_.insert({{x, y}, x_before_y}); + relations_maps_->AddReifiedPrecedenceIfNonTrivial(x_before_y, x, y); AffineExpression y_plus_one = y; y_plus_one.constant += 1; @@ -208,9 +236,20 @@ bool IntervalsRepository::CreatePrecedenceLiteral(AffineExpression x, LiteralIndex IntervalsRepository::GetPrecedenceLiteral( AffineExpression x, AffineExpression y) const { - const auto it = precedences_.find({x, y}); - if (it != precedences_.end()) return it->second.Index(); - return kNoLiteralIndex; + return relations_maps_->GetReifiedPrecedence(x, y); +} + +Literal IntervalsRepository::GetOrCreatePrecedenceLiteral(AffineExpression x, + AffineExpression y) { + { + const LiteralIndex index = GetPrecedenceLiteral(x, y); + if (index != kNoLiteralIndex) return Literal(index); + } + + CHECK(CreatePrecedenceLiteralIfNonTrivial(x, y)); + const LiteralIndex index = relations_maps_->GetReifiedPrecedence(x, y); + CHECK_NE(index, kNoLiteralIndex); + return Literal(index); } // TODO(user): Ideally we should sort the vector of variables, but right now diff --git a/ortools/sat/intervals.h b/ortools/sat/intervals.h index 11665c2bd1..8b36fd47d2 100644 --- a/ortools/sat/intervals.h +++ b/ortools/sat/intervals.h @@ -42,12 +42,7 @@ namespace sat { // provides many helper functions to add precedences relation between intervals. 
class IntervalsRepository { public: - explicit IntervalsRepository(Model* model) - : model_(model), - assignment_(model->GetOrCreate()->Assignment()), - sat_solver_(model->GetOrCreate()), - implications_(model->GetOrCreate()), - integer_trail_(model->GetOrCreate()) {} + explicit IntervalsRepository(Model* model); // This type is neither copyable nor movable. IntervalsRepository(const IntervalsRepository&) = delete; @@ -149,19 +144,25 @@ class IntervalsRepository { // If such literal already exists this returns it. void CreateDisjunctivePrecedenceLiteral(IntervalVariable a, IntervalVariable b); - LiteralIndex GetOrCreateDisjunctivePrecedenceLiteral( + LiteralIndex GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial( const IntervalDefinition& a, const IntervalDefinition& b); // Creates a literal l <=> y >= x. // Returns true if such literal is "non-trivial" and was created. - bool CreatePrecedenceLiteral(AffineExpression x, AffineExpression y); + bool CreatePrecedenceLiteralIfNonTrivial(AffineExpression x, + AffineExpression y); // Returns a literal l <=> y >= x if it exist or kNoLiteralIndex // otherwise. This could be the one created by - // CreateDisjunctivePrecedenceLiteral() or CreatePrecedenceLiteral(). + // CreateDisjunctivePrecedenceLiteral() or + // CreatePrecedenceLiteralIfNonTrivial(). LiteralIndex GetPrecedenceLiteral(AffineExpression x, AffineExpression y) const; + // Combines the two calls. Note that we will only create literals when the + // relation is not known. + Literal GetOrCreatePrecedenceLiteral(AffineExpression x, AffineExpression y); + const std::vector& AllDisjunctiveHelpers() const { return disjunctive_helpers_; @@ -188,6 +189,7 @@ class IntervalsRepository { SatSolver* sat_solver_; BinaryImplicationGraph* implications_; IntegerTrail* integer_trail_; + BinaryRelationsMaps* relations_maps_; // Literal indicating if the tasks is executed. Tasks that are always executed // will have a kNoLiteralIndex entry in this vector. 
@@ -212,16 +214,10 @@ class IntervalsRepository { SchedulingDemandHelper*> demand_helper_repository_; - // Disjunctive and normal precedences. - // - // Note that for normal precedences, we use directly the affine expression so - // that if many intervals share the same start, we don't re-create Booleans - // for no reason. + // Disjunctive precedences. absl::flat_hash_map, Literal> disjunctive_precedences_; - absl::flat_hash_map, Literal> - precedences_; // Disjunctive/Cumulative helpers_. std::vector disjunctive_helpers_; diff --git a/ortools/sat/linear_constraint.cc b/ortools/sat/linear_constraint.cc index 8e660c2683..9fb0220a2d 100644 --- a/ortools/sat/linear_constraint.cc +++ b/ortools/sat/linear_constraint.cc @@ -171,6 +171,17 @@ LinearExpression LinearConstraintBuilder::BuildExpression() { return result; } +double LinearConstraint::NormalizedViolation( + const util_intops::StrongVector& lp_values) const { + const double activity = ComputeActivity(*this, lp_values); + const double violation = + std::max(activity - ToDouble(ub), ToDouble(lb) - activity); + if (violation <= 0.0) return 0.0; + + const double l2_norm = ComputeL2Norm(*this); + return violation / l2_norm; +} + double ComputeActivity( const LinearConstraint& constraint, const util_intops::StrongVector& values) { diff --git a/ortools/sat/linear_constraint.h b/ortools/sat/linear_constraint.h index 96fd023786..91f0197084 100644 --- a/ortools/sat/linear_constraint.h +++ b/ortools/sat/linear_constraint.h @@ -66,6 +66,12 @@ struct LinearConstraint { LinearConstraint() = default; LinearConstraint(IntegerValue _lb, IntegerValue _ub) : lb(_lb), ub(_ub) {} + // Compute the normalized violation of the constraint. + // For a cut, this is the usual definition of its efficacy. + double NormalizedViolation( + const util_intops::StrongVector& lp_values) + const; + // Resize the LinearConstraint to have space for num_terms. We always // re-allocate if the size is different to always be tight in memory. 
void resize(int size) { @@ -234,7 +240,7 @@ class LinearConstraintBuilder { ABSL_MUST_USE_RESULT bool AddDecomposedProduct( absl::Span product); - // Add literal * coeff to the constaint. Returns false and do nothing if the + // Add literal * coeff to the constraint. Returns false and does nothing if the // given literal didn't have an integer view. ABSL_MUST_USE_RESULT bool AddLiteralTerm( Literal lit, IntegerValue coeff = IntegerValue(1)); @@ -312,8 +318,8 @@ double ComputeActivity( // linear relaxation. This is a bit relaxed compared to what we require for // generic linear constraint that are used in our CP propagators. // -// If this check pass, our constraint should be safe to use in our simplication -// code, our cut computation, etc... +// If this check passes, our constraint should be safe to use in our +// simplification code, our cut computation, etc... bool PossibleOverflow(const IntegerTrail& integer_trail, const LinearConstraint& constraint); @@ -329,7 +335,7 @@ double ScalarProduct(const LinearConstraint& constraint1, const LinearConstraint& constraint2); // Computes the GCD of the constraint coefficient, and divide them by it. This -// also tighten the constraint bounds assumming all the variables are integer. +// also tightens the constraint bounds assuming all the variables are integer. void DivideByGCD(LinearConstraint* constraint); // Removes the entries with a coefficient of zero. 
diff --git a/ortools/sat/linear_constraint_manager.cc b/ortools/sat/linear_constraint_manager.cc index d160f6e454..844be40636 100644 --- a/ortools/sat/linear_constraint_manager.cc +++ b/ortools/sat/linear_constraint_manager.cc @@ -29,7 +29,6 @@ #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/log/vlog_is_on.h" -#include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" @@ -979,11 +978,8 @@ void TopNCuts::AddCut( LinearConstraint ct, absl::string_view name, const util_intops::StrongVector& lp_solution) { if (ct.num_terms == 0) return; - const double activity = ComputeActivity(ct, lp_solution); - const double violation = - std::max(activity - ToDouble(ct.ub), ToDouble(ct.lb) - activity); - const double l2_norm = ComputeL2Norm(ct); - cuts_.Add({std::string(name), std::move(ct)}, violation / l2_norm); + const double normalized_violation = ct.NormalizedViolation(lp_solution); + cuts_.Add({std::string(name), std::move(ct)}, normalized_violation); } void TopNCuts::TransferToManager(LinearConstraintManager* manager) { diff --git a/ortools/sat/linear_constraint_test.cc b/ortools/sat/linear_constraint_test.cc index dce15b893e..9e2993756c 100644 --- a/ortools/sat/linear_constraint_test.cc +++ b/ortools/sat/linear_constraint_test.cc @@ -13,7 +13,9 @@ #include "ortools/sat/linear_constraint.h" +#include #include +#include #include #include #include @@ -44,8 +46,11 @@ TEST(ComputeActivityTest, BasicBehavior) { util_intops::StrongVector values = {0.5, 0.0, 1.4, 0.0, -2.1, 0.0}; - EXPECT_NEAR(ComputeActivity(ct.Build(), values), 1 * 0.5 - 2 * 1.4 - 3 * 2.1, - 1e-6); + const double expected_activity = 1 * 0.5 - 2 * 1.4 - 3 * 2.1; + EXPECT_NEAR(ComputeActivity(ct.Build(), values), expected_activity, 1e-6); + const double expected_violation = + std::abs(expected_activity) / std::sqrt(1 + 4 + 9); + EXPECT_NEAR(ct.Build().NormalizedViolation(values), expected_violation, 1e-6); } 
TEST(ComputeActivityTest, EmptyConstraint) { diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 35112fca03..0b6c0e0ed6 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -1487,5 +1487,153 @@ int GreaterThanAtLeastOneOfDetector::AddGreaterThanAtLeastOneOfConstraints( return num_added_constraints; } +BinaryRelationsMaps::BinaryRelationsMaps(Model* model) + : integer_trail_(model->GetOrCreate()), + integer_encoder_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()) { + int index = 0; + model->GetOrCreate()->callbacks.push_back( + [index = index, trail = model->GetOrCreate(), this]() mutable { + DCHECK_EQ(trail->CurrentDecisionLevel(), 0); + absl::flat_hash_set relevant_true_literals; + for (; index < trail->Index(); ++index) { + const Literal l = (*trail)[index]; + if (variable_appearing_in_reified_relations_.contains(l.Variable())) { + relevant_true_literals.insert(l); + } + } + if (relevant_true_literals.empty()) return true; + + // Linear scan. + for (const auto [l, expr, ub] : all_reified_relations_) { + if (relevant_true_literals.contains(l)) { + AddRelationBounds(expr, kMinIntegerValue, ub); + VLOG(2) << "New fixed precedence: " << expr << " <= " << ub + << " (was reified by " << l << ")"; + } else if (relevant_true_literals.contains(l.Negated())) { + AddRelationBounds(expr, ub + 1, kMaxIntegerValue); + VLOG(2) << "New fixed precedence: " << expr << " > " << ub + << " (was reified by not(" << l << "))"; + } + } + return true; + }); +} + +BinaryRelationsMaps::~BinaryRelationsMaps() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"BinaryRelationsMaps/num_relations", num_updates_}); + shared_stats_->AddStats(stats); +} + +std::pair +BinaryRelationsMaps::GetImpliedLevelZeroBounds( + const LinearExpression2& expr) const { + // Compute the implied bounds on the expression. 
+ IntegerValue implied_lb = 0; + IntegerValue implied_ub = 0; + if (expr.coeffs[0] != 0) { + CHECK_GE(expr.vars[0], 0); + implied_lb += + expr.coeffs[0] * integer_trail_->LevelZeroLowerBound(expr.vars[0]); + implied_ub += + expr.coeffs[0] * integer_trail_->LevelZeroUpperBound(expr.vars[0]); + } + if (expr.coeffs[1] != 0) { + CHECK_GE(expr.vars[1], 0); + implied_lb += + expr.coeffs[1] * integer_trail_->LevelZeroLowerBound(expr.vars[1]); + implied_ub += + expr.coeffs[1] * integer_trail_->LevelZeroUpperBound(expr.vars[1]); + } + + return {implied_lb, implied_ub}; +} + +void BinaryRelationsMaps::AddRelationBounds(LinearExpression2 expr, + IntegerValue lb, IntegerValue ub) { + expr.CanonicalizeAndUpdateBounds(lb, ub); + const auto [implied_lb, implied_ub] = GetImpliedLevelZeroBounds(expr); + lb = std::max(lb, implied_lb); + ub = std::min(ub, implied_ub); + + if (lb > ub) return; // unsat ?? + if (lb == implied_lb && ub == implied_ub) return; // trivially true. + + if (best_upper_bounds_.Add(expr, lb, ub)) { + // TODO(user): Also push them to a global shared repository after + // remapping IntegerVariable to proto indices. + ++num_updates_; + } +} + +RelationStatus BinaryRelationsMaps::GetStatus(LinearExpression2 expr, + IntegerValue lb, + IntegerValue ub) const { + expr.CanonicalizeAndUpdateBounds(lb, ub); + const auto [implied_lb, implied_ub] = GetImpliedLevelZeroBounds(expr); + lb = std::max(lb, implied_lb); + ub = std::min(ub, implied_ub); + + // Returns directly if the status can be derived from the implied bounds. + if (lb > ub) return RelationStatus::IS_FALSE; + if (lb == implied_lb && ub == implied_ub) return RelationStatus::IS_TRUE; + + // Relax as best_upper_bounds_.GetStatus() might have older bounds. 
+ if (lb == implied_lb) lb = kMinIntegerValue; + if (ub == implied_ub) ub = kMaxIntegerValue; + + return best_upper_bounds_.GetStatus(expr, lb, ub); +} + +std::pair BinaryRelationsMaps::FromDifference( + const AffineExpression& a, const AffineExpression& b) const { + LinearExpression2 expr; + expr.vars[0] = a.var; + expr.vars[1] = b.var; + expr.coeffs[0] = a.coeff; + expr.coeffs[1] = -b.coeff; + IntegerValue lb = kMinIntegerValue; // unused. + IntegerValue ub = b.constant - a.constant; + expr.CanonicalizeAndUpdateBounds(lb, ub, /*allow_negation=*/false); + return {std::move(expr), ub}; +} + +RelationStatus BinaryRelationsMaps::GetPrecedenceStatus( + AffineExpression a, AffineExpression b) const { + const auto [expr, ub] = FromDifference(a, b); + return GetStatus(expr, kMinIntegerValue, ub); +} + +void BinaryRelationsMaps::AddReifiedPrecedenceIfNonTrivial(Literal l, + AffineExpression a, + AffineExpression b) { + const auto [expr, ub] = FromDifference(a, b); + const RelationStatus status = GetStatus(expr, kMinIntegerValue, ub); + if (status != RelationStatus::IS_UNKNOWN) return; + + relation_to_lit_.insert({{expr, ub}, l}); + + variable_appearing_in_reified_relations_.insert(l.Variable()); + all_reified_relations_.push_back({l, expr, ub}); +} + +LiteralIndex BinaryRelationsMaps::GetReifiedPrecedence(AffineExpression a, + AffineExpression b) { + const auto [expr, ub] = FromDifference(a, b); + const RelationStatus status = GetStatus(expr, kMinIntegerValue, ub); + if (status == RelationStatus::IS_TRUE) { + return integer_encoder_->GetTrueLiteral().Index(); + } + if (status == RelationStatus::IS_FALSE) { + return integer_encoder_->GetFalseLiteral().Index(); + } + + const auto it = relation_to_lit_.find({expr, ub}); + if (it == relation_to_lit_.end()) return kNoLiteralIndex; + return it->second; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index e29e4f1e17..8056303e86 100644 --- 
a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -506,6 +506,7 @@ struct Relation { class BinaryRelationRepository { public: int size() const { return relations_.size(); } + // The returned relation is guaranteed to only have positive variables. const Relation& relation(int index) const { return relations_[index]; } @@ -574,6 +575,64 @@ class BinaryRelationRepository { var_pair_to_relations_; }; +// TODO(user): Merge with BinaryRelationRepository. Note that this one provides +// different indexing though, so it could be kept separate. The +// LinearExpression2 data structure is also slightly more efficient. +class BinaryRelationsMaps { + public: + explicit BinaryRelationsMaps(Model* model); + ~BinaryRelationsMaps(); + + // This mainly wraps BestBinaryRelationBounds, but in addition it checks the + // current LevelZero variable bounds to detect trivially true or false + // relation. + void AddRelationBounds(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub); + RelationStatus GetStatus(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub) const; + + // Return the status of a <= b; + RelationStatus GetPrecedenceStatus(AffineExpression a, + AffineExpression b) const; + + // Register the fact that l <=> ( a <= b ). + // These are considered equivalence relation. + void AddReifiedPrecedenceIfNonTrivial(Literal l, AffineExpression a, + AffineExpression b); + + // Returns kNoLiteralIndex if we don't have a literal <=> ( a <= b ), or + // returns that literal if we have one. Note that we will return the + // true/false literal if the status is known at level zero. + LiteralIndex GetReifiedPrecedence(AffineExpression a, AffineExpression b); + + private: + // Return the pair (a - b) <= rhs. 
+ std::pair FromDifference( + const AffineExpression& a, const AffineExpression& b) const; + + std::pair GetImpliedLevelZeroBounds( + const LinearExpression2& expr) const; + + IntegerTrail* integer_trail_; + IntegerEncoder* integer_encoder_; + SharedStatistics* shared_stats_; + BestBinaryRelationBounds best_upper_bounds_; + + int64_t num_updates_ = 0; + + // This stores relations l <=> (linear2 <= rhs). + absl::flat_hash_map, Literal> + relation_to_lit_; + + // This is used to detect relations that become fixed at level zero and + // "upgrade" them to non-enforced relations. Because we only do that when + // we fix variable, a linear scan shouldn't be too bad and is relatively + // compact memory wise. + absl::flat_hash_set variable_appearing_in_reified_relations_; + std::vector> + all_reified_relations_; +}; + // Detects if at least one of a subset of linear of size 2 or 1, touching the // same variable, must be true. When this is the case we add a new propagator to // propagate that fact. diff --git a/ortools/sat/primary_variables.cc b/ortools/sat/primary_variables.cc index c61ae37ca4..3071139260 100644 --- a/ortools/sat/primary_variables.cc +++ b/ortools/sat/primary_variables.cc @@ -122,6 +122,12 @@ void GetRelationshipForConstraint(const ConstraintProto& ct, } return; } + case ConstraintProto::kExactlyOne: { + for (const int lit : ct.exactly_one().literals()) { + deducible_vars->insert(PositiveRef(lit)); + } + return; + } default: break; } @@ -613,6 +619,20 @@ bool ComputeAllVariablesFromPrimaryVariables( product -= target.offset(); (*solution)[var] = product / coeff_of_var; } break; + case ConstraintProto::kExactlyOne: { + (*solution)[var] = 0; + int sum = 0; + for (const int lit : ct.exactly_one().literals()) { + const int positive_ref = PositiveRef(lit); + DCHECK(positive_ref == var || + !dependent_variables_set.IsSet(positive_ref)); + sum += RefIsPositive(lit) ? 
(*solution)[positive_ref] + : 1 - (*solution)[positive_ref]; + } + if (sum != 1) { + (*solution)[var] ^= 1; + } + } break; default: break; } diff --git a/ortools/sat/primary_variables_test.cc b/ortools/sat/primary_variables_test.cc index a917b53112..66f8a104b6 100644 --- a/ortools/sat/primary_variables_test.cc +++ b/ortools/sat/primary_variables_test.cc @@ -26,6 +26,8 @@ namespace { using ::google::protobuf::contrib::parse_proto::ParseTestProto; using ::testing::Contains; +using ::testing::ElementsAre; +using ::testing::EqualsProto; using ::testing::Pair; TEST(PrimaryVariablesTest, BasicExample) { @@ -114,6 +116,25 @@ TEST(PrimaryVariablesTest, WithIntProd) { EXPECT_EQ(all_variables, solution); } +TEST(PrimaryVariablesTest, WithExactlyOne) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + constraints { exactly_one { literals: [ 0, 1, 2, 3 ] } } + )pb"); + const VariableRelationships relationships = + ComputeVariableRelationships(model); + EXPECT_EQ(relationships.secondary_variables.size(), 1); + const ConstraintProto expected = ParseTestProto(R"pb( + exactly_one { literals: [ 0, 1, 2, 3 ] } + )pb"); + EXPECT_THAT(relationships.dependency_resolution_constraint, + ElementsAre(EqualsProto(expected))); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index 69d101f367..e457281736 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -2485,10 +2485,11 @@ void SatSolver::MinimizeConflictRecursively(std::vector* conflict) { // be infered by some other variables in the conflict. // Note that we can skip the first position since this is the 1-UIP. 
int index = 1; + TimeLimitCheckEveryNCalls time_limit_check(100, time_limit_); for (int i = 1; i < conflict->size(); ++i) { const BooleanVariable var = (*conflict)[i].Variable(); const AssignmentInfo& info = trail_->Info(var); - if (time_limit_->LimitReached() || + if (time_limit_check.LimitReached() || info.type == AssignmentType::kSearchDecision || info.trail_index <= min_trail_index_per_level_[info.level] || !CanBeInferedFromConflictVariables(var)) { diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 43bd817a25..39be5600b4 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -28,7 +29,6 @@ #include "absl/container/btree_set.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" -#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" @@ -39,10 +39,12 @@ #include "ortools/sat/cuts.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" +#include "ortools/sat/intervals.h" #include "ortools/sat/linear_constraint.h" #include "ortools/sat/linear_constraint_manager.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_base.h" +#include "ortools/sat/sat_solver.h" #include "ortools/sat/scheduling_helpers.h" #include "ortools/sat/util.h" #include "ortools/util/sorted_interval_list.h" @@ -56,71 +58,101 @@ namespace { // Minimum amount of violation of the cut constraint by the solution. This // is needed to avoid numerical issues and adding cuts with minor effect. 
const double kMinCutViolation = 1e-4; -} // namespace -BaseEvent::BaseEvent(int t, SchedulingConstraintHelper* x_helper) - : x_start_min(x_helper->StartMin(t)), - x_start_max(x_helper->StartMax(t)), - x_end_min(x_helper->EndMin(t)), - x_end_max(x_helper->EndMax(t)), - x_size_min(x_helper->SizeMin(t)), - x_size_max(x_helper->SizeMax(t)) {} +// Checks that the literals of the decomposed energy (if present) and the size +// and demand of a cumulative task are in sync. +// In some rare instances, this is not the case. In that case, it is better not +// to try to generate cuts. +bool DecomposedEnergyIsPropagated(const VariablesAssignment& assignment, int t, + SchedulingConstraintHelper* helper, + SchedulingDemandHelper* demands_helper) { + const std::vector& decomposed_energy = + demands_helper->DecomposedEnergies()[t]; + if (decomposed_energy.empty()) return true; -void BaseEvent::PropagateDecomposedEnergy( - const VariablesAssignment& assignment) { - if (decomposed_energy.empty()) return; - - IntegerValue new_x_size_min = kMaxIntegerValue; - IntegerValue new_x_size_max = kMinIntegerValue; - IntegerValue new_y_size_min = kMaxIntegerValue; - IntegerValue new_y_size_max = kMinIntegerValue; - IntegerValue new_energy_min = kMaxIntegerValue; - int new_size = 0; + // Checks the propagation of the exactly_one constraint on literals. + int num_false_literals = 0; + int num_true_literals = 0; for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) { - // Filter out false literals and out of bounds values. 
- if (assignment.LiteralIsFalse(lit) || fixed_size < x_size_min || - fixed_size > x_size_max || fixed_demand < y_size_min || - fixed_demand > y_size_max) { - continue; - } + if (assignment.LiteralIsFalse(lit)) ++num_false_literals; + if (assignment.LiteralIsTrue(lit)) ++num_true_literals; + } + if (num_true_literals > 1) return false; + if (num_true_literals == 1 && + num_false_literals < decomposed_energy.size() - 1) { + return false; + } + if (num_false_literals == decomposed_energy.size()) return false; + + // Checks the propagations of the bounds of the size and the demand. + IntegerValue propagated_size_min = kMaxIntegerValue; + IntegerValue propagated_size_max = kMinIntegerValue; + IntegerValue propagated_demand_min = kMaxIntegerValue; + IntegerValue propagated_demand_max = kMinIntegerValue; + for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) { + if (assignment.LiteralIsFalse(lit)) continue; if (assignment.LiteralIsTrue(lit)) { - new_x_size_min = fixed_size; - new_x_size_max = fixed_size; - new_y_size_min = fixed_demand; - new_y_size_max = fixed_demand; - new_energy_min = fixed_size * fixed_demand; - decomposed_energy.clear(); - decomposed_energy.push_back({lit, fixed_size, fixed_demand}); - new_size = 1; - break; + if (fixed_size != helper->SizeMin(t) || + fixed_size != helper->SizeMax(t) || + fixed_demand != demands_helper->DemandMin(t) || + fixed_demand != demands_helper->DemandMax(t)) { + return false; + } + return true; } - new_x_size_min = std::min(new_x_size_min, fixed_size); - new_x_size_max = std::max(new_x_size_max, fixed_size); - new_y_size_min = std::min(new_y_size_min, fixed_demand); - new_y_size_max = std::max(new_y_size_max, fixed_demand); - new_energy_min = std::min(new_energy_min, fixed_size * fixed_demand); - decomposed_energy[new_size++] = {lit, fixed_size, fixed_demand}; + if (fixed_size < helper->SizeMin(t) || fixed_size > helper->SizeMax(t) || + fixed_demand < demands_helper->DemandMin(t) || + fixed_demand > 
demands_helper->DemandMax(t)) { + return false; + } + propagated_size_min = std::min(propagated_size_min, fixed_size); + propagated_size_max = std::max(propagated_size_max, fixed_size); + propagated_demand_min = std::min(propagated_demand_min, fixed_demand); + propagated_demand_max = std::max(propagated_demand_max, fixed_demand); } - decomposed_energy.resize(new_size); - CHECK(!decomposed_energy.empty()); - // Update the event. - x_size_min = new_x_size_min; - x_size_max = new_x_size_max; - y_size_min = new_y_size_min; - y_size_max = new_y_size_max; - energy_min = new_energy_min; - use_energy = energy_min > x_size_min * y_size_min; + if (propagated_size_min != helper->SizeMin(t) || + propagated_size_max != helper->SizeMax(t) || + propagated_demand_min != demands_helper->DemandMin(t) || + propagated_demand_max != demands_helper->DemandMax(t)) { + return false; + } + + return true; } -struct EnergyEvent : BaseEvent { +} // namespace + +struct EnergyEvent { EnergyEvent(int t, SchedulingConstraintHelper* x_helper) - : BaseEvent(t, x_helper) {} + : start_min(x_helper->StartMin(t)), + start_max(x_helper->StartMax(t)), + end_min(x_helper->EndMin(t)), + end_max(x_helper->EndMax(t)), + size_min(x_helper->SizeMin(t)) {} + + // Cache of the bounds of the interval + IntegerValue start_min; + IntegerValue start_max; + IntegerValue end_min; + IntegerValue end_max; + IntegerValue size_min; + + // Cache of the bounds of the demand. + IntegerValue demand_min; + + // The energy min of this event. + IntegerValue energy_min; + + // If non empty, a decomposed view of the energy of this event. + // First value in each pair is size, second is demand. + std::vector decomposed_energy; + bool use_energy = false; // We need this for linearizing the energy in some cases. - AffineExpression y_size; + AffineExpression demand; // If set, this event is optional and its presence is controlled by this. 
LiteralIndex presence_literal_index = kNoLiteralIndex; @@ -143,15 +175,15 @@ struct EnergyEvent : BaseEvent { // Computes the mandatory minimal overlap of the interval with the time window // [start, end]. IntegerValue GetMinOverlap(IntegerValue start, IntegerValue end) const { - return std::max(std::min({x_end_min - start, end - x_start_max, x_size_min, - end - start}), - IntegerValue(0)); + return std::max( + std::min({end_min - start, end - start_max, size_min, end - start}), + IntegerValue(0)); } // This method expects all the other fields to have been filled before. // It must be called before the EnergyEvent is used. ABSL_MUST_USE_RESULT bool FillEnergyLp( - AffineExpression x_size, + AffineExpression size, const util_intops::StrongVector& lp_values, Model* model) { LinearConstraintBuilder tmp_energy(model); @@ -159,7 +191,7 @@ struct EnergyEvent : BaseEvent { if (!decomposed_energy.empty()) { if (!tmp_energy.AddDecomposedProduct(decomposed_energy)) return false; } else { - tmp_energy.AddQuadraticLowerBound(x_size, y_size, + tmp_energy.AddQuadraticLowerBound(size, demand, model->GetOrCreate(), &energy_is_quadratic); } @@ -176,15 +208,13 @@ struct EnergyEvent : BaseEvent { std::string DebugString() const { return absl::StrCat( - "EnergyEvent(x_start_min = ", x_start_min.value(), - ", x_start_max = ", x_start_max.value(), - ", x_end_min = ", x_end_min.value(), - ", x_end_max = ", x_end_max.value(), - ", y_size = ", y_size.DebugString(), ", energy = ", + "EnergyEvent(start_min = ", start_min, ", start_max = ", start_max, + ", end_min = ", end_min, ", end_max = ", end_max, + ", demand = ", demand.DebugString(), ", energy = ", decomposed_energy.empty() ? "{}" : absl::StrCat(decomposed_energy.size(), " terms"), - ", presence_literal_index = ", presence_literal_index.value(), ")"); + ", presence_literal_index = ", presence_literal_index, ")"); } }; @@ -195,24 +225,22 @@ namespace { // failed. 
ABSL_MUST_USE_RESULT bool AddOneEvent( const EnergyEvent& event, IntegerValue window_start, - IntegerValue window_end, LinearConstraintBuilder* cut, + IntegerValue window_end, LinearConstraintBuilder& cut, bool* add_energy_to_name = nullptr, bool* add_quadratic_to_name = nullptr, bool* add_opt_to_name = nullptr, bool* add_lifted_to_name = nullptr) { - DCHECK(cut != nullptr); - - if (event.x_end_min <= window_start || event.x_start_max >= window_end) { + if (event.end_min <= window_start || event.start_max >= window_end) { return true; // Event can move outside the time window. } - if (event.x_start_min >= window_start && event.x_end_max <= window_end) { + if (event.start_min >= window_start && event.end_max <= window_end) { // Event is always contained by the time window. - cut->AddLinearExpression(event.linearized_energy); + cut.AddLinearExpression(event.linearized_energy); if (event.energy_is_quadratic && add_quadratic_to_name != nullptr) { *add_quadratic_to_name = true; } if (add_energy_to_name != nullptr && - event.energy_min > event.x_size_min * event.y_size_min) { + event.energy_min > event.size_min * event.demand_min) { *add_energy_to_name = true; } if (!event.IsPresent() && add_opt_to_name != nullptr) { @@ -227,35 +255,35 @@ ABSL_MUST_USE_RESULT bool AddOneEvent( if (event.IsPresent()) { const std::vector& energy = event.decomposed_energy; if (energy.empty()) { - cut->AddTerm(event.y_size, min_overlap); + cut.AddTerm(event.demand, min_overlap); } else { const IntegerValue window_size = window_end - window_start; for (const auto [lit, fixed_size, fixed_demand] : energy) { const IntegerValue alt_end_min = - std::max(event.x_end_min, event.x_start_min + fixed_size); + std::max(event.end_min, event.start_min + fixed_size); const IntegerValue alt_start_max = - std::min(event.x_start_max, event.x_end_max - fixed_size); + std::min(event.start_max, event.end_max - fixed_size); const IntegerValue energy_min = fixed_demand * std::min({alt_end_min - window_start, 
window_end - alt_start_max, fixed_size, window_size}); if (energy_min == 0) continue; - if (!cut->AddLiteralTerm(lit, energy_min)) return false; + if (!cut.AddLiteralTerm(lit, energy_min)) return false; } if (add_energy_to_name != nullptr) *add_energy_to_name = true; } } else { if (add_opt_to_name != nullptr) *add_opt_to_name = true; const IntegerValue min_energy = ComputeEnergyMinInWindow( - event.x_start_min, event.x_start_max, event.x_end_min, - event.x_end_max, event.x_size_min, event.y_size_min, - event.decomposed_energy, window_start, window_end); - if (min_energy > event.x_size_min * event.y_size_min && + event.start_min, event.start_max, event.end_min, event.end_max, + event.size_min, event.demand_min, event.decomposed_energy, + window_start, window_end); + if (min_energy > event.size_min * event.demand_min && add_energy_to_name != nullptr) { *add_energy_to_name = true; } - if (!cut->AddLiteralTerm(Literal(event.presence_literal_index), - min_energy)) { + if (!cut.AddLiteralTerm(Literal(event.presence_literal_index), + min_energy)) { return false; } } @@ -270,17 +298,17 @@ std::vector FindPossibleDemands(const EnergyEvent& event, IntegerTrail* integer_trail) { std::vector possible_demands; if (event.decomposed_energy.empty()) { - if (integer_trail->IsFixed(event.y_size)) { + if (integer_trail->IsFixed(event.demand)) { possible_demands.push_back( - integer_trail->FixedValue(event.y_size).value()); + integer_trail->FixedValue(event.demand).value()); } else { - if (integer_trail->InitialVariableDomain(event.y_size.var).Size() > + if (integer_trail->InitialVariableDomain(event.demand.var).Size() > 1000000) { return {}; } for (const int64_t var_value : - integer_trail->InitialVariableDomain(event.y_size.var).Values()) { - possible_demands.push_back(event.y_size.ValueAt(var_value).value()); + integer_trail->InitialVariableDomain(event.demand.var).Values()) { + possible_demands.push_back(event.demand.ValueAt(var_value).value()); } } } else { @@ -292,23 +320,6 @@ 
std::vector FindPossibleDemands(const EnergyEvent& event, return possible_demands; } -// This generates the actual cut and compute its activity vs the -// available_energy_lp. -bool CutIsEfficient( - absl::Span events, IntegerValue window_start, - IntegerValue window_end, double available_energy_lp, - const util_intops::StrongVector& lp_values, - LinearConstraintBuilder* temp_builder) { - temp_builder->Clear(); - for (const EnergyEvent& event : events) { - if (!AddOneEvent(event, window_start, window_end, temp_builder)) { - return false; - } - } - return temp_builder->BuildExpression().LpValue(lp_values) >= - available_energy_lp * (1.0 + kMinCutViolation); -} - } // namespace // This cumulative energetic cut generator will split the cumulative span in 2 @@ -336,22 +347,6 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( const VariablesAssignment& assignment = model->GetOrCreate()->Assignment(); - // Currently, we look at all the possible time windows, and will push all cuts - // in the TopNCuts object. From our observations, this generator creates only - // a few cuts for a given run. - // - // The complexity of this loop is n^3. if we follow the latest research, we - // could implement this in n log^2(n). Still, this is not visible in the - // profile as we only this method at the root node, - struct OverloadedTimeWindowWithMakespan { - IntegerValue start; - IntegerValue end; - IntegerValue fixed_energy_rhs; // Can be complemented by the makespan. - bool use_makespan = false; - bool use_subset_sum = false; - }; - - std::vector overloaded_time_windows; // Compute relevant time points. // TODO(user): We could reduce this set. // TODO(user): we can compute the max usage between makespan_min and @@ -363,20 +358,20 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( IntegerValue max_end_max = kMinIntegerValue; // Used as a sentinel. 
for (int i = 0; i < events.size(); ++i) { const EnergyEvent& event = events[i]; - if (event.x_start_min < makespan_min) { - time_points.push_back(event.x_start_min); + if (event.start_min < makespan_min) { + time_points.push_back(event.start_min); } - if (event.x_start_max < makespan_min) { - time_points.push_back(event.x_start_max); + if (event.start_max < makespan_min) { + time_points.push_back(event.start_max); } - if (event.x_end_min < makespan_min) { - time_points.push_back(event.x_end_min); + if (event.end_min < makespan_min) { + time_points.push_back(event.end_min); } - if (event.x_end_max < makespan_min) { - time_points.push_back(event.x_end_max); + if (event.end_max < makespan_min) { + time_points.push_back(event.end_max); } - max_end_min = std::max(max_end_min, event.x_end_min); - max_end_max = std::max(max_end_max, event.x_end_max); + max_end_min = std::max(max_end_min, event.end_min); + max_end_max = std::max(max_end_max, event.end_max); possible_demands[i] = FindPossibleDemands(event, assignment, integer_trail); } time_points.push_back(makespan_min); @@ -393,7 +388,7 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( reachable_capacity_subset_sum.Reset(capacity.value()); for (int i = 0; i < events.size(); ++i) { const EnergyEvent& event = events[i]; - if (event.x_start_min >= window_end || event.x_end_max <= window_start) { + if (event.start_min >= window_end || event.end_max <= window_start) { continue; } if (possible_demands[i].empty()) { // Number of values was too large. @@ -412,6 +407,7 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( const double makespan_lp = makespan.LpValue(lp_values); const double makespan_min_lp = ToDouble(makespan_min); LinearConstraintBuilder temp_builder(model); + TopNCuts top_n_cuts(5); for (int i = 0; i + 1 < num_time_points; ++i) { // Checks the time limit if the problem is too big. 
if (events.size() > 50 && time_limit->LimitReached()) return; @@ -462,68 +458,55 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( // // We reuse the min cut violation to allow some slack in the comparison // between the two computed energy values. + bool cut_generated = true; + bool add_opt_to_name = false; + bool add_lifted_to_name = false; + bool add_quadratic_to_name = false; + bool add_energy_to_name = false; + + temp_builder.Clear(); const bool use_makespan = max_energy_up_to_makespan_lp <= ToDouble(cumulated_max_energy) + kMinCutViolation; - const double available_energy_lp = use_makespan - ? max_energy_up_to_makespan_lp - : ToDouble(cumulated_max_energy); - if (CutIsEfficient(events, window_start, window_end, available_energy_lp, - lp_values, &temp_builder)) { - OverloadedTimeWindowWithMakespan w; - w.start = window_start; - w.end = window_end; - w.fixed_energy_rhs = use_makespan - ? cumulated_max_energy_before_makespan_min - : cumulated_max_energy; - w.use_makespan = use_makespan; - w.use_subset_sum = - use_makespan ? use_subset_sum_before_makespan_min : use_subset_sum; - overloaded_time_windows.push_back(std::move(w)); + const bool use_subset_sum_in_cut = + use_makespan ? use_subset_sum_before_makespan_min : use_subset_sum; + + if (use_makespan) { // Add the energy from makespan_min to makespan. 
+ temp_builder.AddConstant(makespan_min * capacity); + temp_builder.AddTerm(makespan, -capacity); } - } - } - if (overloaded_time_windows.empty()) return; - - VLOG(3) << "GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity: " - << events.size() << " events, " << time_points.size() - << " time points, " << overloaded_time_windows.size() - << " overloads detected"; - - TopNCuts top_n_cuts(5); - for (const auto& w : overloaded_time_windows) { - bool cut_generated = true; - bool add_opt_to_name = false; - bool add_lifted_to_name = false; - bool add_quadratic_to_name = false; - bool add_energy_to_name = false; - LinearConstraintBuilder cut(model, kMinIntegerValue, w.fixed_energy_rhs); - - if (w.use_makespan) { // Add the energy from makespan_min to makespan. - cut.AddConstant(makespan_min * capacity); - cut.AddTerm(makespan, -capacity); - } - - // Add contributions from all events. - for (const EnergyEvent& event : events) { - if (!AddOneEvent(event, w.start, w.end, &cut, &add_energy_to_name, - &add_quadratic_to_name, &add_opt_to_name, - &add_lifted_to_name)) { - cut_generated = false; - break; // Exit the event loop. + // Add contributions from all events. + for (const EnergyEvent& event : events) { + if (!AddOneEvent(event, window_start, window_end, temp_builder, + &add_energy_to_name, &add_quadratic_to_name, + &add_opt_to_name, &add_lifted_to_name)) { + cut_generated = false; + break; // Exit the event loop. + } } - } - if (cut_generated) { - std::string full_name(cut_name); - if (add_opt_to_name) full_name.append("_optional"); - if (add_quadratic_to_name) full_name.append("_quadratic"); - if (add_lifted_to_name) full_name.append("_lifted"); - if (add_energy_to_name) full_name.append("_energy"); - if (w.use_makespan) full_name.append("_makespan"); - if (w.use_subset_sum) full_name.append("_subsetsum"); - top_n_cuts.AddCut(cut.Build(), full_name, lp_values); + // We can break here as any further iteration on j will hit the same + // issue. 
+ if (!cut_generated) break; + + // Build the cut to evaluate its efficacy. + const IntegerValue energy_rhs = + use_makespan ? cumulated_max_energy_before_makespan_min + : cumulated_max_energy; + LinearConstraint potential_cut = + temp_builder.BuildConstraint(kMinIntegerValue, energy_rhs); + + if (potential_cut.NormalizedViolation(lp_values) >= kMinCutViolation) { + std::string full_name(cut_name); + if (add_energy_to_name) full_name.append("_energy"); + if (add_lifted_to_name) full_name.append("_lifted"); + if (use_makespan) full_name.append("_makespan"); + if (add_opt_to_name) full_name.append("_optional"); + if (add_quadratic_to_name) full_name.append("_quadratic"); + if (use_subset_sum_in_cut) full_name.append("_subsetsum"); + top_n_cuts.AddCut(std::move(potential_cut), full_name, lp_values); + } } } @@ -547,11 +530,6 @@ void GenerateCumulativeEnergeticCuts( // The complexity of this loop is n^3. if we follow the latest research, we // could implement this in n log^2(n). Still, this is not visible in the // profile as we only this method at the root node, - struct OverloadedTimeWindow { - IntegerValue start; - IntegerValue end; - }; - std::vector overloaded_time_windows; const double capacity_lp = capacity.LpValue(lp_values); // Compute relevant time points. 
@@ -559,17 +537,18 @@ void GenerateCumulativeEnergeticCuts( absl::btree_set time_points_set; IntegerValue max_end_min = kMinIntegerValue; for (const EnergyEvent& event : events) { - time_points_set.insert(event.x_start_min); - time_points_set.insert(event.x_start_max); - time_points_set.insert(event.x_end_min); - time_points_set.insert(event.x_end_max); - max_end_min = std::max(max_end_min, event.x_end_min); + time_points_set.insert(event.start_min); + time_points_set.insert(event.start_max); + time_points_set.insert(event.end_min); + time_points_set.insert(event.end_max); + max_end_min = std::max(max_end_min, event.end_min); } const std::vector time_points(time_points_set.begin(), time_points_set.end()); const int num_time_points = time_points.size(); LinearConstraintBuilder temp_builder(model); + TopNCuts top_n_cuts(5); for (int i = 0; i + 1 < num_time_points; ++i) { // Checks the time limit if the problem is too big. if (events.size() > 50 && time_limit->LimitReached()) return; @@ -583,49 +562,44 @@ void GenerateCumulativeEnergeticCuts( const double available_energy_lp = ToDouble(window_end - window_start) * capacity_lp; if (available_energy_lp >= max_possible_energy_lp) break; - if (CutIsEfficient(events, window_start, window_end, available_energy_lp, - lp_values, &temp_builder)) { - overloaded_time_windows.push_back({window_start, window_end}); + + bool cut_generated = true; + bool add_opt_to_name = false; + bool add_lifted_to_name = false; + bool add_quadratic_to_name = false; + bool add_energy_to_name = false; + temp_builder.Clear(); + + // Compute the max energy available for the tasks. + temp_builder.AddTerm(capacity, window_start - window_end); + + // Add all contributions. + for (const EnergyEvent& event : events) { + if (!AddOneEvent(event, window_start, window_end, temp_builder, + &add_energy_to_name, &add_quadratic_to_name, + &add_opt_to_name, &add_lifted_to_name)) { + cut_generated = false; + break; // Exit the event loop. 
+ } } - } - } - if (overloaded_time_windows.empty()) return; + // We can break here are any further iteration on j will hit the same + // issue. + if (!cut_generated) break; - VLOG(3) << "GenerateCumulativeEnergeticCuts: " << events.size() << " events, " - << time_points.size() << " time points, " - << overloaded_time_windows.size() << " overloads detected"; + // Build the cut to evaluate its efficacy. + LinearConstraint potential_cut = + temp_builder.BuildConstraint(kMinIntegerValue, 0); - TopNCuts top_n_cuts(5); - for (const auto& [window_start, window_end] : overloaded_time_windows) { - bool cut_generated = true; - bool add_opt_to_name = false; - bool add_lifted_to_name = false; - bool add_quadratic_to_name = false; - bool add_energy_to_name = false; - LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0)); - - // Compute the max energy available for the tasks. - cut.AddTerm(capacity, window_start - window_end); - - // Add all contributions. - for (const EnergyEvent& event : events) { - if (!AddOneEvent(event, window_start, window_end, &cut, - &add_energy_to_name, &add_quadratic_to_name, - &add_opt_to_name, &add_lifted_to_name)) { - cut_generated = false; - break; // Exit the event loop. 
+ if (potential_cut.NormalizedViolation(lp_values) >= kMinCutViolation) { + std::string full_name(cut_name); + if (add_energy_to_name) full_name.append("_energy"); + if (add_lifted_to_name) full_name.append("_lifted"); + if (add_opt_to_name) full_name.append("_optional"); + if (add_quadratic_to_name) full_name.append("_quadratic"); + top_n_cuts.AddCut(std::move(potential_cut), full_name, lp_values); } } - - if (cut_generated) { - std::string full_name(cut_name); - if (add_opt_to_name) full_name.append("_optional"); - if (add_quadratic_to_name) full_name.append("_quadratic"); - if (add_lifted_to_name) full_name.append("_lifted"); - if (add_energy_to_name) full_name.append("_energy"); - top_n_cuts.AddCut(cut.Build(), full_name, lp_values); - } } top_n_cuts.TransferToManager(manager); @@ -648,16 +622,17 @@ CutGenerator CreateCumulativeEnergyCutGenerator( gtl::STLSortAndRemoveDuplicates(&result.vars); IntegerTrail* integer_trail = model->GetOrCreate(); TimeLimit* time_limit = model->GetOrCreate(); + SatSolver* sat_solver = model->GetOrCreate(); result.generate_cuts = [makespan, capacity, demands_helper, helper, - integer_trail, time_limit, + integer_trail, time_limit, sat_solver, model](LinearConstraintManager* manager) { if (!helper->SynchronizeAndSetTimeDirection(true)) return false; if (!demands_helper->CacheAllEnergyValues()) return true; - const VariablesAssignment& assignment = - model->GetOrCreate()->Assignment(); const auto& lp_values = manager->LpValues(); + const VariablesAssignment& assignment = sat_solver->Assignment(); + std::vector events; for (int i = 0; i < helper->NumTasks(); ++i) { if (helper->IsAbsent(i)) continue; @@ -665,14 +640,16 @@ CutGenerator CreateCumulativeEnergyCutGenerator( if (demands_helper->DemandMax(i) == 0 || helper->SizeMin(i) == 0) { continue; } + if (!DecomposedEnergyIsPropagated(assignment, i, helper, + demands_helper)) { + return true; // Propagation did not reach a fixed point. Abort. 
+ } EnergyEvent e(i, helper); - e.y_size = demands_helper->Demands()[i]; - e.y_size_min = demands_helper->DemandMin(i); - e.y_size_max = demands_helper->DemandMax(i); + e.demand = demands_helper->Demands()[i]; + e.demand_min = demands_helper->DemandMin(i); e.decomposed_energy = demands_helper->DecomposedEnergies()[i]; e.energy_min = demands_helper->EnergyMin(i); - e.PropagateDecomposedEnergy(assignment); e.energy_is_quadratic = demands_helper->EnergyIsQuadratic(i); if (!helper->IsPresent(i)) { e.presence_literal_index = helper->PresenceLiteral(i).Index(); @@ -723,10 +700,9 @@ CutGenerator CreateNoOverlapEnergyCutGenerator( if (helper->SizeMin(i) == 0) continue; EnergyEvent e(i, helper); - e.y_size = IntegerValue(1); - e.y_size_min = IntegerValue(1); - e.y_size_max = IntegerValue(1); - e.energy_min = e.x_size_min; + e.demand = IntegerValue(1); + e.demand_min = IntegerValue(1); + e.energy_min = e.size_min; if (!helper->IsPresent(i)) { e.presence_literal_index = helper->PresenceLiteral(i).Index(); } @@ -1070,20 +1046,40 @@ CutGenerator CreateNoOverlapPrecedenceCutGenerator( return result; } -CtEvent::CtEvent(int t, SchedulingConstraintHelper* x_helper) - : BaseEvent(t, x_helper) {} +CompletionTimeEvent::CompletionTimeEvent(int t, + SchedulingConstraintHelper* x_helper, + SchedulingDemandHelper* demands_helper) + : task_index(t), + start_min(x_helper->StartMin(t)), + start_max(x_helper->StartMax(t)), + end_min(x_helper->EndMin(t)), + end_max(x_helper->EndMax(t)), + size_min(x_helper->SizeMin(t)), + end(x_helper->Ends()[t]) { + if (demands_helper == nullptr) { + demand_min = 1; + demand_is_fixed = true; + energy_min = size_min; + use_energy = false; + } else { + demand_min = demands_helper->DemandMin(t); + demand_is_fixed = demands_helper->DemandIsFixed(t); + // Default values for energy. Will be updated if decomposed energy is + // not empty. 
+ energy_min = demand_min * size_min; + use_energy = false; + decomposed_energy = demands_helper->DecomposedEnergies()[t]; + } +} -std::string CtEvent::DebugString() const { +std::string CompletionTimeEvent::DebugString() const { return absl::StrCat( - "CtEvent(x_end = ", x_end.DebugString(), - ", x_start_min = ", x_start_min.value(), - ", x_start_max = ", x_start_max.value(), - ", x_size_min = ", x_size_min.value(), - ", x_size_max = ", x_size_max.value(), - ", x_end_min = ", x_end_min.value(), ", x_end_max = ", x_end_max.value(), - ", x_lp_end = ", x_lp_end, ", y_size_min = ", y_size_min.value(), - ", y_size_max = ", y_size_max.value(), - ", energy_min = ", energy_min.value(), ", use_energy = ", use_energy, + "CompletionTimeEvent(task_index = ", task_index, + ", start_min = ", start_min, ", start_max = ", start_max, + ", size_min = ", size_min, ", end = ", end.DebugString(), + ", lp_end = ", lp_end, ", size_min = ", size_min, + " demand_min = ", demand_min, ", demand_is_fixed = ", demand_is_fixed, + ", energy_min = ", energy_min, ", use_energy = ", use_energy, ", lifted = ", lifted, ", decomposed_energy = [", absl::StrJoin(decomposed_energy, ", ", [](std::string* out, const LiteralValueValue& e) { @@ -1094,29 +1090,79 @@ std::string CtEvent::DebugString() const { namespace { +bool ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap( + absl::Span events, + absl::Span permutation, IntegerValue& sum_of_ends, + IntegerValue& sum_of_weighted_ends) { + sum_of_ends = 0; + sum_of_weighted_ends = 0; + IntegerValue end_min_of_previous_task = kMinIntegerValue; + for (const int index : permutation) { + const CompletionTimeEvent& event = events[index]; + const IntegerValue threshold = + std::max(event.start_min, end_min_of_previous_task); + if (event.start_max < threshold) return false; // Infeasible. 
+ end_min_of_previous_task = threshold + event.size_min; + sum_of_ends += end_min_of_previous_task; + sum_of_weighted_ends += event.energy_min * end_min_of_previous_task; + } + return true; +} + // This functions packs all events in a cumulative of capacity 'capacity_max' // following the given permutation. It returns the sum of end mins and the sum -// of end mins weighted by event.weight. +// of end mins weighted by event.energy_min. // -// It ensures that if event_j is after event_i in the permutation, then event_j -// starts exactly at the same time or after event_i. +// It ensures that if event_j is after event_i in the permutation, then +// event_j starts exactly at the same time or after event_i. // // It returns false if one event cannot start before event.start_max. -bool ComputeWeightedSumOfEndMinsForOnePermutation( - absl::Span events, IntegerValue capacity_max, +bool ComputeWeightedSumOfEndMinsOfOnePermutation( + absl::Span events, + absl::Span permutation, IntegerValue capacity_max, IntegerValue& sum_of_ends, IntegerValue& sum_of_weighted_ends, std::vector>& profile, std::vector>& new_profile) { + DCHECK_EQ(permutation.size(), events.size()); + + if (capacity_max == 1) { + return ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap( + events, permutation, sum_of_ends, sum_of_weighted_ends); + } + + // Set default values. sum_of_ends = 0; sum_of_weighted_ends = 0; - // The profile (and new profile) is a set of (time, capa_left) pairs, ordered - // by increasing time and capa_left. + // Is the permutation feasible ? 
+ // ei = events[permutation[i]], ej = events[permutation[j]], i < j + // - start_max(ej) >= start_min(ei) + IntegerValue demand_min_of_previous_task = 0; + IntegerValue start_min_of_previous_task = kMinIntegerValue; + IntegerValue end_min_of_previous_task = kMinIntegerValue; + for (const int index : permutation) { + const CompletionTimeEvent& event = events[index]; + const IntegerValue threshold = + std::max(event.start_min, + (event.demand_min + demand_min_of_previous_task > capacity_max) + ? end_min_of_previous_task + : start_min_of_previous_task); + if (event.start_max < threshold) { + return false; + } + start_min_of_previous_task = threshold; + end_min_of_previous_task = threshold + event.size_min; + demand_min_of_previous_task = event.demand_min; + } + + // The profile (and new profile) is a set of (time, capa_left) pairs, + // ordered by increasing time and capa_left. profile.clear(); profile.emplace_back(kMinIntegerValue, capacity_max); profile.emplace_back(kMaxIntegerValue, capacity_max); IntegerValue start_of_previous_task = kMinIntegerValue; - for (const PermutableEvent& event : events) { + for (const int index : permutation) { + const CompletionTimeEvent& event = events[index]; const IntegerValue start_min = std::max(event.start_min, start_of_previous_task); @@ -1124,39 +1170,43 @@ bool ComputeWeightedSumOfEndMinsForOnePermutation( // Then push until we find a step with enough capacity. int current = 0; while (profile[current + 1].first <= start_min || - profile[current].second < event.demand) { + profile[current].second < event.demand_min) { ++current; } const IntegerValue actual_start = std::max(start_min, profile[current].first); + start_of_previous_task = actual_start; // Compatible with the event.start_max ? 
- if (actual_start > event.start_max) return false; + if (actual_start > event.start_max) { + return false; + } + + const IntegerValue actual_end = actual_start + event.size_min; - const IntegerValue actual_end = actual_start + event.size; sum_of_ends += actual_end; - sum_of_weighted_ends += event.weight * actual_end; + sum_of_weighted_ends += event.energy_min * actual_end; // No need to update the profile on the last loop. - if (&event == &events.back()) break; + if (event.task_index == events[permutation.back()].task_index) break; // Update the profile. new_profile.clear(); new_profile.push_back( - {actual_start, profile[current].second - event.demand}); + {actual_start, profile[current].second - event.demand_min}); ++current; while (profile[current].first < actual_end) { new_profile.push_back( - {profile[current].first, profile[current].second - event.demand}); + {profile[current].first, profile[current].second - event.demand_min}); ++current; } if (profile[current].first > actual_end) { new_profile.push_back( - {actual_end, new_profile.back().second + event.demand}); + {actual_end, new_profile.back().second + event.demand_min}); } while (current < profile.size()) { new_profile.push_back(profile[current]); @@ -1169,102 +1219,105 @@ bool ComputeWeightedSumOfEndMinsForOnePermutation( } // namespace -bool ComputeMinSumOfWeightedEndMins(std::vector& events, - IntegerValue capacity_max, - IntegerValue& min_sum_of_end_mins, - IntegerValue& min_sum_of_weighted_end_mins, - IntegerValue unweighted_threshold, - IntegerValue weighted_threshold) { +bool ComputeMinSumOfWeightedEndMins( + absl::Span events, IntegerValue capacity_max, + double sum_of_ends_lp, double sum_of_weighted_ends_lp, + IntegerValue& min_sum_of_end_mins, + IntegerValue& min_sum_of_weighted_end_mins) { int num_explored = 0; int num_pruned = 0; min_sum_of_end_mins = kMaxIntegerValue; min_sum_of_weighted_end_mins = kMaxIntegerValue; + bool aborted = false; + const int64_t unweighted_threshold = + 
static_cast(std::floor(sum_of_ends_lp + kMinCutViolation)); + const int64_t weighted_threshold = static_cast( + std::floor(sum_of_weighted_ends_lp + kMinCutViolation)); - // Reusable storage for ComputeWeightedSumOfEndMinsForOnePermutation(). + // Reusable storage for ComputeWeightedSumOfEndMinsOfOnePermutation(). std::vector> profile; std::vector> new_profile; + std::vector permutation(events.size()); + std::iota(permutation.begin(), permutation.end(), 0); do { IntegerValue sum_of_ends(0); IntegerValue sum_of_weighted_ends(0); - if (ComputeWeightedSumOfEndMinsForOnePermutation( - events, capacity_max, sum_of_ends, sum_of_weighted_ends, profile, - new_profile)) { + if (ComputeWeightedSumOfEndMinsOfOnePermutation( + events, permutation, capacity_max, sum_of_ends, + sum_of_weighted_ends, profile, new_profile)) { min_sum_of_end_mins = std::min(sum_of_ends, min_sum_of_end_mins); min_sum_of_weighted_end_mins = std::min(sum_of_weighted_ends, min_sum_of_weighted_end_mins); num_explored++; if (min_sum_of_end_mins <= unweighted_threshold && min_sum_of_weighted_end_mins <= weighted_threshold) { + aborted = true; break; } } else { num_pruned++; } - } while (std::next_permutation(events.begin(), events.end())); + } while (std::next_permutation(permutation.begin(), permutation.end())); VLOG(3) << "DP: size=" << events.size() << ", explored = " << num_explored - << ", pruned = " << num_pruned + << ", pruned = " << num_pruned << ", aborted = " << aborted << ", min_sum_of_end_mins = " << min_sum_of_end_mins - << ", min_sum_of_weighted_end_mins = " - << min_sum_of_weighted_end_mins; + << ", min_sum_of_weighted_end_mins = " << min_sum_of_weighted_end_mins + << ", unweighted_threshold = " << unweighted_threshold + << ", weighted_threshold = " << weighted_threshold; return num_explored > 0; } // TODO(user): Improve performance // - detect disjoint tasks (no need to crossover to the second part) // - better caching of explored states -void GenerateShortCompletionTimeCutsWithExactBound( 
- const std::string& cut_name, std::vector events, +ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( + const std::string& cut_name, std::vector events, IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); // Sort by start min to bucketize by start_min. - std::stable_sort( - events.begin(), events.end(), [](const CtEvent& e1, const CtEvent& e2) { - return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) < - std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end); + std::sort( + events.begin(), events.end(), + [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) { + return std::tie(e1.start_min, e1.energy_min, e1.lp_end, e1.task_index) < + std::tie(e2.start_min, e2.energy_min, e2.lp_end, e2.task_index); }); - std::vector permutable_events; for (int start = 0; start + 1 < events.size(); ++start) { // Skip to the next start_min value. - if (start > 0 && - events[start].x_start_min == events[start - 1].x_start_min) { + if (start > 0 && events[start].start_min == events[start - 1].start_min) { continue; } - const IntegerValue sequence_start_min = events[start].x_start_min; - std::vector residual_tasks(events.begin() + start, events.end()); + const IntegerValue sequence_start_min = events[start].start_min; + std::vector residual_tasks(events.begin() + start, + events.end()); // We look at events that start before sequence_start_min, but are forced - // to cross this time point. In that case, we replace this event by a - // truncated event starting at sequence_start_min. To do this, we reduce - // the size_min, and align the start_min with the sequence_start_min. + // to cross this time point. for (int before = 0; before < start; ++before) { - if (events[before].x_start_min + events[before].x_size_min > + if (events[before].start_min + events[before].size_min > sequence_start_min) { residual_tasks.push_back(events[before]); // Copy. 
residual_tasks.back().lifted = true; } } - std::stable_sort(residual_tasks.begin(), residual_tasks.end(), - [](const CtEvent& e1, const CtEvent& e2) { - return e1.x_lp_end < e2.x_lp_end; - }); + std::sort(residual_tasks.begin(), residual_tasks.end(), + [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) { + return std::tie(e1.lp_end, e1.task_index) < + std::tie(e2.lp_end, e2.task_index); + }); - IntegerValue sum_of_durations(0); - IntegerValue sum_of_energies(0); double sum_of_ends_lp = 0.0; double sum_of_weighted_ends_lp = 0.0; - IntegerValue sum_of_demands(0); + IntegerValue sum_of_demands = 0; + IntegerValue sum_of_energies = 0; - permutable_events.clear(); for (int i = 0; i < std::min(residual_tasks.size(), 7); ++i) { - const CtEvent& event = residual_tasks[i]; - permutable_events.emplace_back(i, event); - sum_of_ends_lp += event.x_lp_end; - sum_of_weighted_ends_lp += event.x_lp_end * ToDouble(event.y_size_min); - sum_of_demands += event.y_size_min; - sum_of_durations += event.x_size_min; - sum_of_energies += event.x_size_min * event.y_size_min; + const CompletionTimeEvent& event = residual_tasks[i]; + sum_of_ends_lp += event.lp_end; + sum_of_weighted_ends_lp += event.lp_end * ToDouble(event.energy_min); + sum_of_demands += event.demand_min; + sum_of_energies += event.energy_min; // Both cases with 1 or 2 tasks are trivial and independent of the order. // Also, if capacity is not exceeded, pushing all ends left is a valid LP @@ -1273,25 +1326,15 @@ void GenerateShortCompletionTimeCutsWithExactBound( IntegerValue min_sum_of_end_mins = kMaxIntegerValue; IntegerValue min_sum_of_weighted_end_mins = kMaxIntegerValue; - for (int j = 0; j <= i; ++j) { - // We re-index the elements, so we will start enumerating the - // permutation from there. Note that if the previous i caused an abort - // because of the threshold, we might abort right away again! 
- permutable_events[j].index = j; - } if (!ComputeMinSumOfWeightedEndMins( - permutable_events, capacity_max, min_sum_of_end_mins, - min_sum_of_weighted_end_mins, - /*unweighted_threshold=*/ - std::floor(sum_of_ends_lp + kMinCutViolation), - /*weighted_threshold=*/ - std::floor(sum_of_weighted_ends_lp + kMinCutViolation))) { - break; + absl::MakeSpan(residual_tasks).first(i + 1), capacity_max, + sum_of_ends_lp, sum_of_weighted_ends_lp, min_sum_of_end_mins, + min_sum_of_weighted_end_mins)) { + return false; } const double unweigthed_violation = - (ToDouble(min_sum_of_end_mins) - sum_of_ends_lp) / - ToDouble(sum_of_durations); + (ToDouble(min_sum_of_end_mins) - sum_of_ends_lp) / ToDouble(i + 1); const double weighted_violation = (ToDouble(min_sum_of_weighted_end_mins) - sum_of_weighted_ends_lp) / ToDouble(sum_of_energies); @@ -1303,11 +1346,11 @@ void GenerateShortCompletionTimeCutsWithExactBound( kMaxIntegerValue); bool is_lifted = false; for (int j = 0; j <= i; ++j) { - const CtEvent& event = residual_tasks[j]; + const CompletionTimeEvent& event = residual_tasks[j]; is_lifted |= event.lifted; - cut.AddTerm(event.x_end, IntegerValue(1)); + cut.AddTerm(event.end, IntegerValue(1)); } - std::string full_name = cut_name + "_unweighted"; + std::string full_name = cut_name; if (is_lifted) full_name.append("_lifted"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } @@ -1319,57 +1362,68 @@ void GenerateShortCompletionTimeCutsWithExactBound( kMaxIntegerValue); bool is_lifted = false; for (int j = 0; j <= i; ++j) { - const CtEvent& event = residual_tasks[j]; + const CompletionTimeEvent& event = residual_tasks[j]; is_lifted |= event.lifted; - cut.AddTerm(event.x_end, event.y_size_min); + cut.AddTerm(event.end, event.energy_min); } - std::string full_name = cut_name + "_weighted"; + std::string full_name = cut_name; if (is_lifted) full_name.append("_lifted"); + full_name.append("_weighted"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } } } 
top_n_cuts.TransferToManager(manager); + return true; } namespace { // Returns a copy of the event with the start time increased to time. // Energy (min and decomposed) are updated accordingly. -CtEvent CopyAndTrimEventAfter(const CtEvent& old_event, IntegerValue time, - const VariablesAssignment& assignment) { - CHECK_GT(time, old_event.x_start_min); - CHECK_GT(old_event.x_start_min + old_event.x_size_min, time); - CtEvent event = old_event; // Copy. +CompletionTimeEvent CopyAndTrimEventAfter(const CompletionTimeEvent& old_event, + IntegerValue time) { + CHECK_GT(time, old_event.start_min); + CHECK_GT(old_event.start_min + old_event.size_min, time); + CompletionTimeEvent event = old_event; // Copy. event.lifted = true; - // Trim the decomposed energy and compute the energy min in the window. - const IntegerValue shift = time - event.x_start_min; + // Trim the decomposed energy and compute the energy min in the window. + const IntegerValue shift = time - event.start_min; CHECK_GT(shift, IntegerValue(0)); - event.x_size_min -= shift; - event.x_size_max -= shift; - event.energy_min = event.x_size_min * event.y_size_min; + event.size_min -= shift; + event.energy_min = event.size_min * event.demand_min; if (!event.decomposed_energy.empty()) { - // Trim durations - for (auto& [literal, size, demand] : event.decomposed_energy) { - CHECK_GT(size, shift); - size -= shift; + // Trim fixed sizes and recompute the decomposed energy min. 
+ IntegerValue propagated_energy_min = kMaxIntegerValue; + for (auto& [literal, fixed_size, fixed_demand] : event.decomposed_energy) { + CHECK_GT(fixed_size, shift); + fixed_size -= shift; + propagated_energy_min = + std::min(propagated_energy_min, fixed_demand * fixed_size); + } + + DCHECK_GT(propagated_energy_min, 0); + if (propagated_energy_min > event.energy_min) { + event.use_energy = true; + event.energy_min = propagated_energy_min; + } else { + event.use_energy = false; } - event.PropagateDecomposedEnergy(assignment); } - event.x_start_min = time; + event.start_min = time; return event; } // Collects all possible demand values for the event, and adds them to the // subset sum of the reachable capacity of the cumulative constraint. void AddEventDemandsToCapacitySubsetSum( - const CtEvent& event, const VariablesAssignment& assignment, + const CompletionTimeEvent& event, const VariablesAssignment& assignment, IntegerValue capacity_max, std::vector& tmp_possible_demands, MaxBoundedSubsetSum& dp) { if (dp.CurrentMax() != capacity_max) { - if (event.y_size_is_fixed()) { - dp.Add(event.y_size_min.value()); + if (event.demand_is_fixed) { + dp.Add(event.demand_min.value()); } else if (!event.decomposed_energy.empty()) { tmp_possible_demands.clear(); for (const auto& [literal, size, demand] : event.decomposed_energy) { @@ -1381,7 +1435,7 @@ void AddEventDemandsToCapacitySubsetSum( tmp_possible_demands.push_back(demand.value()); } dp.AddChoices(tmp_possible_demands); - } else { // event.y_size_min is not fixed, we abort. + } else { // event.demand_min is not fixed, we abort. // TODO(user): We could still add all events in the range if the range // is small. dp.Add(capacity_max.value()); @@ -1435,11 +1489,10 @@ void AddEventDemandsToCapacitySubsetSum( // - first, we loop on potential start times in increasing order. // - second loop, we add tasks that must contribute after this start time // ordered by increasing end time in the LP relaxation. 
-void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, - std::vector events, - IntegerValue capacity_max, - bool skip_low_sizes, Model* model, - LinearConstraintManager* manager) { +void GenerateCompletionTimeCutsWithEnergy( + absl::string_view cut_name, std::vector events, + IntegerValue capacity_max, bool skip_low_sizes, Model* model, + LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); const VariablesAssignment& assignment = model->GetOrCreate()->Assignment(); @@ -1447,21 +1500,22 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, // Sort by start min to bucketize by start_min. std::stable_sort( - events.begin(), events.end(), [](const CtEvent& e1, const CtEvent& e2) { - return std::tie(e1.x_start_min, e1.y_size_min, e1.x_lp_end) < - std::tie(e2.x_start_min, e2.y_size_min, e2.x_lp_end); + events.begin(), events.end(), + [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) { + return std::tie(e1.start_min, e1.demand_min, e1.lp_end) < + std::tie(e2.start_min, e2.demand_min, e2.lp_end); }); // First loop: we loop on potential start times. for (int start = 0; start + 1 < events.size(); ++start) { // Skip to the next start_min value. - if (start > 0 && - events[start].x_start_min == events[start - 1].x_start_min) { + if (start > 0 && events[start].start_min == events[start - 1].start_min) { continue; } - const IntegerValue sequence_start_min = events[start].x_start_min; - std::vector residual_tasks(events.begin() + start, events.end()); + const IntegerValue sequence_start_min = events[start].start_min; + std::vector residual_tasks(events.begin() + start, + events.end()); // We look at events that start before sequence_start_min, but are forced // to cross this time point. 
In that case, we replace this event by a @@ -1469,10 +1523,10 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, // the size_min, align the start_min with the sequence_start_min, and // scale the energy down accordingly. for (int before = 0; before < start; ++before) { - if (events[before].x_start_min + events[before].x_size_min > + if (events[before].start_min + events[before].size_min > sequence_start_min) { - CtEvent event = CopyAndTrimEventAfter(events[before], - sequence_start_min, assignment); + CompletionTimeEvent event = + CopyAndTrimEventAfter(events[before], sequence_start_min); if (event.energy_min <= 0) continue; residual_tasks.push_back(std::move(event)); } @@ -1501,27 +1555,28 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, // We will add tasks one by one, sorted by end time, and evaluate the // potential cut at each step. - std::stable_sort(residual_tasks.begin(), residual_tasks.end(), - [](const CtEvent& e1, const CtEvent& e2) { - return e1.x_lp_end < e2.x_lp_end; - }); + std::stable_sort( + residual_tasks.begin(), residual_tasks.end(), + [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) { + return e1.lp_end < e2.lp_end; + }); // Second loop: we add tasks one by one. for (int i = 0; i < residual_tasks.size(); ++i) { - const CtEvent& event = residual_tasks[i]; - DCHECK_GE(event.x_start_min, sequence_start_min); + const CompletionTimeEvent& event = residual_tasks[i]; + DCHECK_GE(event.start_min, sequence_start_min); // Make sure we do not overflow. if (!AddTo(event.energy_min, &sum_energy)) break; // In the no_overlap case, we have: - // area = event.x_size_min ^ 2 + // area = event.size_min ^ 2 // In the simple cumulative case, we split split the task // (demand_min, size_min) into demand_min tasks in the no_overlap case. 
- // area = event.y_size_min * event.x_size_min * event.x_size_min + // area = event.demand_min * event.size_min * event.size_min // In the cumulative case, we can have energy_min > side_min * demand_min. // In that case, we use energy_min * size_min. if (event.decomposed_energy.empty()) { - if (!AddProductTo(event.energy_min, event.x_size_min, + if (!AddProductTo(event.energy_min, event.size_min, &sum_event_contributions)) { break; } @@ -1537,30 +1592,16 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, min_shape_area = std::min(min_shape_area, shape_area); } } - if (min_shape_area < event.energy_min * event.x_size_min) { - VLOG(2) << "min_shape_area: " << min_shape_area - << " energy_min: " << event.energy_min - << " x_size_min: " << event.x_size_min - << "simple_min_shape_area: " - << event.energy_min * event.x_size_min; - VLOG(2) << " event = " << event.DebugString(); - } - CHECK_GE(min_shape_area, event.energy_min * event.x_size_min); - - if (!AddTo(min_shape_area, &sum_event_contributions)) break; - if (min_shape_area > event.energy_min * event.x_size_min) { - VLOG(2) << "min_shape_area: " << min_shape_area - << " simple_min_shape_area: " - << event.energy_min * event.x_size_min; - VLOG(2) << " event = " << event.DebugString(); - + CHECK_GE(min_shape_area, event.energy_min * event.size_min); + if (min_shape_area > event.energy_min * event.size_min) { uses_shapes = true; } + if (!AddTo(min_shape_area, &sum_event_contributions)) break; } if (!AddSquareTo(event.energy_min, &sum_square_energy)) break; - lp_contrib += event.x_lp_end * ToDouble(event.energy_min); - current_start_min = std::min(current_start_min, event.x_start_min); + lp_contrib += event.lp_end * ToDouble(event.energy_min); + current_start_min = std::min(current_start_min, event.start_min); // Maintain the reachable capacity with a bounded complexity subset sum. 
AddEventDemandsToCapacitySubsetSum(event, assignment, capacity_max, @@ -1614,16 +1655,16 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, bool is_lifted = false; bool add_energy_to_name = false; for (int i = 0; i <= best_end; ++i) { - const CtEvent& event = residual_tasks[i]; + const CompletionTimeEvent& event = residual_tasks[i]; is_lifted |= event.lifted; add_energy_to_name |= event.use_energy; - cut.AddTerm(event.x_end, event.energy_min); + cut.AddTerm(event.end, event.energy_min); } std::string full_name(cut_name); - if (is_lifted) full_name.append("_lifted"); if (add_energy_to_name) full_name.append("_energy"); - if (best_uses_subset_sum) full_name.append("_subsetsum"); + if (is_lifted) full_name.append("_lifted"); if (best_uses_shapes) full_name.append("_shapes"); + if (best_uses_subset_sum) full_name.append("_subsetsum"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } } @@ -1641,38 +1682,39 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( result.generate_cuts = [helper, model](LinearConstraintManager* manager) { if (!helper->SynchronizeAndSetTimeDirection(true)) return false; - auto generate_cuts = [model, manager, helper](bool mirror) { - std::vector events; + auto generate_cuts = [model, manager, + helper](bool time_is_forward) -> bool { + if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) { + return false; + } + std::vector events; const auto& lp_values = manager->LpValues(); for (int index = 0; index < helper->NumTasks(); ++index) { if (!helper->IsPresent(index)) continue; const IntegerValue size_min = helper->SizeMin(index); if (size_min > 0) { - const AffineExpression end_expr = helper->Ends()[index]; - CtEvent event(index, helper); - event.x_end = end_expr; - event.x_lp_end = end_expr.LpValue(lp_values); - event.y_size_min = IntegerValue(1); - event.y_size_max = IntegerValue(1); - event.energy_min = size_min; + CompletionTimeEvent event(index, helper, nullptr); + event.lp_end = 
event.end.LpValue(lp_values); events.push_back(event); } } - const std::string mirror_str = mirror ? "_mirror" : ""; - GenerateShortCompletionTimeCutsWithExactBound( - absl::StrCat("NoOverlapCompletionTimeExhaustive", mirror_str), events, - /*capacity_max=*/IntegerValue(1), model, manager); + const std::string mirror_str = time_is_forward ? "" : "_mirror"; + if (!GenerateShortCompletionTimeCutsWithExactBound( + absl::StrCat("NoOverlapCompletionTimeExhaustive", mirror_str), + events, + /*capacity_max=*/IntegerValue(1), model, manager)) { + return false; + } GenerateCompletionTimeCutsWithEnergy( absl::StrCat("NoOverlapCompletionTimeQueyrane", mirror_str), std::move(events), /*capacity_max=*/IntegerValue(1), /*skip_low_sizes=*/true, model, manager); + return true; }; - if (!helper->SynchronizeAndSetTimeDirection(true)) return false; - generate_cuts(false); - if (!helper->SynchronizeAndSetTimeDirection(false)) return false; - generate_cuts(true); + if (!generate_cuts(/*time_is_forward=*/true)) return false; + if (!generate_cuts(/*time_is_forward=*/false)) return false; return true; }; return result; @@ -1690,50 +1732,52 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( gtl::STLSortAndRemoveDuplicates(&result.vars); IntegerTrail* integer_trail = model->GetOrCreate(); + SatSolver* sat_solver = model->GetOrCreate(); result.generate_cuts = [integer_trail, helper, demands_helper, capacity, - model](LinearConstraintManager* manager) { - if (!helper->SynchronizeAndSetTimeDirection(true)) return false; - if (!demands_helper->CacheAllEnergyValues()) return true; + sat_solver, + model](LinearConstraintManager* manager) -> bool { + auto generate_cuts = [integer_trail, sat_solver, model, manager, helper, + demands_helper, + capacity](bool time_is_forward) -> bool { + if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) { + return false; + } + if (!demands_helper->CacheAllEnergyValues()) return true; - auto generate_cuts = [integer_trail, model, manager, helper, 
demands_helper, - capacity](bool mirror) { - std::vector events; + std::vector events; const auto& lp_values = manager->LpValues(); - const VariablesAssignment& assignment = - model->GetOrCreate()->Assignment(); + const VariablesAssignment& assignment = sat_solver->Assignment(); for (int index = 0; index < helper->NumTasks(); ++index) { if (!helper->IsPresent(index)) continue; + if (!DecomposedEnergyIsPropagated(assignment, index, helper, + demands_helper)) { + return true; // Propagation did not reach a fixed point. Abort. + } if (helper->SizeMin(index) > 0 && demands_helper->DemandMin(index) > 0) { - CtEvent event(index, helper); - event.x_end = helper->Ends()[index]; - event.x_lp_end = event.x_end.LpValue(lp_values); - event.y_size_min = demands_helper->DemandMin(index); - event.y_size_max = demands_helper->DemandMax(index); - event.energy_min = demands_helper->EnergyMin(index); - event.use_energy = - event.energy_min > event.x_size_min * event.y_size_min; - event.decomposed_energy = demands_helper->DecomposedEnergies()[index]; - event.PropagateDecomposedEnergy(assignment); + CompletionTimeEvent event(index, helper, demands_helper); + event.lp_end = event.end.LpValue(lp_values); events.push_back(event); } } const IntegerValue capacity_max = integer_trail->UpperBound(capacity); - const std::string mirror_str = mirror ? "_mirror" : ""; - GenerateShortCompletionTimeCutsWithExactBound( - absl::StrCat("CumulativeCompletionTimeExhaustive", mirror_str), - events, capacity_max, model, manager); + const std::string mirror_str = time_is_forward ? 
"" : "_mirror"; + if (!GenerateShortCompletionTimeCutsWithExactBound( + absl::StrCat("CumulativeCompletionTimeExhaustive", mirror_str), + events, capacity_max, model, manager)) { + return false; + } GenerateCompletionTimeCutsWithEnergy( absl::StrCat("CumulativeCompletionTimeQueyrane", mirror_str), std::move(events), capacity_max, /*skip_low_sizes=*/true, model, manager); + return true; }; - if (!helper->SynchronizeAndSetTimeDirection(true)) return false; - generate_cuts(false); - if (!helper->SynchronizeAndSetTimeDirection(false)) return false; - generate_cuts(true); + + if (!generate_cuts(/*time_is_forward=*/true)) return false; + if (!generate_cuts(/*time_is_forward=*/false)) return false; return true; }; return result; diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index e9a6a4be9b..f000d7bdf6 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -18,11 +18,11 @@ #include #include +#include "absl/types/span.h" #include "ortools/sat/cuts.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" -#include "ortools/sat/sat_base.h" #include "ortools/sat/scheduling_helpers.h" namespace operations_research { @@ -100,21 +100,34 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( // Internal methods and data structures, useful for testing. -// Base event type for scheduling cuts. -struct BaseEvent { - BaseEvent(int t, SchedulingConstraintHelper* x_helper); +// Stores the event for a task (interval, demand). +// For a no_overlap constraint, demand is always between 0 and 1. +// For a cumulative constraint, demand must be between 0 and capacity_max. +struct CompletionTimeEvent { + CompletionTimeEvent(int t, SchedulingConstraintHelper* x_helper, + SchedulingDemandHelper* demands_helper); - // Cache of the intervals bound on the x direction. 
- IntegerValue x_start_min; - IntegerValue x_start_max; - IntegerValue x_end_min; - IntegerValue x_end_max; - IntegerValue x_size_min; - IntegerValue x_size_max; + // The index of the task in the helper. + int task_index; - // Cache of the bounds on the y direction. - IntegerValue y_size_min; - IntegerValue y_size_max; + // Cache of the bounds of the interval. + IntegerValue start_min; + IntegerValue start_max; + IntegerValue end_min; + IntegerValue end_max; + IntegerValue size_min; + + // The lp value of the end of the interval. + AffineExpression end; + double lp_end = 0.0; + + // Cache of the bounds of the demand. + IntegerValue demand_min; + + // If we know that the size on y is fixed, we can use some heuristic to + // compute the maximum subset sums under the capacity and use that instead + // of the full capacity. + bool demand_is_fixed = false; // The energy min of this event. IntegerValue energy_min; @@ -127,24 +140,6 @@ struct BaseEvent { // model. bool use_energy = false; - // If we know that the size on y is fixed, we can use some heuristic to - // compute the maximum subset sums under the capacity and use that instead - // of the full capacity. - bool y_size_is_fixed() const { return y_size_min == y_size_max; } - void PropagateDecomposedEnergy(const VariablesAssignment& assignment); -}; - -// Stores the event for a rectangle along the two axis x and y. -// For a no_overlap constraint, y is always of size 1 between 0 and 1. -// For a cumulative constraint, y is the demand that must be between 0 and -// capacity_max. -struct CtEvent : BaseEvent { - CtEvent(int t, SchedulingConstraintHelper* x_helper); - - // The lp value of the end of the x interval. - AffineExpression x_end; - double x_lp_end; - // Indicates if the cut is lifted, that is if it includes tasks that are not // strictly contained in the current time window. bool lifted = false; @@ -160,30 +155,11 @@ struct CtEvent : BaseEvent { // small, like <= 10. They should also starts in index order. 
// // Optim: If both sums are proven <= to the corresponding threshold, we abort. -struct PermutableEvent { - PermutableEvent(int i, CtEvent e) - : index(i), - start_min(e.x_start_min), - start_max(e.x_start_max), - size(e.x_size_min), - demand(e.y_size_min), - weight(e.y_size_min) {} - - bool operator<(const PermutableEvent& o) const { return index < o.index; } - - int index; // for < to be used by std::next_permutation(). - IntegerValue start_min; - IntegerValue start_max; - IntegerValue size; - IntegerValue demand; - IntegerValue weight; -}; -bool ComputeMinSumOfWeightedEndMins(std::vector& events, - IntegerValue capacity_max, - IntegerValue& min_sum_of_end_mins, - IntegerValue& min_sum_of_weighted_end_mins, - IntegerValue unweighted_threshold, - IntegerValue weighted_threshold); +bool ComputeMinSumOfWeightedEndMins( + absl::Span events, IntegerValue capacity_max, + double sum_of_ends_lp, double sum_of_weighted_ends_lp, + IntegerValue& min_sum_of_end_mins, + IntegerValue& min_sum_of_weighted_end_mins); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc index 3d10a23b59..9c24ac0afa 100644 --- a/ortools/sat/scheduling_cuts_test.cc +++ b/ortools/sat/scheduling_cuts_test.cc @@ -15,7 +15,6 @@ #include -#include #include #include #include @@ -37,6 +36,7 @@ #include "ortools/sat/linear_constraint_manager.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_base.h" +#include "ortools/sat/scheduling_helpers.h" #include "ortools/util/strong_integers.h" namespace operations_research { @@ -398,21 +398,21 @@ TEST(ComputeMinSumOfEndMinsTest, CombinationOf3) { SchedulingConstraintHelper* helper = model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); - CtEvent e1(0, helper); - e1.y_size_min = two; - CtEvent e2(1, helper); - e2.y_size_min = one; - CtEvent e3(2, helper); - e3.y_size_min = one; - std::vector events = {{0, e1}, {1, e2}, {1, e3}}; + SchedulingDemandHelper* demands_helper 
= + new SchedulingDemandHelper({two, one, one}, helper, &model); + model.TakeOwnership(demands_helper); + CompletionTimeEvent e1(0, helper, demands_helper); + CompletionTimeEvent e2(1, helper, demands_helper); + CompletionTimeEvent e3(2, helper, demands_helper); + const std::vector events = {e1, e2, e3}; - IntegerValue min_sum_of_end_mins(0); - IntegerValue min_sum_of_weighted_end_mins(0); - ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( - events, two, min_sum_of_end_mins, min_sum_of_weighted_end_mins, - kMinIntegerValue, kMinIntegerValue)); + IntegerValue min_sum_of_end_mins = 0; + IntegerValue min_sum_of_weighted_end_mins = 0; + ASSERT_TRUE(ComputeMinSumOfWeightedEndMins(events, two, 0.01, 0.01, + min_sum_of_end_mins, + min_sum_of_weighted_end_mins)); EXPECT_EQ(min_sum_of_end_mins, 17); - EXPECT_EQ(min_sum_of_weighted_end_mins, 21); + EXPECT_EQ(min_sum_of_weighted_end_mins, 86); } TEST(ComputeMinSumOfEndMinsTest, CombinationOf3ConstraintStart) { @@ -442,21 +442,22 @@ TEST(ComputeMinSumOfEndMinsTest, CombinationOf3ConstraintStart) { SchedulingConstraintHelper* helper = model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); - CtEvent e1(0, helper); - e1.y_size_min = two; - CtEvent e2(1, helper); - e2.y_size_min = one; - CtEvent e3(2, helper); - e3.y_size_min = one; - std::vector events = {{0, e1}, {1, e2}, {2, e3}}; + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper({two, one, one}, helper, &model); + model.TakeOwnership(demands_helper); - IntegerValue min_sum_of_end_mins(0); - IntegerValue min_sum_of_weighted_end_mins(0); - ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( - events, two, min_sum_of_end_mins, min_sum_of_weighted_end_mins, - kMinIntegerValue, kMinIntegerValue)); + CompletionTimeEvent e1(0, helper, demands_helper); + CompletionTimeEvent e2(1, helper, demands_helper); + CompletionTimeEvent e3(2, helper, demands_helper); + const std::vector events = {e1, e2, e3}; + + IntegerValue min_sum_of_end_mins = 0; + IntegerValue 
min_sum_of_weighted_end_mins = 0; + ASSERT_TRUE(ComputeMinSumOfWeightedEndMins(events, two, 0.01, 0.01, + min_sum_of_end_mins, + min_sum_of_weighted_end_mins)); EXPECT_EQ(min_sum_of_end_mins, 18); - EXPECT_EQ(min_sum_of_weighted_end_mins, 21); + EXPECT_EQ(min_sum_of_weighted_end_mins, 86); } TEST(ComputeMinSumOfEndMinsTest, Infeasible) { @@ -486,19 +487,20 @@ TEST(ComputeMinSumOfEndMinsTest, Infeasible) { SchedulingConstraintHelper* helper = model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); - CtEvent e1(0, helper); - e1.y_size_min = two; - CtEvent e2(1, helper); - e2.y_size_min = one; - CtEvent e3(2, helper); - e3.y_size_min = one; - std::vector events = {{0, e1}, {1, e2}, {2, e3}}; + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper({two, one, one}, helper, &model); + model.TakeOwnership(demands_helper); - IntegerValue min_sum_of_end_mins(0); - IntegerValue min_sum_of_weighted_end_mins(0); - ASSERT_FALSE(ComputeMinSumOfWeightedEndMins( - events, two, min_sum_of_end_mins, min_sum_of_weighted_end_mins, - kMinIntegerValue, kMinIntegerValue)); + CompletionTimeEvent e1(0, helper, demands_helper); + CompletionTimeEvent e2(1, helper, demands_helper); + CompletionTimeEvent e3(2, helper, demands_helper); + const std::vector events = {e1, e2, e3}; + + IntegerValue min_sum_of_end_mins = 0; + IntegerValue min_sum_of_weighted_end_mins = 0; + ASSERT_FALSE(ComputeMinSumOfWeightedEndMins(events, two, 0.01, 0.01, + min_sum_of_end_mins, + min_sum_of_weighted_end_mins)); } int64_t ExactMakespan(absl::Span sizes, std::vector& demands, @@ -539,18 +541,25 @@ int64_t ExactMakespanBruteForce(absl::Span sizes, SchedulingConstraintHelper* helper = model.GetOrCreate()->GetOrCreateHelper(intervals); - std::vector events; + std::vector demands_expr; for (int i = 0; i < demands.size(); ++i) { - CtEvent e(i, helper); - e.y_size_min = demands[i]; - events.emplace_back(i, e); + demands_expr.push_back(AffineExpression(demands[i])); + } + SchedulingDemandHelper* 
demands_helper = + new SchedulingDemandHelper(demands_expr, helper, &model); + model.TakeOwnership(demands_helper); + + std::vector events; + for (int i = 0; i < demands.size(); ++i) { + CompletionTimeEvent e(i, helper, demands_helper); + events.push_back(e); } - IntegerValue min_sum_of_end_mins(0); - IntegerValue min_sum_of_weighted_end_mins(0); - EXPECT_TRUE(ComputeMinSumOfWeightedEndMins( - events, IntegerValue(capacity), min_sum_of_end_mins, - min_sum_of_weighted_end_mins, kMinIntegerValue, kMinIntegerValue)); + IntegerValue min_sum_of_end_mins = 0; + IntegerValue min_sum_of_weighted_end_mins = 0; + EXPECT_TRUE(ComputeMinSumOfWeightedEndMins(events, capacity, 0.01, 0.01, + min_sum_of_end_mins, + min_sum_of_weighted_end_mins)); return min_sum_of_end_mins.value(); } diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index 9b10e4c91b..bc596380ec 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -21,11 +21,9 @@ #include #include "absl/log/check.h" -#include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/base/logging.h" -#include "ortools/base/strong_vector.h" #include "ortools/sat/implied_bounds.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" diff --git a/ortools/sat/simplification.h b/ortools/sat/simplification.h index ab7b43685b..75627dd2f1 100644 --- a/ortools/sat/simplification.h +++ b/ortools/sat/simplification.h @@ -86,14 +86,15 @@ class SatPostsolver { int NumClauses() const { return clauses_start_.size(); } std::vector Clause(int i) const { // TODO(user): we could avoid the copy here, but because clauses_literals_ - // is a deque, we do need a special return class and cannot juste use + // is a deque, we do need a special return class and cannot just use // absl::Span for instance. - const int begin = clauses_start_[i]; - const int end = i + 1 < clauses_start_.size() ? 
clauses_start_[i + 1] - : clauses_literals_.size(); + const int64_t begin = clauses_start_[i]; + const int64_t end = i + 1 < clauses_start_.size() + ? clauses_start_[i + 1] + : clauses_literals_.size(); std::vector result(clauses_literals_.begin() + begin, clauses_literals_.begin() + end); - for (int j = 0; j < result.size(); ++j) { + for (int64_t j = 0; j < result.size(); ++j) { if (result[j] == associated_literal_[i]) { std::swap(result[0], result[j]); break; @@ -118,7 +119,7 @@ class SatPostsolver { // Stores the arguments of the Add() calls: clauses_start_[i] is the index of // the first literal of the clause #i in the clauses_literals_ deque. - std::vector clauses_start_; + std::vector clauses_start_; std::deque clauses_literals_; std::vector associated_literal_; From 77e5e44746a379014c7d3d86ad813eb9e20d2b5b Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 14 May 2025 13:33:34 +0200 Subject: [PATCH 003/509] minor cleanup --- ortools/graph/BUILD.bazel | 2 +- ortools/graph/iterators_test.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index 9c6e2d8532..ce0aeeba3c 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -858,7 +858,7 @@ cc_test( deps = [ ":iterators", "//ortools/base:gmock_main", - "//ortools/base:int_type", + "//ortools/base:strong_int", ], ) diff --git a/ortools/graph/iterators_test.cc b/ortools/graph/iterators_test.cc index 4f469de963..47fba3b100 100644 --- a/ortools/graph/iterators_test.cc +++ b/ortools/graph/iterators_test.cc @@ -18,12 +18,12 @@ #include #include "gtest/gtest.h" -#include "ortools/base/int_type.h" +#include "ortools/base/strong_int.h" namespace util { namespace { -DEFINE_INT_TYPE(TestIndex, int64_t); +DEFINE_STRONG_INT_TYPE(TestIndex, int64_t); #if __cplusplus >= 202002L static_assert(std::random_access_iterator>); From c30fefab04977ab91823a19a7948e56c25fb99e4 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 14 
May 2025 16:58:06 +0200 Subject: [PATCH 004/509] fixes --- ortools/graph/hamiltonian_path.h | 46 ++++++++++--------- ortools/sat/BUILD.bazel | 1 + ortools/sat/clause.h | 1 - ortools/sat/cp_model_presolve.cc | 2 +- ortools/sat/primary_variables_test.cc | 2 +- ortools/sat/probing.cc | 5 ++- ortools/sat/sat_inprocessing.cc | 5 ++- ortools/sat/sat_solver.cc | 10 +++++ ortools/sat/sat_solver.h | 8 ++-- ortools/sat/scheduling_cuts.cc | 65 +++++++++++---------------- ortools/sat/scheduling_cuts.h | 2 +- 11 files changed, 77 insertions(+), 70 deletions(-) diff --git a/ortools/graph/hamiltonian_path.h b/ortools/graph/hamiltonian_path.h index dc4b754da9..852a063839 100644 --- a/ortools/graph/hamiltonian_path.h +++ b/ortools/graph/hamiltonian_path.h @@ -21,7 +21,7 @@ // // The Shortest Hamiltonian Path Problem (SHPP) is similar to the Traveling // Salesperson Problem (TSP). -// You have to visit all the cities, starting from a given kOne and you +// You have to visit all the cities, starting from a given one and you // do not need to return to your starting point. With the TSP, you can start // anywhere, but you have to return to your start location. // @@ -41,12 +41,16 @@ // Here is how the algorithm works: // Let us denote the nodes to be visited by their indices 0 .. n - 1 // Let us pick 0 as the starting node. -// Let d(i,j) denote the distance (or cost) from i to j. -// f(S, j) where S is a set of nodes and j is a node in S is defined as follows: +// Let cost(i,j) denote the cost (or distance) to go from i to j. +// f(S, j), where S is a set of nodes and j is a node in S, is defined as the +// total cost of the shortest path from 0 to j going through all nodes of S. 
+// +// We can prove easily that it satisfy the following relation: // f(S, j) = min (i in S \ {j}, f(S \ {j}, i) + cost(i, j)) // (j is an element of S) +// // Note that this formulation, from the original Held-Karp paper is a bit -// different, but equivalent to the kOne used in Caseau and Laburthe, Solving +// different, but equivalent to the one used in Caseau and Laburthe, Solving // Small TSPs with Constraints, 1997, ICLP // f(S, j) = min (i in S, f(S \ {i}, i) + cost(i, j)) // (j is not an element of S) @@ -73,6 +77,8 @@ // To implement dynamic programming, we store the preceding results of // computing f(S,j) in an array M[Offset(S,j)]. See the comments about // LatticeMemoryManager::BaseOffset() to see how this is computed. +// This is really what brings the performance of the algorithm, because memory +// is accessed in sequential order, without risking to thrash the cache. // // Keywords: Traveling Salesman, Hamiltonian Path, Dynamic Programming, // Held, Karp. @@ -129,9 +135,9 @@ class Set { typedef Integer IntegerType; // Useful constants. - static constexpr Integer kOne = static_cast(1); - static constexpr Integer kZero = static_cast(0); - static const int MaxCardinality = 8 * sizeof(Integer); // NOLINT + static constexpr Integer kOne = Integer{1}; + static constexpr Integer kZero = Integer{0}; + static constexpr int kMaxCardinality = std::numeric_limits::digits; // Construct a set from an Integer. explicit Set(Integer n) : value_(n) { @@ -143,7 +149,7 @@ class Set { Integer value() const { return value_; } static Set FullSet(Integer card) { - return card == 0 ? Set(0) : Set(~kZero >> (MaxCardinality - card)); + return card == 0 ? Set(0) : Set(~kZero >> (kMaxCardinality - card)); } // Returns the singleton set with 'n' as its only element. 
@@ -355,12 +361,12 @@ class LatticeMemoryManager { template void LatticeMemoryManager::Init(int max_card) { DCHECK_LT(0, max_card); - DCHECK_GE(Set::MaxCardinality, max_card); + DCHECK_LE(max_card, Set::kMaxCardinality); if (max_card <= max_card_) return; max_card_ = max_card; binomial_coefficients_.resize(max_card_ + 1); - // Initialize binomial_coefficients_ using Pascal's triangle recursion. + // Initialize binomial_coefficients_ using Pascal's triangle recurrence for (int n = 0; n <= max_card_; ++n) { binomial_coefficients_[n].resize(n + 2); binomial_coefficients_[n][0] = 1; @@ -418,7 +424,7 @@ uint64_t LatticeMemoryManager::BaseOffset(int card, DCHECK_EQ(card, node_rank); // Note(user): It is possible to get rid of base_offset_[card] by using a 2-D // array. It would also make it possible to free all the memory but the layer - // being constructed and the preceding kOne, if another lattice of paths is + // being constructed and the preceding one, if another lattice of paths is // constructed. // TODO(user): Evaluate the interest of the above. // There are 'card' f(set, j) to store. That is why we need to multiply @@ -465,14 +471,14 @@ class HamiltonianPathSolver { // stored public: // In 2010, 26 was the maximum solvable with 24 Gigs of RAM, and it took - // several minutes. With this 2014 version of the code, kOne may go a little + // several minutes. With this 2014 version of the code, one may go a little // higher, but considering the complexity of the algorithm (n*2^n), and that // there are very good ways to solve TSP with more than 32 cities, // we limit ourselves to 32 cites. // This is why we define the type NodeSet to be 32-bit wide. // TODO(user): remove this limitation by using pruning techniques. 
- typedef uint32_t Integer; - typedef Set NodeSet; + using Integer = uint32_t; + using NodeSet = Set; explicit HamiltonianPathSolver(CostFunction cost); HamiltonianPathSolver(int num_nodes, CostFunction cost); @@ -553,7 +559,7 @@ class HamiltonianPathSolver { // Returns the cost value between two nodes. CostType Cost(int i, int j) { return cost_(i, j); } - // Does all the Dynamic Progamming iterations. + // Does all the Dynamic Programming iterations. void Solve(); // Computes a path by looking at the information in mem_. @@ -618,7 +624,7 @@ HamiltonianPathSolver::HamiltonianPathSolver( robustness_checked_(false), triangle_inequality_checked_(false), solved_(false) { - CHECK_GE(NodeSet::MaxCardinality, num_nodes_); + CHECK_GE(NodeSet::kMaxCardinality, num_nodes_); CHECK(cost_.Check()); } @@ -636,7 +642,7 @@ void HamiltonianPathSolver::ChangeCostMatrix( solved_ = false; cost_.Reset(cost); num_nodes_ = num_nodes; - CHECK_GE(NodeSet::MaxCardinality, num_nodes_); + CHECK_GE(NodeSet::kMaxCardinality, num_nodes_); CHECK(cost_.Check()); } @@ -701,7 +707,7 @@ void HamiltonianPathSolver::Solve() { const NodeSet full_set = NodeSet::FullSet(num_nodes_); - // Get the cost of the tsp from node 0. It is the path that leaves 0 and goes + // Get the cost of the TSP from node 0. It is the path that leaves 0 and goes // through all other nodes, and returns at 0, with minimal cost. tsp_cost_ = mem_.Value(full_set, 0); tsp_path_ = ComputePath(tsp_cost_, full_set, 0); @@ -710,7 +716,7 @@ void HamiltonianPathSolver::Solve() { hamiltonian_costs_.resize(num_nodes_); // Compute the cost of the Hamiltonian paths starting from node 0, going // through all the other nodes, and ending at end_node. Compute the minimum - // kOne along the way. + // one along the way. 
CostType min_hamiltonian_cost = std::numeric_limits::max(); const NodeSet hamiltonian_set = full_set.RemoveElement(0); for (int end_node : hamiltonian_set) { @@ -885,7 +891,7 @@ class PruningHamiltonianSolver { // guaranteed to be smaller than or equal to the cost of Hamiltonian path, // because Hamiltonian path is a spanning tree itself. - // TODO(user): Use generic map-based cache instead of lattice-based kOne. + // TODO(user): Use generic map-based cache instead of lattice-based one. // TODO(user): Use SaturatedArithmetic for better precision. public: diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index b1eb60e362..f65627520b 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -2934,6 +2934,7 @@ cc_test( "//ortools/util:strong_integers", "@abseil-cpp//absl/base:log_severity", "@abseil-cpp//absl/random", + "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", ], ) diff --git a/ortools/sat/clause.h b/ortools/sat/clause.h index de837c5a26..37ad5178b0 100644 --- a/ortools/sat/clause.h +++ b/ortools/sat/clause.h @@ -904,7 +904,6 @@ class BinaryImplicationGraph : public SatPropagator { // enough for us and we could store in common the inlined/not-inlined size. util_intops::StrongVector> implications_; - int64_t num_implications_ = 0; // Used by RemoveDuplicates() and NotifyPossibleDuplicate(). util_intops::StrongVector might_have_dups_; diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 384d638a08..76db43e093 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -8311,7 +8311,7 @@ bool CpModelPresolver::PresolvePureSatPart() { for (int i = 0; i < sat_solver->LiteralTrail().Index(); ++i) { sat_postsolver.FixVariable(sat_solver->LiteralTrail()[i]); } - sat_solver->ExtractClauses(&sat_presolver); + if (!sat_solver->ExtractClauses(&sat_presolver)) return false; // Run the presolve for a small number of passes. // TODO(user): Add a local time limit? 
this can be slow on big SAT problem. diff --git a/ortools/sat/primary_variables_test.cc b/ortools/sat/primary_variables_test.cc index 66f8a104b6..05e1cd34da 100644 --- a/ortools/sat/primary_variables_test.cc +++ b/ortools/sat/primary_variables_test.cc @@ -129,7 +129,7 @@ TEST(PrimaryVariablesTest, WithExactlyOne) { ComputeVariableRelationships(model); EXPECT_EQ(relationships.secondary_variables.size(), 1); const ConstraintProto expected = ParseTestProto(R"pb( - exactly_one { literals: [ 0, 1, 2, 3 ] } + exactly_one { literals: [ 0, 1, 2, 3 ] } )pb"); EXPECT_THAT(relationships.dependency_resolution_constraint, ElementsAre(EqualsProto(expected))); diff --git a/ortools/sat/probing.cc b/ortools/sat/probing.cc index 72704d2caf..e158151b22 100644 --- a/ortools/sat/probing.cc +++ b/ortools/sat/probing.cc @@ -894,8 +894,9 @@ bool FailedLiteralProbingRound(ProbingOptions options, Model* model) { time_limit->GetElapsedDeterministicTime() > limit; LOG_IF(INFO, options.log_info) << "Probing. " - << " num_probed: " << num_probed << " num_fixed: +" << num_newly_fixed - << " (" << num_fixed << "/" << num_variables << ")" + << " num_probed: " << num_probed << "/" << probing_order.size() + << " num_fixed: +" << num_newly_fixed << " (" << num_fixed << "/" + << num_variables << ")" << " explicit_fix:" << num_explicit_fix << " num_conflicts:" << num_conflicts << " new_binary_clauses: " << num_new_binary diff --git a/ortools/sat/sat_inprocessing.cc b/ortools/sat/sat_inprocessing.cc index 796e7ae4aa..4d17f1400c 100644 --- a/ortools/sat/sat_inprocessing.cc +++ b/ortools/sat/sat_inprocessing.cc @@ -156,9 +156,12 @@ bool Inprocessing::PresolveLoop(SatPresolveOptions options) { break; } - // TODO(user): Maintain the total number of literals in the watched clauses. + // Tricky: It is important to clean-up any potential equivalence left in + // case we aborted early due to the limit. 
+ RETURN_IF_FALSE(RemoveFixedAndEquivalentVariables(log_round_info)); if (!LevelZeroPropagate()) return false; + // TODO(user): Maintain the total number of literals in the watched clauses. SOLVER_LOG( logger_, "[Pure SAT presolve]", " num_fixed: ", trail_->Index(), " num_redundant: ", implication_graph_->num_redundant_literals() / 2, "/", diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index e457281736..ef554832d4 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -1850,6 +1850,7 @@ void SatSolver::ProcessNewlyFixedVariables() { // We remove the clauses that are always true and the fixed literals from the // others. Note that none of the clause should be all false because we should // have detected a conflict before this is called. + const int saved_index = trail_->Index(); for (SatClause* clause : clauses_propagator_->AllClausesInCreationOrder()) { if (clause->IsRemoved()) continue; @@ -1877,6 +1878,15 @@ void SatSolver::ProcessNewlyFixedVariables() { AddBinaryClauseInternal(clause->FirstLiteral(), clause->SecondLiteral()); clauses_propagator_->LazyDetach(clause); ++num_binary; + + // Tricky: AddBinaryClauseInternal() might fix literal if there is some + // unprocessed equivalent literal, and the binary clause turn out to be + // unary. This shouldn't happen otherwise the logic of + // RemoveFixedLiteralsAndTestIfTrue() might fail. + // + // TODO(user): This still happen in SAT22.Carry_Save_Fast_1.cnf.cnf.xz, + // it might not directly lead to a bug, but should still be fixed. + DCHECK_EQ(trail_->Index(), saved_index); continue; } } diff --git a/ortools/sat/sat_solver.h b/ortools/sat/sat_solver.h index 170502c936..e0032bb699 100644 --- a/ortools/sat/sat_solver.h +++ b/ortools/sat/sat_solver.h @@ -345,10 +345,8 @@ class SatSolver { // // TODO(user): also copy the removable clauses? 
template - void ExtractClauses(Output* out) { - CHECK(!ModelIsUnsat()); - Backtrack(0); - if (!FinishPropagation()) return; + bool ExtractClauses(Output* out) { + if (!ResetToLevelZero()) return false; // It is important to process the newly fixed variables, so they are not // present in the clauses we export. @@ -366,6 +364,8 @@ class SatSolver { out->AddClause(clause->AsSpan()); } } + + return true; } // Functions to manage the set of learned binary clauses. diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 39be5600b4..80a5fea030 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -83,6 +83,7 @@ bool DecomposedEnergyIsPropagated(const VariablesAssignment& assignment, int t, return false; } if (num_false_literals == decomposed_energy.size()) return false; + if (decomposed_energy.size() == 1 && num_true_literals != 1) return false; // Checks the propagations of the bounds of the size and the demand. IntegerValue propagated_size_min = kMaxIntegerValue; @@ -149,7 +150,7 @@ struct EnergyEvent { // If non empty, a decomposed view of the energy of this event. // First value in each pair is size, second is demand. std::vector decomposed_energy; - bool use_energy = false; + bool use_decomposed_energy = false; // We need this for linearizing the energy in some cases. AffineExpression demand; @@ -649,6 +650,10 @@ CutGenerator CreateCumulativeEnergyCutGenerator( e.demand = demands_helper->Demands()[i]; e.demand_min = demands_helper->DemandMin(i); e.decomposed_energy = demands_helper->DecomposedEnergies()[i]; + if (e.decomposed_energy.size() == 1) { + // We know it was propagated correctly. We can remove this field. 
+ e.decomposed_energy.clear(); + } e.energy_min = demands_helper->EnergyMin(i); e.energy_is_quadratic = demands_helper->EnergyIsQuadratic(i); if (!helper->IsPresent(i)) { @@ -743,7 +748,7 @@ CutGenerator CreateCumulativeTimeTableCutGenerator( LinearExpression demand; double demand_lp = 0.0; bool is_positive = false; - bool use_energy = false; + bool use_decomposed_energy_min = false; bool is_optional = false; }; @@ -779,7 +784,8 @@ CutGenerator CreateCumulativeTimeTableCutGenerator( } e1.demand_lp = e1.demand.LpValue(lp_values); e1.is_positive = true; - e1.use_energy = !demands_helper->DecomposedEnergies()[i].empty(); + e1.use_decomposed_energy_min = + !demands_helper->DecomposedEnergies()[i].empty(); e1.is_optional = !helper->IsPresent(i); TimeTableEvent e2 = e1; @@ -816,7 +822,7 @@ CutGenerator CreateCumulativeTimeTableCutGenerator( if (sum_of_demand_lp >= capacity_lp + kMinCutViolation) { // Create cut. - bool use_energy = false; + bool use_decomposed_energy_min = false; bool use_optional = false; LinearConstraintBuilder cut(model, kMinIntegerValue, IntegerValue(0)); cut.AddTerm(capacity, IntegerValue(-1)); @@ -834,13 +840,13 @@ CutGenerator CreateCumulativeTimeTableCutGenerator( } cut.AddLinearExpression(cut_event.demand, IntegerValue(1)); - use_energy |= cut_event.use_energy; + use_decomposed_energy_min |= cut_event.use_decomposed_energy_min; use_optional |= cut_event.is_optional; } std::string cut_name = "CumulativeTimeTable"; if (use_optional) cut_name += "_optional"; - if (use_energy) cut_name += "_energy"; + if (use_decomposed_energy_min) cut_name += "_energy"; top_n_cuts.AddCut(cut.Build(), cut_name, lp_values); } } @@ -1060,15 +1066,19 @@ CompletionTimeEvent::CompletionTimeEvent(int t, demand_min = 1; demand_is_fixed = true; energy_min = size_min; - use_energy = false; + use_decomposed_energy_min = false; } else { demand_min = demands_helper->DemandMin(t); demand_is_fixed = demands_helper->DemandIsFixed(t); // Default values for energy. 
Will be updated if decomposed energy is // not empty. energy_min = demand_min * size_min; - use_energy = false; + use_decomposed_energy_min = false; decomposed_energy = demands_helper->DecomposedEnergies()[t]; + if (decomposed_energy.size() == 1) { + // We know everything is propagated, we can remove this field. + decomposed_energy.clear(); + } } } @@ -1079,7 +1089,8 @@ std::string CompletionTimeEvent::DebugString() const { ", size_min = ", size_min, ", end = ", end.DebugString(), ", lp_end = ", lp_end, ", size_min = ", size_min, " demand_min = ", demand_min, ", demand_is_fixed = ", demand_is_fixed, - ", energy_min = ", energy_min, ", use_energy = ", use_energy, + ", energy_min = ", energy_min, + ", use_decomposed_energy_min = ", use_decomposed_energy_min, ", lifted = ", lifted, ", decomposed_energy = [", absl::StrJoin(decomposed_energy, ", ", [](std::string* out, const LiteralValueValue& e) { @@ -1405,10 +1416,10 @@ CompletionTimeEvent CopyAndTrimEventAfter(const CompletionTimeEvent& old_event, DCHECK_GT(propagated_energy_min, 0); if (propagated_energy_min > event.energy_min) { - event.use_energy = true; + event.use_decomposed_energy_min = true; event.energy_min = propagated_energy_min; } else { - event.use_energy = false; + event.use_decomposed_energy_min = false; } } event.start_min = time; @@ -1537,7 +1548,6 @@ void GenerateCompletionTimeCutsWithEnergy( double best_efficacy = 0.01; IntegerValue best_min_contrib = 0; bool best_uses_subset_sum = false; - bool best_uses_shapes = false; // Used in the first term of the rhs of the equation. IntegerValue sum_event_contributions = 0; @@ -1545,8 +1555,6 @@ void GenerateCompletionTimeCutsWithEnergy( IntegerValue sum_energy = 0; // For normalization. IntegerValue sum_square_energy = 0; - // Does the cut uses shapes when computing individual event contributions. 
- bool uses_shapes = false; double lp_contrib = 0.0; IntegerValue current_start_min(kMaxIntegerValue); @@ -1575,28 +1583,9 @@ void GenerateCompletionTimeCutsWithEnergy( // area = event.demand_min * event.size_min * event.size_min // In the cumulative case, we can have energy_min > side_min * demand_min. // In that case, we use energy_min * size_min. - if (event.decomposed_energy.empty()) { - if (!AddProductTo(event.energy_min, event.size_min, - &sum_event_contributions)) { - break; - } - } else { - IntegerValue min_shape_area = kMaxIntegerValue; - for (const auto& [literal, size, demand] : event.decomposed_energy) { - IntegerValue shape_area = CapProdI(CapProdI(size, size), demand); - if (assignment.LiteralIsFalse(literal)) continue; - if (assignment.LiteralIsTrue(literal)) { - min_shape_area = shape_area; - break; - } else { - min_shape_area = std::min(min_shape_area, shape_area); - } - } - CHECK_GE(min_shape_area, event.energy_min * event.size_min); - if (min_shape_area > event.energy_min * event.size_min) { - uses_shapes = true; - } - if (!AddTo(min_shape_area, &sum_event_contributions)) break; + if (!AddProductTo(event.energy_min, event.size_min, + &sum_event_contributions)) { + break; } if (!AddSquareTo(event.energy_min, &sum_square_energy)) break; @@ -1644,7 +1633,6 @@ void GenerateCompletionTimeCutsWithEnergy( best_end = i; best_min_contrib = min_contrib; best_uses_subset_sum = reachable_capacity < capacity_max; - best_uses_shapes = uses_shapes; } } @@ -1657,13 +1645,12 @@ void GenerateCompletionTimeCutsWithEnergy( for (int i = 0; i <= best_end; ++i) { const CompletionTimeEvent& event = residual_tasks[i]; is_lifted |= event.lifted; - add_energy_to_name |= event.use_energy; + add_energy_to_name |= event.use_decomposed_energy_min; cut.AddTerm(event.end, event.energy_min); } std::string full_name(cut_name); if (add_energy_to_name) full_name.append("_energy"); if (is_lifted) full_name.append("_lifted"); - if (best_uses_shapes) full_name.append("_shapes"); if 
(best_uses_subset_sum) full_name.append("_subsetsum"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index f000d7bdf6..53d27f1061 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -138,7 +138,7 @@ struct CompletionTimeEvent { // Indicates if the events used the optional energy information from the // model. - bool use_energy = false; + bool use_decomposed_energy_min = false; // Indicates if the cut is lifted, that is if it includes tasks that are not // strictly contained in the current time window. From 709fe9324a18262df702dae3db5fee911eb22a06 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 14 May 2025 17:08:20 +0200 Subject: [PATCH 005/509] [CP-SAT] fix dtime reporting for lns --- ortools/sat/cp_model_lns.cc | 8 ++++---- ortools/sat/cp_model_lns.h | 5 +++-- ortools/sat/cp_model_solver.cc | 10 +++++++--- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 53bb6da5dd..5281a662a4 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -1247,7 +1247,7 @@ double NeighborhoodGenerator::GetUCBScore(int64_t total_num_calls) const { return current_average_ + sqrt((2 * log(total_num_calls)) / num_calls_); } -double NeighborhoodGenerator::Synchronize() { +absl::Span NeighborhoodGenerator::Synchronize() { absl::MutexLock mutex_lock(&generator_mutex_); // To make the whole update process deterministic, we currently sort the @@ -1258,7 +1258,7 @@ double NeighborhoodGenerator::Synchronize() { int num_fully_solved_in_batch = 0; int num_not_fully_solved_in_batch = 0; - double total_dtime = 0.0; + tmp_dtimes_.clear(); for (const SolveData& data : solve_data_) { ++num_calls_; @@ -1304,7 +1304,7 @@ double NeighborhoodGenerator::Synchronize() { current_average_ = 0.9 * current_average_ + 0.1 * gain_per_time_unit; } - total_dtime += 
data.deterministic_time; + tmp_dtimes_.push_back(data.deterministic_time); } // Update the difficulty. @@ -1327,7 +1327,7 @@ double NeighborhoodGenerator::Synchronize() { } solve_data_.clear(); - return total_dtime; + return tmp_dtimes_; } std::vector diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index bc151fb217..91b823b4e8 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -475,9 +475,9 @@ class NeighborhoodGenerator { } // Process all the recently added solve data and update this generator - // score and difficulty. This returns the sum of the deterministic time of + // score and difficulty. This returns list of the deterministic time of // each SolveData. - double Synchronize(); + absl::Span Synchronize(); // Returns a short description of the generator. std::string name() const { return name_; } @@ -528,6 +528,7 @@ class NeighborhoodGenerator { private: std::vector solve_data_; + std::vector tmp_dtimes_; // Current parameters to be used when generating/solving a neighborhood with // this generator. Only updated on Synchronize(). 
diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 680555185a..dcf164ef21 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1721,9 +1721,13 @@ class LnsSolver : public SubSolver { } void Synchronize() override { - const double dtime = generator_->Synchronize(); - AddTaskDeterministicDuration(dtime); - shared_->time_limit->AdvanceDeterministicTime(dtime); + double sum = 0.0; + const absl::Span dtimes = generator_->Synchronize(); + for (const double dtime : dtimes) { + sum += dtime; + AddTaskDeterministicDuration(dtime); + } + shared_->time_limit->AdvanceDeterministicTime(sum); } private: From 9355534e7dd62f13d11d58cfb9cdaa48f152ceec Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 16 May 2025 14:13:06 +0200 Subject: [PATCH 006/509] algorithms: export from google3 --- ortools/algorithms/BUILD.bazel | 6 +- ortools/algorithms/radix_sort.h | 135 ++++++++++++++++++------- ortools/algorithms/radix_sort_test.cc | 138 +++++++++++++++++++------- ortools/base/dump_vars.h | 5 +- 4 files changed, 208 insertions(+), 76 deletions(-) diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index 1d391a77d2..2d520cb7da 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -97,6 +97,7 @@ cc_library( deps = [ "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/base", + "@abseil-cpp//absl/base:log_severity", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/numeric:bits", @@ -118,14 +119,13 @@ cc_test( "//ortools/base:dump_vars", "//ortools/base:gmock_main", "//ortools/base:mathutil", - "//ortools/base:timer", "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/log", - "@abseil-cpp//absl/log:check", + "@abseil-cpp//absl/numeric:bits", + "@abseil-cpp//absl/numeric:int128", "@abseil-cpp//absl/random", "@abseil-cpp//absl/random:bit_gen_ref", "@abseil-cpp//absl/random:distributions", - "@abseil-cpp//absl/time", 
"@abseil-cpp//absl/types:span", "@com_google_benchmark//:benchmark", ], diff --git a/ortools/algorithms/radix_sort.h b/ortools/algorithms/radix_sort.h index 7ed8764b06..419bd930ef 100644 --- a/ortools/algorithms/radix_sort.h +++ b/ortools/algorithms/radix_sort.h @@ -30,9 +30,6 @@ // But the worst-case performance of RadixSort() is much faster than the // worst-case performance of std::sort(). // To be sure, you should benchmark your use case. -// -// TODO: it could be even faster than that when the values are in [0..N) for a -// known value N that's significantly lower than the max integer value. #include #include @@ -45,8 +42,10 @@ #include "absl/algorithm/container.h" #include "absl/base/casts.h" +#include "absl/base/log_severity.h" #include "absl/log/check.h" #include "absl/log/log.h" +#include "absl/numeric/bits.h" #include "absl/types/span.h" namespace operations_research { @@ -54,14 +53,24 @@ namespace operations_research { // Sorts an array of int, double, or other numeric types. Up to ~10x faster than // std::sort() when size ≥ 8k: go/radix-sort-bench. See file-level comment. template -void RadixSort(absl::Span values); +void RadixSort( + absl::Span values, + // ADVANCED USAGE: if you're sorting nonnegative integers, and suspect that + // their values use less bits than their full bit width, you may improve + // performance by setting `num_bits` to a lower value, for example + // NumBitsForZeroTo(max_value). It might even be faster to scan the values + // once just to do that, e.g., RadixSort(values, + // NumBitsForZeroTo(*absl::c_max_element(values))); + int num_bits = sizeof(T) * 8); + +template +int NumBitsForZeroTo(T max_value); // ADVANCED USAGE: For power users who know which radix_width or num_passes // they need, possibly differing from the canonical values used by RadixSort(). template void RadixSortTpl(absl::Span values); -// TODO(user): Support arbitrary types with an int() or other numerical getter. 
// TODO(user): Support the user providing already-allocated memory buffers // for the radix counts and/or for the temporary vector copy. @@ -240,49 +249,101 @@ void RadixSortTpl(absl::Span values) { } } -// TODO(user): Expose an API that takes the "max value" as argument, for -// users who want to take advantage of that knowledge to reduce the number of -// passes. template -void RadixSort(absl::Span values) { - switch (sizeof(T)) { - case 1: - if (values.size() < 300) { - absl::c_sort(values); +int NumBitsForZeroTo(T max_value) { + if constexpr (!std::is_integral_v) { + return sizeof(T) * 8; + } else { + using U = std::make_unsigned_t; + DCHECK_GE(max_value, 0); + return std::numeric_limits::digits - absl::countl_zero(max_value); + } +} + +#ifdef NDEBUG +const bool DEBUG_MODE = false; +#else +const bool DEBUG_MODE = true; +#endif + +template +void RadixSort(absl::Span values, int num_bits) { + // Debug-check that num_bits is valid w.r.t. the values given. + if constexpr (DEBUG_MODE) { + if constexpr (!std::is_integral_v) { + DCHECK_EQ(num_bits, sizeof(T) * 8); + } else if (!values.empty()) { + auto minmax_it = absl::c_minmax_element(values); + const T min_val = *minmax_it.first; + const T max_val = *minmax_it.second; + if (num_bits == 0) { + DCHECK_EQ(max_val, 0); } else { - RadixSortTpl(values); + using U = std::make_unsigned_t; + // We only shift by num_bits - 1, to avoid to potentially shift by the + // entire bit width, which would be undefined behavior. + DCHECK_LE(static_cast(max_val) >> (num_bits - 1), 1); + DCHECK_LE(static_cast(min_val) >> (num_bits - 1), 1); } - return; - case 2: - if (values.size() < 300) { - absl::c_sort(values); + } + } + + // This shortcut here is important to have early, guarded by as few "if" + // branches as possible, for the use case where the array is very small. + // For larger arrays below, the overhead of a few "if" is negligible. 
+ if (values.size() < 300) { + absl::c_sort(values); + return; + } + + // TODO(user): More complex decision tree, based on benchmarks. This one + // is already nice, but some cases can surely be optimized. + if (num_bits <= 16) { + if (num_bits <= 8) { + RadixSortTpl(values); + } else { + RadixSortTpl(values); + } + } else if (num_bits <= 32) { // num_bits ∈ [17..32] + if (values.size() < 1000) { + if (num_bits <= 24) { + RadixSortTpl(values); } else { - RadixSortTpl(values); - } - return; - case 4: - if (values.size() < 300) { - absl::c_sort(values); - } else if (values.size() < 1000) { RadixSortTpl(values); - } else if (values.size() < 2'500'000) { - RadixSortTpl(values); - } else { - RadixSortTpl(values); } - return; - case 8: - if (values.size() < 5000) { - absl::c_sort(values); - } else if (values.size() < 1'500'000) { + } else if (values.size() < 2'500'000) { + if (num_bits <= 22) { + RadixSortTpl(values); + } else { + RadixSortTpl(values); + } + } else { + RadixSortTpl(values); + } + } else if (num_bits <= 64) { // num_bits ∈ [33..64] + if (values.size() < 5000) { + absl::c_sort(values); + } else if (values.size() < 1'500'000) { + if (num_bits <= 33) { + RadixSortTpl(values); + } else if (num_bits <= 44) { + RadixSortTpl(values); + } else if (num_bits <= 55) { + RadixSortTpl(values); + } else { RadixSortTpl(values); + } + } else { + if (num_bits <= 48) { + RadixSortTpl(values); } else { RadixSortTpl(values); } - return; + } + } else { + LOG(DFATAL) << "RadixSort() called with unsupported value type"; + absl::c_sort(values); } - LOG(DFATAL) << "RadixSort() called with unsupported value type"; - absl::c_sort(values); } } // namespace operations_research diff --git a/ortools/algorithms/radix_sort_test.cc b/ortools/algorithms/radix_sort_test.cc index 88957d3ed0..269b76c459 100644 --- a/ortools/algorithms/radix_sort_test.cc +++ b/ortools/algorithms/radix_sort_test.cc @@ -13,7 +13,6 @@ #include "ortools/algorithms/radix_sort.h" -#include #include #include #include 
@@ -25,6 +24,8 @@ #include "absl/algorithm/container.h" #include "absl/log/log.h" +#include "absl/numeric/bits.h" +#include "absl/numeric/int128.h" #include "absl/random/bit_gen_ref.h" #include "absl/random/distributions.h" #include "absl/random/random.h" @@ -41,6 +42,28 @@ namespace { using ::testing::ElementsAre; using ::testing::IsEmpty; +template +class NumBitsForZeroToTest : public ::testing::Test {}; + +TYPED_TEST_SUITE_P(NumBitsForZeroToTest); + +TYPED_TEST_P(NumBitsForZeroToTest, CorrectnessStressTest) { + absl::BitGen rng; + constexpr int kNumTests = 1'000'000; + for (int test = 0; test < kNumTests; ++test) { + const TypeParam max_val = absl::LogUniform( + rng, 0, std::numeric_limits::max()); + const int num_bits = NumBitsForZeroTo(max_val); + EXPECT_LE(absl::int128{max_val}, absl::int128{1} << num_bits); + } +} + +REGISTER_TYPED_TEST_SUITE_P(NumBitsForZeroToTest, CorrectnessStressTest); +using IntTypes = ::testing::Types; + +INSTANTIATE_TYPED_TEST_SUITE_P(My, NumBitsForZeroToTest, IntTypes); + // If T is a floating-point type, ignores min_val / max_val. template std::vector RandomValues(absl::BitGenRef rng, size_t size, @@ -103,6 +126,9 @@ TYPED_TEST_P(RadixSortTest, RandomizedCorrectnessTestAgainstStdSortSmallSizes) { // Will we use the standard RadixSort() or the RadixSortTpl<>() variant? 
const bool use_main_radix_sort = absl::Bernoulli(rng, 0.5); + const bool use_num_bits = std::is_integral_v && + use_main_radix_sort && !allow_negative && + absl::Bernoulli(rng, 0.5); // We potentially test the "power usage" of calling RadixSortTpl<> with // radix_width * num_passes < num_bits(TypeParam), when the actual values @@ -128,7 +154,12 @@ TYPED_TEST_P(RadixSortTest, RandomizedCorrectnessTestAgainstStdSortSmallSizes) { int radix_width = -1; int num_passes = -1; if (use_main_radix_sort) { - RadixSort(absl::MakeSpan(sorted_values)); + if (use_num_bits) { + RadixSort(absl::MakeSpan(sorted_values), + NumBitsForZeroTo(max_abs_val.value())); + } else { + RadixSort(absl::MakeSpan(sorted_values)); + } } else { // Draw random (radix_width, num_passes) pairs until we get a valid one. constexpr int kMaxNumPasses = 8; @@ -147,8 +178,8 @@ TYPED_TEST_P(RadixSortTest, RandomizedCorrectnessTestAgainstStdSortSmallSizes) { absl::c_sort(expected_values); ASSERT_TRUE(sorted_values == expected_values) << DUMP_VARS(test, use_main_radix_sort, radix_width, num_passes, size, - allow_negative, val_bits, max_abs_val, unsorted_values, - sorted_values, expected_values); + allow_negative, use_num_bits, val_bits, max_abs_val, + unsorted_values, sorted_values, expected_values); } } @@ -205,10 +236,20 @@ TYPED_TEST_P(RadixSortTest, RandomizedCorrectnessTestAgainstStdSortLargeSizes) { std::vector values = RandomValues(rng, size, allow_negative, /*max_abs_val=*/{}); const bool use_main_radix_sort = absl::Bernoulli(rng, 0.5); + const bool use_num_bits = std::is_integral_v && + use_main_radix_sort && !allow_negative && + absl::Bernoulli(rng, 0.5); + int radix_width = -1; int num_passes = -1; if (use_main_radix_sort) { - RadixSort(absl::MakeSpan(values)); + if (use_num_bits) { + RadixSort( + absl::MakeSpan(values), + NumBitsForZeroTo(size == 0 ? 
1 : *absl::c_max_element(values))); + } else { + RadixSort(absl::MakeSpan(values)); + } } else { radix_width = RandomRadixWidth(rng); num_passes = @@ -218,7 +259,7 @@ TYPED_TEST_P(RadixSortTest, RandomizedCorrectnessTestAgainstStdSortLargeSizes) { // Contrary to the 'small' stress test, we don't log the data upon failure. ASSERT_TRUE(absl::c_is_sorted(values)) << DUMP_VARS(test, use_main_radix_sort, radix_width, num_passes, size, - allow_negative); + allow_negative, use_num_bits); } } @@ -237,13 +278,16 @@ template std::vector SortedValues(size_t size) { const T offset = std::is_signed_v ? -static_cast(size) / 2 : T{0}; std::vector values(size); - for (size_t i = 0; i < size; ++i) values[i] = i = offset; + for (size_t i = 0; i < size; ++i) values[i] = i + offset; return values; } enum Algo { kStdSort, - kRadixSort, + kRadixSortTpl, + kRadixSortKnownMax, + kRadixSortComputeMax, + kRadixSortWorst, }; enum InputOrder { @@ -280,9 +324,22 @@ void BM_Sort(benchmark::State& state) { to_sort = values; if constexpr (algo == kStdSort) { absl::c_sort(to_sort); - } else { + } else if constexpr (algo == kRadixSortTpl) { absl::Span span{to_sort.data(), to_sort.size()}; RadixSortTpl(span); + } else if constexpr (algo == kRadixSortKnownMax) { + absl::Span span = absl::MakeSpan(to_sort); + RadixSort(span, NumBitsForZeroTo( + max_abs_val.value_or(std::numeric_limits::max()))); + } else if constexpr (algo == kRadixSortComputeMax) { + absl::Span span{to_sort.data(), to_sort.size()}; + RadixSort(span, NumBitsForZeroTo( + size == 0 ? 
1 : *absl::c_max_element(to_sort))); + } else if constexpr (algo == kRadixSortWorst) { + absl::Span span{to_sort.data(), to_sort.size()}; + RadixSort(span); + } else { + LOG(DFATAL) << "Unsupported algo: " << algo; } benchmark::DoNotOptimize(to_sort); } @@ -317,114 +374,127 @@ BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(1, 128 << 10); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(16, 2048); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(256, 32 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 32 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(16, 2048); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(256, 32 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 32 << 20); -BENCHMARK(BM_Sort) +BENCHMARK(BM_Sort) ->RangeMultiplier(2) - ->Range(16, 2048); -BENCHMARK(BM_Sort) + ->Range(128 << 10, 32 << 20); +BENCHMARK(BM_Sort) ->RangeMultiplier(2) - ->Range(256, 32 << 20); -BENCHMARK(BM_SortRange(128 << 10, 32 << 20); +BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 32 << 20); -BENCHMARK(BM_Sort) + ->RangeMultiplier(2) + ->Range(16, 2048); +BENCHMARK(BM_Sort) + ->RangeMultiplier(2) + ->Range(256, 32 << 20); +BENCHMARK(BM_Sort) + ->RangeMultiplier(2) + ->Range(128 << 10, 32 << 20); + +BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(2048, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(2048, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(2048, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(2048, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) 
->RangeMultiplier(2) ->Range(128 << 10, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(2048, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(2048, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 8 << 20) ->Arg(32 << 20) ->Arg(128 << 20); -BENCHMARK(BM_Sort) ->RangeMultiplier(2) ->Range(128 << 10, 8 << 20) diff --git a/ortools/base/dump_vars.h b/ortools/base/dump_vars.h index 449cb84ac5..8413948cd3 100644 --- a/ortools/base/dump_vars.h +++ b/ortools/base/dump_vars.h @@ -62,6 +62,7 @@ #define DUMP_FOR_EACH_N9(F, a, ...) F(a) DUMP_FOR_EACH_N8(F, __VA_ARGS__) #define DUMP_FOR_EACH_N10(F, a, ...) F(a) DUMP_FOR_EACH_N9(F, __VA_ARGS__) #define DUMP_FOR_EACH_N11(F, a, ...) F(a) DUMP_FOR_EACH_N10(F, __VA_ARGS__) +#define DUMP_FOR_EACH_N12(F, a, ...) F(a) DUMP_FOR_EACH_N11(F, __VA_ARGS__) #define DUMP_CONCATENATE(x, y) x##y #define DUMP_FOR_EACH_(N, F, ...) \ @@ -69,8 +70,8 @@ #define DUMP_NARG(...) DUMP_NARG_(__VA_OPT__(__VA_ARGS__, ) DUMP_RSEQ_N()) #define DUMP_NARG_(...) DUMP_ARG_N(__VA_ARGS__) -#define DUMP_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, N, ...) N -#define DUMP_RSEQ_N() 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 +#define DUMP_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, N, ...) N +#define DUMP_RSEQ_N() 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 #define DUMP_FOR_EACH(F, ...) 
\ DUMP_FOR_EACH_(DUMP_NARG(__VA_ARGS__), F __VA_OPT__(, __VA_ARGS__)) From 88c57dd1f07fff0190268c832a1338fc946124a5 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 16 May 2025 14:34:40 +0200 Subject: [PATCH 007/509] rework ortools samples (#4590) --- ortools/algorithms/samples/knapsack.cc | 5 +++ .../samples/simple_knapsack_program.cc | 7 +++- ortools/base/init_google.h | 36 +++++++++++-------- .../constraint_solver/samples/cp_is_fun_cp.cc | 2 +- .../samples/minimal_jobshop_cp.cc | 2 +- .../constraint_solver/samples/nqueens_cp.cc | 7 +++- .../constraint_solver/samples/nurses_cp.cc | 2 +- .../samples/rabbits_and_pheasants_cp.cc | 2 +- .../samples/simple_cp_program.cc | 7 +++- .../samples/simple_ls_program.cc | 2 +- .../assignment_linear_sum_assignment.cc | 7 +++- ortools/graph/samples/assignment_min_flow.cc | 7 +++- ortools/graph/samples/balance_min_flow.cc | 7 +++- .../graph/samples/simple_max_flow_program.cc | 7 +++- .../samples/simple_min_cost_flow_program.cc | 7 +++- ortools/init/init.cc | 6 ++-- ortools/init/init.h | 1 - .../samples/assignment_groups_mip.cc | 7 +++- .../linear_solver/samples/assignment_mip.cc | 7 +++- .../samples/assignment_task_sizes_mip.cc | 7 +++- .../samples/assignment_teams_mip.cc | 7 +++- .../samples/integer_programming_example.cc | 7 +++- .../samples/linear_programming_example.cc | 7 +++- .../linear_solver/samples/mip_var_array.cc | 7 +++- .../samples/multiple_knapsack_mip.cc | 5 +++ .../samples/network_design_ilph_main.cc | 3 ++ .../samples/simple_lp_program.cc | 7 +++- .../samples/simple_mip_program.cc | 7 +++- ortools/linear_solver/samples/stigler_diet.cc | 2 +- ortools/sat/samples/assignment_groups_sat.cc | 7 +++- ortools/sat/samples/assignment_sat.cc | 7 +++- .../sat/samples/assignment_task_sizes_sat.cc | 7 +++- ortools/sat/samples/assignment_teams_sat.cc | 7 +++- ortools/sat/samples/assumptions_sample_sat.cc | 7 +++- ortools/sat/samples/binpacking_problem_sat.cc | 8 +++-- ortools/sat/samples/bool_or_sample_sat.cc | 8 
+++-- ortools/sat/samples/channeling_sample_sat.cc | 8 +++-- ortools/sat/samples/clone_model_sample_sat.cc | 8 +++-- ortools/sat/samples/cp_is_fun_sat.cc | 7 +++- ortools/sat/samples/cp_sat_example.cc | 7 +++- .../earliness_tardiness_cost_sample_sat.cc | 8 +++-- ortools/sat/samples/interval_sample_sat.cc | 8 +++-- ortools/sat/samples/literal_sample_sat.cc | 8 +++-- ortools/sat/samples/minimal_jobshop_sat.cc | 7 +++- ortools/sat/samples/multiple_knapsack_sat.cc | 7 +++- ortools/sat/samples/no_overlap_sample_sat.cc | 8 +++-- ortools/sat/samples/non_linear_sat.cc | 7 +++- ortools/sat/samples/nqueens_sat.cc | 7 +++- ortools/sat/samples/nurses_sat.cc | 7 +++- .../samples/optional_interval_sample_sat.cc | 8 +++-- .../sat/samples/rabbits_and_pheasants_sat.cc | 8 +++-- ortools/sat/samples/ranking_sample_sat.cc | 8 +++-- ortools/sat/samples/reified_sample_sat.cc | 8 +++-- ortools/sat/samples/schedule_requests_sat.cc | 7 +++- .../search_for_all_solutions_sample_sat.cc | 8 +++-- ortools/sat/samples/simple_sat_program.cc | 7 +++- .../samples/solution_hinting_sample_sat.cc | 8 +++-- ...print_intermediate_solutions_sample_sat.cc | 8 +++-- .../solve_with_time_limit_sample_sat.cc | 8 +++-- .../sat/samples/step_function_sample_sat.cc | 8 +++-- .../stop_after_n_solutions_sample_sat.cc | 8 +++-- ortools/set_cover/samples/set_cover.cc | 7 +++- 62 files changed, 343 insertions(+), 93 deletions(-) diff --git a/ortools/algorithms/samples/knapsack.cc b/ortools/algorithms/samples/knapsack.cc index fba0ab1e8b..604db67e7c 100644 --- a/ortools/algorithms/samples/knapsack.cc +++ b/ortools/algorithms/samples/knapsack.cc @@ -21,7 +21,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "ortools/algorithms/knapsack_solver.h" +#include "ortools/base/init_google.h" // [END import] namespace operations_research { @@ -86,6 +89,8 @@ void RunKnapsackExample() { } // namespace operations_research int main(int argc, char** argv) { + InitGoogle(argv[0], 
&argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::RunKnapsackExample(); return EXIT_SUCCESS; } diff --git a/ortools/algorithms/samples/simple_knapsack_program.cc b/ortools/algorithms/samples/simple_knapsack_program.cc index 155d3c0314..82f9b1ac8a 100644 --- a/ortools/algorithms/samples/simple_knapsack_program.cc +++ b/ortools/algorithms/samples/simple_knapsack_program.cc @@ -21,8 +21,11 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" #include "ortools/algorithms/knapsack_solver.h" +#include "ortools/base/init_google.h" // [END import] namespace operations_research { @@ -80,7 +83,9 @@ void SimpleKnapsackProgram() { } // namespace algorithms } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::algorithms::SimpleKnapsackProgram(); return EXIT_SUCCESS; } diff --git a/ortools/base/init_google.h b/ortools/base/init_google.h index 0789a46abb..00f3e03918 100644 --- a/ortools/base/init_google.h +++ b/ortools/base/init_google.h @@ -14,11 +14,26 @@ #ifndef OR_TOOLS_BASE_INIT_GOOGLE_H_ #define OR_TOOLS_BASE_INIT_GOOGLE_H_ -#include "absl/flags/declare.h" -#include "absl/flags/flag.h" +#include "absl/flags/declare.h" // IWYU pragma: keep +#include "absl/flags/flag.h" // IWYU pragma: keep #include "absl/flags/parse.h" #include "absl/flags/usage.h" #include "absl/log/initialize.h" +#include "absl/strings/string_view.h" + +namespace google { + +inline void InitGoogleLogging(absl::string_view usage) { + absl::InitializeLog(); + if (!usage.empty()) { + absl::SetProgramUsageMessage(usage); + } +} + +inline void ShutdownGoogleLogging() {} // No op. + +} // namespace google + // Initializes misc google-related things in the binary. 
// // Typically called early on in main() and must be called before other @@ -31,22 +46,15 @@ // requirement for an element (*argv)[*argc] to exist or to have // any particular value, unlike the similar array that is passed // to the `main` function. -inline void InitGoogle(const char* usage, int* argc, char*** argv, +inline void InitGoogle(absl::string_view usage, int* argc, char*** argv, bool deprecated) { - absl::InitializeLog(); - absl::SetProgramUsageMessage(usage); + google::InitGoogleLogging(usage); absl::ParseCommandLine(*argc, *argv); } -namespace google { - -inline void InitGoogleLogging(const std::string& usage) { - absl::InitializeLog(); - absl::SetProgramUsageMessage(usage); +inline void InitGoogle(const char* usage, int* argc, char*** argv, + bool deprecated) { + InitGoogle(absl::NullSafeStringView(usage), argc, argv, deprecated); } -inline void ShutdownGoogleLogging() {} // No op. - -} // namespace google - #endif // OR_TOOLS_BASE_INIT_GOOGLE_H_ diff --git a/ortools/constraint_solver/samples/cp_is_fun_cp.cc b/ortools/constraint_solver/samples/cp_is_fun_cp.cc index 98f6680204..d8af070475 100644 --- a/ortools/constraint_solver/samples/cp_is_fun_cp.cc +++ b/ortools/constraint_solver/samples/cp_is_fun_cp.cc @@ -148,8 +148,8 @@ void CPIsFunCp() { } // namespace operations_research int main(int argc, char** argv) { - absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::CPIsFunCp(); return EXIT_SUCCESS; } diff --git a/ortools/constraint_solver/samples/minimal_jobshop_cp.cc b/ortools/constraint_solver/samples/minimal_jobshop_cp.cc index e8a0023934..e8d5d93561 100644 --- a/ortools/constraint_solver/samples/minimal_jobshop_cp.cc +++ b/ortools/constraint_solver/samples/minimal_jobshop_cp.cc @@ -198,8 +198,8 @@ void SolveJobShopExample() { } // namespace operations_research int main(int argc, char** argv) { - 
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SolveJobShopExample(); return EXIT_SUCCESS; } diff --git a/ortools/constraint_solver/samples/nqueens_cp.cc b/ortools/constraint_solver/samples/nqueens_cp.cc index 04c5000039..f622eb1ee8 100644 --- a/ortools/constraint_solver/samples/nqueens_cp.cc +++ b/ortools/constraint_solver/samples/nqueens_cp.cc @@ -19,6 +19,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/constraint_solver/constraint_solver.h" // [END import] @@ -102,7 +105,9 @@ void NQueensCp(const int board_size) { } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); int board_size = 8; if (argc > 1) { board_size = std::atoi(argv[1]); diff --git a/ortools/constraint_solver/samples/nurses_cp.cc b/ortools/constraint_solver/samples/nurses_cp.cc index bf572b30dd..a01f4400d8 100644 --- a/ortools/constraint_solver/samples/nurses_cp.cc +++ b/ortools/constraint_solver/samples/nurses_cp.cc @@ -205,8 +205,8 @@ void SolveNursesExample() { } // namespace operations_research int main(int argc, char** argv) { - absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SolveNursesExample(); return EXIT_SUCCESS; } diff --git a/ortools/constraint_solver/samples/rabbits_and_pheasants_cp.cc b/ortools/constraint_solver/samples/rabbits_and_pheasants_cp.cc index 3c106041bd..b88dd99cd2 100644 --- a/ortools/constraint_solver/samples/rabbits_and_pheasants_cp.cc +++ b/ortools/constraint_solver/samples/rabbits_and_pheasants_cp.cc @@ -61,8 +61,8 @@ 
void RunConstraintProgrammingExample() { } // namespace operations_research int main(int argc, char** argv) { - absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::RunConstraintProgrammingExample(); return EXIT_SUCCESS; } diff --git a/ortools/constraint_solver/samples/simple_cp_program.cc b/ortools/constraint_solver/samples/simple_cp_program.cc index baa90bdc58..d916b7b730 100644 --- a/ortools/constraint_solver/samples/simple_cp_program.cc +++ b/ortools/constraint_solver/samples/simple_cp_program.cc @@ -16,6 +16,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" // [END import] @@ -73,7 +76,9 @@ void SimpleCpProgram() { } // namespace operations_research -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SimpleCpProgram(); return EXIT_SUCCESS; } diff --git a/ortools/constraint_solver/samples/simple_ls_program.cc b/ortools/constraint_solver/samples/simple_ls_program.cc index 3428b8a0b9..05b82fa3e8 100644 --- a/ortools/constraint_solver/samples/simple_ls_program.cc +++ b/ortools/constraint_solver/samples/simple_ls_program.cc @@ -202,8 +202,8 @@ void SolveProblem(SolveType solve_type) { } // namespace operations_research int main(int argc, char** argv) { - absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SolveProblem(operations_research::LNS); operations_research::SolveProblem(operations_research::LS); operations_research::SolveProblem(operations_research::LS_WITH_FILTER); diff --git 
a/ortools/graph/samples/assignment_linear_sum_assignment.cc b/ortools/graph/samples/assignment_linear_sum_assignment.cc index 882c97db65..ba8a566010 100644 --- a/ortools/graph/samples/assignment_linear_sum_assignment.cc +++ b/ortools/graph/samples/assignment_linear_sum_assignment.cc @@ -20,7 +20,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" // [END import] namespace operations_research { @@ -77,7 +80,9 @@ void AssignmentLinearSumAssignment() { } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::AssignmentLinearSumAssignment(); return EXIT_SUCCESS; } diff --git a/ortools/graph/samples/assignment_min_flow.cc b/ortools/graph/samples/assignment_min_flow.cc index 0597de159a..4adcd67521 100644 --- a/ortools/graph/samples/assignment_min_flow.cc +++ b/ortools/graph/samples/assignment_min_flow.cc @@ -17,6 +17,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/graph/min_cost_flow.h" // [END import] @@ -93,7 +96,9 @@ void AssignmentMinFlow() { } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::AssignmentMinFlow(); return EXIT_SUCCESS; } diff --git a/ortools/graph/samples/balance_min_flow.cc b/ortools/graph/samples/balance_min_flow.cc index 4521199dc1..f9b8ccc3b4 100644 --- a/ortools/graph/samples/balance_min_flow.cc +++ b/ortools/graph/samples/balance_min_flow.cc @@ -17,6 +17,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/graph/min_cost_flow.h" // [END import] 
@@ -101,7 +104,9 @@ void BalanceMinFlow() { } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::BalanceMinFlow(); return EXIT_SUCCESS; } diff --git a/ortools/graph/samples/simple_max_flow_program.cc b/ortools/graph/samples/simple_max_flow_program.cc index a59f1de7c4..a49028a799 100644 --- a/ortools/graph/samples/simple_max_flow_program.cc +++ b/ortools/graph/samples/simple_max_flow_program.cc @@ -18,6 +18,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/graph/max_flow.h" // [END import] @@ -68,7 +71,9 @@ void SimpleMaxFlowProgram() { } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SimpleMaxFlowProgram(); return EXIT_SUCCESS; } diff --git a/ortools/graph/samples/simple_min_cost_flow_program.cc b/ortools/graph/samples/simple_min_cost_flow_program.cc index 75d89669b5..319123cf4e 100644 --- a/ortools/graph/samples/simple_min_cost_flow_program.cc +++ b/ortools/graph/samples/simple_min_cost_flow_program.cc @@ -18,6 +18,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/graph/min_cost_flow.h" // [END import] @@ -81,7 +84,9 @@ void SimpleMinCostFlowProgram() { } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SimpleMinCostFlowProgram(); return EXIT_SUCCESS; } diff --git a/ortools/init/init.cc b/ortools/init/init.cc index 389b27021a..aefe9ccfbe 100644 --- a/ortools/init/init.cc +++ b/ortools/init/init.cc @@ 
-17,18 +17,16 @@ #include "absl/base/log_severity.h" #include "absl/flags/flag.h" -#include "absl/flags/usage.h" #include "absl/log/globals.h" -#include "absl/log/initialize.h" #include "absl/strings/string_view.h" +#include "ortools/base/init_google.h" #include "ortools/gurobi/environment.h" #include "ortools/sat/cp_model_solver.h" #include "ortools/sat/cp_model_solver_helpers.h" namespace operations_research { void CppBridge::InitLogging(absl::string_view usage) { - absl::SetProgramUsageMessage(usage); - absl::InitializeLog(); + google::InitGoogleLogging(usage); } void CppBridge::SetFlags(const CppFlags& flags) { diff --git a/ortools/init/init.h b/ortools/init/init.h index fe058d64ce..2ea0fbbda0 100644 --- a/ortools/init/init.h +++ b/ortools/init/init.h @@ -16,7 +16,6 @@ #include #include -#include #include "absl/strings/string_view.h" #include "ortools/base/logging.h" diff --git a/ortools/linear_solver/samples/assignment_groups_mip.cc b/ortools/linear_solver/samples/assignment_groups_mip.cc index 564902592f..2dfff87fdd 100644 --- a/ortools/linear_solver/samples/assignment_groups_mip.cc +++ b/ortools/linear_solver/samples/assignment_groups_mip.cc @@ -20,7 +20,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -230,7 +233,9 @@ void AssignmentTeamsMip() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::AssignmentTeamsMip(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/assignment_mip.cc b/ortools/linear_solver/samples/assignment_mip.cc index 5b77e603a3..ab05b896de 100644 --- a/ortools/linear_solver/samples/assignment_mip.cc +++ 
b/ortools/linear_solver/samples/assignment_mip.cc @@ -16,6 +16,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -115,7 +118,9 @@ void AssignmentMip() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::AssignmentMip(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/assignment_task_sizes_mip.cc b/ortools/linear_solver/samples/assignment_task_sizes_mip.cc index ec8e17f90c..15ab30fae0 100644 --- a/ortools/linear_solver/samples/assignment_task_sizes_mip.cc +++ b/ortools/linear_solver/samples/assignment_task_sizes_mip.cc @@ -19,7 +19,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -135,7 +138,9 @@ void AssignmentTeamsMip() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::AssignmentTeamsMip(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/assignment_teams_mip.cc b/ortools/linear_solver/samples/assignment_teams_mip.cc index 51729ace3b..c22c024fb6 100644 --- a/ortools/linear_solver/samples/assignment_teams_mip.cc +++ b/ortools/linear_solver/samples/assignment_teams_mip.cc @@ -19,7 +19,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include 
"ortools/base/logging.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -149,7 +152,9 @@ void AssignmentTeamsMip() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::AssignmentTeamsMip(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/integer_programming_example.cc b/ortools/linear_solver/samples/integer_programming_example.cc index 539b343186..5371ba0118 100644 --- a/ortools/linear_solver/samples/integer_programming_example.cc +++ b/ortools/linear_solver/samples/integer_programming_example.cc @@ -16,6 +16,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -89,7 +92,9 @@ void IntegerProgrammingExample() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::IntegerProgrammingExample(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/linear_programming_example.cc b/ortools/linear_solver/samples/linear_programming_example.cc index fb9577bffa..80a1ec8aba 100644 --- a/ortools/linear_solver/samples/linear_programming_example.cc +++ b/ortools/linear_solver/samples/linear_programming_example.cc @@ -16,6 +16,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -80,7 +83,9 @@ void LinearProgrammingExample() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + 
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::LinearProgrammingExample(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/mip_var_array.cc b/ortools/linear_solver/samples/mip_var_array.cc index 4c4b99ecd6..eafe3056e3 100644 --- a/ortools/linear_solver/samples/mip_var_array.cc +++ b/ortools/linear_solver/samples/mip_var_array.cc @@ -16,6 +16,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -101,7 +104,9 @@ void MipVarArray() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::MipVarArray(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/multiple_knapsack_mip.cc b/ortools/linear_solver/samples/multiple_knapsack_mip.cc index 1cada90e92..ea9bf0327f 100644 --- a/ortools/linear_solver/samples/multiple_knapsack_mip.cc +++ b/ortools/linear_solver/samples/multiple_knapsack_mip.cc @@ -19,7 +19,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/linear_solver/linear_expr.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -129,6 +132,8 @@ void MultipleKnapsackMip() { } // namespace operations_research int main(int argc, char** argv) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::MultipleKnapsackMip(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/network_design_ilph_main.cc b/ortools/linear_solver/samples/network_design_ilph_main.cc index f4bdfe6dad..bdbed2a316 100644 --- a/ortools/linear_solver/samples/network_design_ilph_main.cc 
+++ b/ortools/linear_solver/samples/network_design_ilph_main.cc @@ -11,8 +11,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" #include "absl/log/check.h" +#include "absl/log/globals.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_format.h" @@ -29,6 +31,7 @@ using operations_research::MPSolver; int main(int argc, char* argv[]) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::CapacityPlanningInstance request; operations_research::CapacityPlanningProblem problem; ::absl::Status status = diff --git a/ortools/linear_solver/samples/simple_lp_program.cc b/ortools/linear_solver/samples/simple_lp_program.cc index 6b74bf59b0..b08569a977 100644 --- a/ortools/linear_solver/samples/simple_lp_program.cc +++ b/ortools/linear_solver/samples/simple_lp_program.cc @@ -17,6 +17,9 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ -81,7 +84,9 @@ void SimpleLpProgram() { } } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SimpleLpProgram(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/simple_mip_program.cc b/ortools/linear_solver/samples/simple_mip_program.cc index 5c16e3af88..1e3148919e 100644 --- a/ortools/linear_solver/samples/simple_mip_program.cc +++ b/ortools/linear_solver/samples/simple_mip_program.cc @@ -16,6 +16,9 @@ // [START import] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/linear_solver/linear_solver.h" // [END import] @@ 
-86,7 +89,9 @@ void SimpleMipProgram() { } } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SimpleMipProgram(); return EXIT_SUCCESS; } diff --git a/ortools/linear_solver/samples/stigler_diet.cc b/ortools/linear_solver/samples/stigler_diet.cc index b27544a312..5b8477f959 100644 --- a/ortools/linear_solver/samples/stigler_diet.cc +++ b/ortools/linear_solver/samples/stigler_diet.cc @@ -321,8 +321,8 @@ void StiglerDiet() { } // namespace operations_research int main(int argc, char** argv) { - absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::StiglerDiet(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/assignment_groups_sat.cc b/ortools/sat/samples/assignment_groups_sat.cc index a0d4661854..32704ee371 100644 --- a/ortools/sat/samples/assignment_groups_sat.cc +++ b/ortools/sat/samples/assignment_groups_sat.cc @@ -20,8 +20,11 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -190,7 +193,9 @@ void AssignmentGroups() { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::AssignmentGroups(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/assignment_sat.cc b/ortools/sat/samples/assignment_sat.cc index 7c46652850..b65785d3fe 100644 --- a/ortools/sat/samples/assignment_sat.cc +++ 
b/ortools/sat/samples/assignment_sat.cc @@ -17,6 +17,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -108,7 +111,9 @@ void IntegerProgrammingExample() { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::IntegerProgrammingExample(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/assignment_task_sizes_sat.cc b/ortools/sat/samples/assignment_task_sizes_sat.cc index 85beda4aad..dcd064fe0a 100644 --- a/ortools/sat/samples/assignment_task_sizes_sat.cc +++ b/ortools/sat/samples/assignment_task_sizes_sat.cc @@ -20,7 +20,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -133,7 +136,9 @@ void AssignmentTaskSizes() { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::AssignmentTaskSizes(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/assignment_teams_sat.cc b/ortools/sat/samples/assignment_teams_sat.cc index 26c33d5d6f..fdfbd900cd 100644 --- a/ortools/sat/samples/assignment_teams_sat.cc +++ b/ortools/sat/samples/assignment_teams_sat.cc @@ -19,7 +19,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include 
"ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -142,7 +145,9 @@ void AssignmentTeamsSat() { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::AssignmentTeamsSat(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/assumptions_sample_sat.cc b/ortools/sat/samples/assumptions_sample_sat.cc index 3f130de462..5ee369beba 100644 --- a/ortools/sat/samples/assumptions_sample_sat.cc +++ b/ortools/sat/samples/assumptions_sample_sat.cc @@ -15,7 +15,10 @@ // [START import] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -69,7 +72,9 @@ void AssumptionsSampleSat() { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::AssumptionsSampleSat(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/binpacking_problem_sat.cc b/ortools/sat/samples/binpacking_problem_sat.cc index a93884a709..d92fb94899 100644 --- a/ortools/sat/samples/binpacking_problem_sat.cc +++ b/ortools/sat/samples/binpacking_problem_sat.cc @@ -15,6 +15,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -91,8 +94,9 @@ void BinpackingProblemSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + 
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::BinpackingProblemSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/bool_or_sample_sat.cc b/ortools/sat/samples/bool_or_sample_sat.cc index 4907297272..e64e823f72 100644 --- a/ortools/sat/samples/bool_or_sample_sat.cc +++ b/ortools/sat/samples/bool_or_sample_sat.cc @@ -13,7 +13,10 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/sat/cp_model.h" namespace operations_research { @@ -32,8 +35,9 @@ void BoolOrSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::BoolOrSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/channeling_sample_sat.cc b/ortools/sat/samples/channeling_sample_sat.cc index bf0cf9b1da..a44f1f53f4 100644 --- a/ortools/sat/samples/channeling_sample_sat.cc +++ b/ortools/sat/samples/channeling_sample_sat.cc @@ -13,7 +13,10 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -66,8 +69,9 @@ void ChannelingSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::ChannelingSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/clone_model_sample_sat.cc b/ortools/sat/samples/clone_model_sample_sat.cc index 583c5eec0b..f7f6dfb5de 100644 --- a/ortools/sat/samples/clone_model_sample_sat.cc +++ 
b/ortools/sat/samples/clone_model_sample_sat.cc @@ -14,6 +14,9 @@ // [START program] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -65,9 +68,10 @@ void CloneModelSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::CloneModelSampleSat(); - return EXIT_SUCCESS; } // [END program] diff --git a/ortools/sat/samples/cp_is_fun_sat.cc b/ortools/sat/samples/cp_is_fun_sat.cc index 4faceef10a..2476907797 100644 --- a/ortools/sat/samples/cp_is_fun_sat.cc +++ b/ortools/sat/samples/cp_is_fun_sat.cc @@ -23,6 +23,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -109,7 +112,9 @@ void CPIsFunSat() { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::CPIsFunSat(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/cp_sat_example.cc b/ortools/sat/samples/cp_sat_example.cc index b13234147d..f0ab3fe0de 100644 --- a/ortools/sat/samples/cp_sat_example.cc +++ b/ortools/sat/samples/cp_sat_example.cc @@ -18,6 +18,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -80,7 +83,9 @@ void CpSatExample() { } // namespace sat } // namespace operations_research -int main() { 
+int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::CpSatExample(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/earliness_tardiness_cost_sample_sat.cc b/ortools/sat/samples/earliness_tardiness_cost_sample_sat.cc index 994c7217ad..c1eca56567 100644 --- a/ortools/sat/samples/earliness_tardiness_cost_sample_sat.cc +++ b/ortools/sat/samples/earliness_tardiness_cost_sample_sat.cc @@ -15,7 +15,10 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -71,8 +74,9 @@ void EarlinessTardinessCostSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::EarlinessTardinessCostSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/interval_sample_sat.cc b/ortools/sat/samples/interval_sample_sat.cc index 1692b4cbeb..e6bb25a09e 100644 --- a/ortools/sat/samples/interval_sample_sat.cc +++ b/ortools/sat/samples/interval_sample_sat.cc @@ -13,6 +13,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/util/sorted_interval_list.h" @@ -59,8 +62,9 @@ void IntervalSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::IntervalSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/literal_sample_sat.cc 
b/ortools/sat/samples/literal_sample_sat.cc index acc7f44d06..e1a752f2ae 100644 --- a/ortools/sat/samples/literal_sample_sat.cc +++ b/ortools/sat/samples/literal_sample_sat.cc @@ -13,6 +13,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" @@ -30,8 +33,9 @@ void LiteralSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::LiteralSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/minimal_jobshop_sat.cc b/ortools/sat/samples/minimal_jobshop_sat.cc index fe2185dafe..2c2ce5f123 100644 --- a/ortools/sat/samples/minimal_jobshop_sat.cc +++ b/ortools/sat/samples/minimal_jobshop_sat.cc @@ -24,7 +24,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -208,7 +211,9 @@ void MinimalJobshopSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::MinimalJobshopSat(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/multiple_knapsack_sat.cc b/ortools/sat/samples/multiple_knapsack_sat.cc index 030b2eec0e..c74a197200 100644 --- a/ortools/sat/samples/multiple_knapsack_sat.cc +++ b/ortools/sat/samples/multiple_knapsack_sat.cc @@ -21,7 +21,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include 
"ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -137,7 +140,9 @@ void MultipleKnapsackSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::MultipleKnapsackSat(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/no_overlap_sample_sat.cc b/ortools/sat/samples/no_overlap_sample_sat.cc index 987c240094..2bfd8db2e0 100644 --- a/ortools/sat/samples/no_overlap_sample_sat.cc +++ b/ortools/sat/samples/no_overlap_sample_sat.cc @@ -15,7 +15,10 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -85,8 +88,9 @@ void NoOverlapSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::NoOverlapSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/non_linear_sat.cc b/ortools/sat/samples/non_linear_sat.cc index 3328b19618..e61e8d001e 100644 --- a/ortools/sat/samples/non_linear_sat.cc +++ b/ortools/sat/samples/non_linear_sat.cc @@ -17,6 +17,9 @@ // [START program] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model_solver.h" @@ -58,7 +61,9 @@ void NonLinearSatProgram() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); 
operations_research::sat::NonLinearSatProgram(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/nqueens_sat.cc b/ortools/sat/samples/nqueens_sat.cc index 59c9724e09..c5f5161602 100644 --- a/ortools/sat/samples/nqueens_sat.cc +++ b/ortools/sat/samples/nqueens_sat.cc @@ -20,7 +20,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/numbers.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -111,7 +114,9 @@ void NQueensSat(const int board_size) { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); int board_size = 8; if (argc > 1) { if (!absl::SimpleAtoi(argv[1], &board_size)) { diff --git a/ortools/sat/samples/nurses_sat.cc b/ortools/sat/samples/nurses_sat.cc index 29197bf1bd..a92b626b44 100644 --- a/ortools/sat/samples/nurses_sat.cc +++ b/ortools/sat/samples/nurses_sat.cc @@ -23,7 +23,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -182,7 +185,9 @@ void NurseSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::NurseSat(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/optional_interval_sample_sat.cc b/ortools/sat/samples/optional_interval_sample_sat.cc index a4111a193e..a597933f3b 100644 --- a/ortools/sat/samples/optional_interval_sample_sat.cc +++ b/ortools/sat/samples/optional_interval_sample_sat.cc @@ 
-13,6 +13,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/util/sorted_interval_list.h" @@ -56,8 +59,9 @@ void OptionalIntervalSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::OptionalIntervalSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/rabbits_and_pheasants_sat.cc b/ortools/sat/samples/rabbits_and_pheasants_sat.cc index 631fecaeec..23358fe112 100644 --- a/ortools/sat/samples/rabbits_and_pheasants_sat.cc +++ b/ortools/sat/samples/rabbits_and_pheasants_sat.cc @@ -13,6 +13,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -45,8 +48,9 @@ void RabbitsAndPheasantsSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::RabbitsAndPheasantsSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/ranking_sample_sat.cc b/ortools/sat/samples/ranking_sample_sat.cc index 6558aa9446..5bf0b130ce 100644 --- a/ortools/sat/samples/ranking_sample_sat.cc +++ b/ortools/sat/samples/ranking_sample_sat.cc @@ -16,7 +16,10 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -155,8 +158,9 @@ void RankingSampleSat() { } // namespace sat } // namespace 
operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::RankingSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/reified_sample_sat.cc b/ortools/sat/samples/reified_sample_sat.cc index a2107952f6..af158deb11 100644 --- a/ortools/sat/samples/reified_sample_sat.cc +++ b/ortools/sat/samples/reified_sample_sat.cc @@ -13,7 +13,10 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/sat/cp_model.h" namespace operations_research { @@ -41,8 +44,9 @@ void ReifiedSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::ReifiedSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/schedule_requests_sat.cc b/ortools/sat/samples/schedule_requests_sat.cc index b42eb18af0..4c3f321611 100644 --- a/ortools/sat/samples/schedule_requests_sat.cc +++ b/ortools/sat/samples/schedule_requests_sat.cc @@ -23,7 +23,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/strings/str_format.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -227,7 +230,9 @@ void ScheduleRequestsSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::ScheduleRequestsSat(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/search_for_all_solutions_sample_sat.cc 
b/ortools/sat/samples/search_for_all_solutions_sample_sat.cc index 86bb142ee7..81c8582e0f 100644 --- a/ortools/sat/samples/search_for_all_solutions_sample_sat.cc +++ b/ortools/sat/samples/search_for_all_solutions_sample_sat.cc @@ -14,6 +14,9 @@ // [START program] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -68,9 +71,10 @@ void SearchAllSolutionsSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SearchAllSolutionsSampleSat(); - return EXIT_SUCCESS; } // [END program] diff --git a/ortools/sat/samples/simple_sat_program.cc b/ortools/sat/samples/simple_sat_program.cc index f0f771a868..15be3149e9 100644 --- a/ortools/sat/samples/simple_sat_program.cc +++ b/ortools/sat/samples/simple_sat_program.cc @@ -15,6 +15,9 @@ // [START import] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -62,7 +65,9 @@ void SimpleSatProgram() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SimpleSatProgram(); return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/solution_hinting_sample_sat.cc b/ortools/sat/samples/solution_hinting_sample_sat.cc index eddcb8a180..d1e15422a4 100644 --- a/ortools/sat/samples/solution_hinting_sample_sat.cc +++ b/ortools/sat/samples/solution_hinting_sample_sat.cc @@ -14,6 +14,9 @@ // [START program] #include +#include 
"absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -71,9 +74,10 @@ void SolutionHintingSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SolutionHintingSampleSat(); - return EXIT_SUCCESS; } // [END program] diff --git a/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.cc b/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.cc index ee8b19ec0e..fe567b0b4c 100644 --- a/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.cc +++ b/ortools/sat/samples/solve_and_print_intermediate_solutions_sample_sat.cc @@ -14,6 +14,9 @@ // [START program] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -67,9 +70,10 @@ void SolveAndPrintIntermediateSolutionsSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SolveAndPrintIntermediateSolutionsSampleSat(); - return EXIT_SUCCESS; } // [END program] diff --git a/ortools/sat/samples/solve_with_time_limit_sample_sat.cc b/ortools/sat/samples/solve_with_time_limit_sample_sat.cc index 2c9c265178..5f3618e6c5 100644 --- a/ortools/sat/samples/solve_with_time_limit_sample_sat.cc +++ b/ortools/sat/samples/solve_with_time_limit_sample_sat.cc @@ -14,6 +14,9 @@ // [START program] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include 
"ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -57,9 +60,10 @@ void SolveWithTimeLimitSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SolveWithTimeLimitSampleSat(); - return EXIT_SUCCESS; } // [END program] diff --git a/ortools/sat/samples/step_function_sample_sat.cc b/ortools/sat/samples/step_function_sample_sat.cc index e94a8cf955..f5bbbfe86d 100644 --- a/ortools/sat/samples/step_function_sample_sat.cc +++ b/ortools/sat/samples/step_function_sample_sat.cc @@ -13,7 +13,10 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -84,8 +87,9 @@ void StepFunctionSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::StepFunctionSampleSat(); - return EXIT_SUCCESS; } diff --git a/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc b/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc index 76c55ebe60..9adab9c7e2 100644 --- a/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc +++ b/ortools/sat/samples/stop_after_n_solutions_sample_sat.cc @@ -16,6 +16,9 @@ #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -64,9 +67,10 @@ void StopAfterNSolutionsSampleSat() { } // namespace sat } // namespace operations_research -int 
main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::StopAfterNSolutionsSampleSat(); - return EXIT_SUCCESS; } // [END program] diff --git a/ortools/set_cover/samples/set_cover.cc b/ortools/set_cover/samples/set_cover.cc index c5de9030b2..073ea958ac 100644 --- a/ortools/set_cover/samples/set_cover.cc +++ b/ortools/set_cover/samples/set_cover.cc @@ -15,7 +15,10 @@ // [START import] #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/set_cover/set_cover_heuristics.h" #include "ortools/set_cover/set_cover_invariant.h" #include "ortools/set_cover/set_cover_model.h" @@ -58,7 +61,9 @@ void SimpleSetCoverProgram() { } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SimpleSetCoverProgram(); return EXIT_SUCCESS; } From aac608e6e576686dda177040f8e590ccf3fcdfe1 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 16 May 2025 14:35:04 +0200 Subject: [PATCH 008/509] rework ortools/routing samples (#4590) --- ortools/routing/samples/cvrp_disjoint_tw.cc | 3 +++ ortools/routing/samples/cvrptw.cc | 3 +++ ortools/routing/samples/cvrptw_soft_capacity.cc | 3 +++ ortools/routing/samples/cvrptw_with_breaks.cc | 3 +++ ortools/routing/samples/cvrptw_with_precedences.cc | 3 +++ ortools/routing/samples/cvrptw_with_refueling.cc | 3 +++ ortools/routing/samples/cvrptw_with_resources.cc | 3 +++ .../samples/cvrptw_with_stop_times_and_resources.cc | 3 +++ .../routing/samples/cvrptw_with_time_dependent_costs.cc | 3 +++ ortools/routing/samples/simple_routing_program.cc | 7 ++++++- ortools/routing/samples/tsp.cc | 7 ++++++- ortools/routing/samples/tsp_circuit_board.cc | 7 ++++++- 
ortools/routing/samples/tsp_cities.cc | 7 ++++++- ortools/routing/samples/tsp_cities_routes.cc | 7 ++++++- ortools/routing/samples/tsp_distance_matrix.cc | 7 ++++++- ortools/routing/samples/vrp.cc | 7 ++++++- ortools/routing/samples/vrp_breaks.cc | 7 ++++++- ortools/routing/samples/vrp_capacity.cc | 7 ++++++- ortools/routing/samples/vrp_drop_nodes.cc | 7 ++++++- ortools/routing/samples/vrp_global_span.cc | 7 ++++++- ortools/routing/samples/vrp_initial_routes.cc | 7 ++++++- ortools/routing/samples/vrp_pickup_delivery.cc | 7 ++++++- ortools/routing/samples/vrp_pickup_delivery_fifo.cc | 7 ++++++- ortools/routing/samples/vrp_pickup_delivery_lifo.cc | 7 ++++++- ortools/routing/samples/vrp_resources.cc | 7 ++++++- ortools/routing/samples/vrp_routes.cc | 7 ++++++- ortools/routing/samples/vrp_solution_callback.cc | 7 ++++++- ortools/routing/samples/vrp_starts_ends.cc | 7 ++++++- ortools/routing/samples/vrp_time_windows.cc | 7 ++++++- ortools/routing/samples/vrp_with_time_limit.cc | 7 ++++++- ortools/routing/samples/vrptw_store_solution_data.cc | 7 ++++++- 31 files changed, 159 insertions(+), 22 deletions(-) diff --git a/ortools/routing/samples/cvrp_disjoint_tw.cc b/ortools/routing/samples/cvrp_disjoint_tw.cc index 419c5f1c27..18e51bd1ec 100644 --- a/ortools/routing/samples/cvrp_disjoint_tw.cc +++ b/ortools/routing/samples/cvrp_disjoint_tw.cc @@ -30,7 +30,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" @@ -74,6 +76,7 @@ const int64_t kSameVehicleCost = 1000; int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/cvrptw.cc 
b/ortools/routing/samples/cvrptw.cc index 3ebfb251a3..b1ab211b2e 100644 --- a/ortools/routing/samples/cvrptw.cc +++ b/ortools/routing/samples/cvrptw.cc @@ -28,7 +28,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" @@ -70,6 +72,7 @@ const int64_t kSameVehicleCost = 1000; int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/cvrptw_soft_capacity.cc b/ortools/routing/samples/cvrptw_soft_capacity.cc index d8ee454665..26db8df740 100644 --- a/ortools/routing/samples/cvrptw_soft_capacity.cc +++ b/ortools/routing/samples/cvrptw_soft_capacity.cc @@ -27,7 +27,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" @@ -78,6 +80,7 @@ const int64_t kSameVehicleCost = 1000; int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/cvrptw_with_breaks.cc b/ortools/routing/samples/cvrptw_with_breaks.cc index 99d67e1c6f..791be7a200 100644 --- a/ortools/routing/samples/cvrptw_with_breaks.cc +++ b/ortools/routing/samples/cvrptw_with_breaks.cc @@ -32,7 +32,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include 
"absl/strings/str_cat.h" #include "google/protobuf/text_format.h" @@ -76,6 +78,7 @@ const char* kCapacity = "Capacity"; int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/cvrptw_with_precedences.cc b/ortools/routing/samples/cvrptw_with_precedences.cc index d12de06bc3..bbf3f40d56 100644 --- a/ortools/routing/samples/cvrptw_with_precedences.cc +++ b/ortools/routing/samples/cvrptw_with_precedences.cc @@ -27,7 +27,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" @@ -78,6 +80,7 @@ const int64_t kSameVehicleCost = 1000; int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/cvrptw_with_refueling.cc b/ortools/routing/samples/cvrptw_with_refueling.cc index 97fdee0301..0a21a3e1d1 100644 --- a/ortools/routing/samples/cvrptw_with_refueling.cc +++ b/ortools/routing/samples/cvrptw_with_refueling.cc @@ -25,7 +25,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" @@ -72,6 +74,7 @@ bool IsRefuelNode(int64_t node) { int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance 
size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/cvrptw_with_resources.cc b/ortools/routing/samples/cvrptw_with_resources.cc index 79cb38bc5d..49312320d9 100644 --- a/ortools/routing/samples/cvrptw_with_resources.cc +++ b/ortools/routing/samples/cvrptw_with_resources.cc @@ -27,7 +27,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" @@ -69,6 +71,7 @@ const char* kCapacity = "Capacity"; int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc b/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc index 6aa3120e36..3d65ab7983 100644 --- a/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc +++ b/ortools/routing/samples/cvrptw_with_stop_times_and_resources.cc @@ -25,7 +25,9 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" #include "google/protobuf/text_format.h" @@ -69,6 +71,7 @@ const char* kCapacity = "Capacity"; int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_stops)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders_per_stop)) diff --git a/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc b/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc index ba3c7b9554..6c37c4e2b8 100644 --- 
a/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc +++ b/ortools/routing/samples/cvrptw_with_time_dependent_costs.cc @@ -21,8 +21,10 @@ #include #include +#include "absl/base/log_severity.h" #include "absl/flags/flag.h" #include "absl/functional/bind_front.h" +#include "absl/log/globals.h" #include "absl/random/random.h" #include "google/protobuf/text_format.h" #include "ortools/base/init_google.h" @@ -145,6 +147,7 @@ class TrafficTransitionEvaluator { int main(int argc, char** argv) { InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); CHECK_LT(0, absl::GetFlag(FLAGS_vrp_orders)) << "Specify an instance size greater than 0."; CHECK_LT(0, absl::GetFlag(FLAGS_vrp_vehicles)) diff --git a/ortools/routing/samples/simple_routing_program.cc b/ortools/routing/samples/simple_routing_program.cc index ce1fdfd12b..69b966ec5b 100644 --- a/ortools/routing/samples/simple_routing_program.cc +++ b/ortools/routing/samples/simple_routing_program.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -92,7 +95,9 @@ void SimpleRoutingProgram() { } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::SimpleRoutingProgram(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/tsp.cc b/ortools/routing/samples/tsp.cc index ffbbb096ce..739762e76d 100644 --- a/ortools/routing/samples/tsp.cc +++ b/ortools/routing/samples/tsp.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include 
"ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -154,7 +157,9 @@ void Tsp() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::Tsp(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/tsp_circuit_board.cc b/ortools/routing/samples/tsp_circuit_board.cc index fbb71c392b..2fc55e5ccd 100644 --- a/ortools/routing/samples/tsp_circuit_board.cc +++ b/ortools/routing/samples/tsp_circuit_board.cc @@ -19,7 +19,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -183,7 +186,9 @@ void Tsp() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::Tsp(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/tsp_cities.cc b/ortools/routing/samples/tsp_cities.cc index 5c9afb519e..72818dafae 100644 --- a/ortools/routing/samples/tsp_cities.cc +++ b/ortools/routing/samples/tsp_cities.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -129,7 +132,9 @@ void Tsp() { } // namespace operations_research::routing -int main(int /*argc*/, char* 
/*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::Tsp(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/tsp_cities_routes.cc b/ortools/routing/samples/tsp_cities_routes.cc index cc0b5a6ca7..c734004d5f 100644 --- a/ortools/routing/samples/tsp_cities_routes.cc +++ b/ortools/routing/samples/tsp_cities_routes.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -132,7 +135,9 @@ void Tsp() { } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::Tsp(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/tsp_distance_matrix.cc b/ortools/routing/samples/tsp_distance_matrix.cc index 5aa1818e53..a31e8f71b8 100644 --- a/ortools/routing/samples/tsp_distance_matrix.cc +++ b/ortools/routing/samples/tsp_distance_matrix.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -151,7 +154,9 @@ void Tsp() { } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::Tsp(); return EXIT_SUCCESS; } diff --git 
a/ortools/routing/samples/vrp.cc b/ortools/routing/samples/vrp.cc index 7eac20f7f5..21f598970f 100644 --- a/ortools/routing/samples/vrp.cc +++ b/ortools/routing/samples/vrp.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -166,7 +169,9 @@ void Vrp() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::Vrp(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_breaks.cc b/ortools/routing/samples/vrp_breaks.cc index 7f9bcd18dc..4337f0d12d 100644 --- a/ortools/routing/samples/vrp_breaks.cc +++ b/ortools/routing/samples/vrp_breaks.cc @@ -25,8 +25,11 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" #include "absl/strings/str_cat.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -205,7 +208,9 @@ void VrpBreaks() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpBreaks(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_capacity.cc b/ortools/routing/samples/vrp_capacity.cc index 0981924ddf..08aef67f04 100644 --- a/ortools/routing/samples/vrp_capacity.cc +++ b/ortools/routing/samples/vrp_capacity.cc @@ -18,8 +18,11 @@ #include #include +#include "absl/base/log_severity.h" 
+#include "absl/log/globals.h" #include "absl/log/log.h" #include "google/protobuf/duration.pb.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -191,7 +194,9 @@ void VrpCapacity() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpCapacity(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_drop_nodes.cc b/ortools/routing/samples/vrp_drop_nodes.cc index d88ec5d87d..8d00899255 100644 --- a/ortools/routing/samples/vrp_drop_nodes.cc +++ b/ortools/routing/samples/vrp_drop_nodes.cc @@ -18,8 +18,11 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" #include "google/protobuf/duration.pb.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -207,7 +210,9 @@ void VrpDropNodes() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpDropNodes(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_global_span.cc b/ortools/routing/samples/vrp_global_span.cc index e8cef1ecb6..c4c2f8ba21 100644 --- a/ortools/routing/samples/vrp_global_span.cc +++ b/ortools/routing/samples/vrp_global_span.cc @@ -19,7 +19,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" 
#include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -170,7 +173,9 @@ void VrpGlobalSpan() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpGlobalSpan(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_initial_routes.cc b/ortools/routing/samples/vrp_initial_routes.cc index 1da4adc15c..08c7cc990e 100644 --- a/ortools/routing/samples/vrp_initial_routes.cc +++ b/ortools/routing/samples/vrp_initial_routes.cc @@ -19,8 +19,11 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" #include "google/protobuf/duration.pb.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -195,7 +198,9 @@ void VrpInitialRoutes() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpInitialRoutes(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_pickup_delivery.cc b/ortools/routing/samples/vrp_pickup_delivery.cc index 2470d67237..257163690e 100644 --- a/ortools/routing/samples/vrp_pickup_delivery.cc +++ b/ortools/routing/samples/vrp_pickup_delivery.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -202,7 +205,9 @@ void VrpGlobalSpan() { } } // namespace 
operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpGlobalSpan(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_pickup_delivery_fifo.cc b/ortools/routing/samples/vrp_pickup_delivery_fifo.cc index db90243d35..6ba76c2d9e 100644 --- a/ortools/routing/samples/vrp_pickup_delivery_fifo.cc +++ b/ortools/routing/samples/vrp_pickup_delivery_fifo.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -204,7 +207,9 @@ void VrpGlobalSpan() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpGlobalSpan(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_pickup_delivery_lifo.cc b/ortools/routing/samples/vrp_pickup_delivery_lifo.cc index c5f6a5bb8a..9a02fda062 100644 --- a/ortools/routing/samples/vrp_pickup_delivery_lifo.cc +++ b/ortools/routing/samples/vrp_pickup_delivery_lifo.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -204,7 +207,9 @@ void VrpGlobalSpan() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + 
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpGlobalSpan(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_resources.cc b/ortools/routing/samples/vrp_resources.cc index f00e307b12..2e5497d16d 100644 --- a/ortools/routing/samples/vrp_resources.cc +++ b/ortools/routing/samples/vrp_resources.cc @@ -20,7 +20,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -226,7 +229,9 @@ void VrpTimeWindows() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpTimeWindows(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_routes.cc b/ortools/routing/samples/vrp_routes.cc index c6f13c913a..bf477de3f1 100644 --- a/ortools/routing/samples/vrp_routes.cc +++ b/ortools/routing/samples/vrp_routes.cc @@ -18,7 +18,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -156,7 +159,9 @@ void Vrp() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::Vrp(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_solution_callback.cc b/ortools/routing/samples/vrp_solution_callback.cc index 0e2781f0c6..b5571b1037 
100644 --- a/ortools/routing/samples/vrp_solution_callback.cc +++ b/ortools/routing/samples/vrp_solution_callback.cc @@ -20,8 +20,11 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" #include "google/protobuf/duration.pb.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -221,7 +224,9 @@ void VrpSolutionCallback() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpSolutionCallback(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_starts_ends.cc b/ortools/routing/samples/vrp_starts_ends.cc index 665ea5f772..93f02e316e 100644 --- a/ortools/routing/samples/vrp_starts_ends.cc +++ b/ortools/routing/samples/vrp_starts_ends.cc @@ -19,7 +19,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -178,7 +181,9 @@ void VrpStartsEnds() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpStartsEnds(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_time_windows.cc b/ortools/routing/samples/vrp_time_windows.cc index 4e03027f85..df4a7d8ab0 100644 --- a/ortools/routing/samples/vrp_time_windows.cc +++ b/ortools/routing/samples/vrp_time_windows.cc @@ -20,7 +20,10 @@ #include #include +#include 
"absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -200,7 +203,9 @@ void VrpTimeWindows() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpTimeWindows(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrp_with_time_limit.cc b/ortools/routing/samples/vrp_with_time_limit.cc index be3f137b41..06e1700b21 100644 --- a/ortools/routing/samples/vrp_with_time_limit.cc +++ b/ortools/routing/samples/vrp_with_time_limit.cc @@ -18,8 +18,11 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" #include "google/protobuf/duration.pb.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -129,7 +132,9 @@ void VrpGlobalSpan() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpGlobalSpan(); return EXIT_SUCCESS; } diff --git a/ortools/routing/samples/vrptw_store_solution_data.cc b/ortools/routing/samples/vrptw_store_solution_data.cc index 8750373588..70d15c0bb8 100644 --- a/ortools/routing/samples/vrptw_store_solution_data.cc +++ b/ortools/routing/samples/vrptw_store_solution_data.cc @@ -20,7 +20,10 @@ #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" 
#include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -240,7 +243,9 @@ void VrpTimeWindows() { } } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::VrpTimeWindows(); return EXIT_SUCCESS; } From b29f4075f691b3f14042860ca2f9c5c4d8bbc917 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 16 May 2025 16:47:50 +0200 Subject: [PATCH 009/509] [CP-SAT] use precedences in completion time cuts; improve glue clause sharing --- ortools/sat/BUILD.bazel | 2 + ortools/sat/cp_model_solver.cc | 4 +- ortools/sat/cp_model_solver_helpers.cc | 88 ++++---- ortools/sat/cp_model_solver_test.cc | 11 +- ortools/sat/diffn.cc | 2 +- ortools/sat/parameters_validation.cc | 2 + ortools/sat/sat_parameters.proto | 5 +- ortools/sat/scheduling_cuts.cc | 228 +++++++++++++------ ortools/sat/scheduling_cuts.h | 35 ++- ortools/sat/scheduling_cuts_test.cc | 69 +++--- ortools/sat/synchronization.cc | 291 +++++++++---------------- ortools/sat/synchronization.h | 151 ++++++------- ortools/sat/synchronization_test.cc | 231 +++++++------------- 13 files changed, 548 insertions(+), 571 deletions(-) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index f65627520b..b9ad165420 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -2893,6 +2893,7 @@ cc_library( ":linear_constraint", ":linear_constraint_manager", ":model", + ":precedences", ":sat_base", ":sat_solver", ":scheduling_helpers", @@ -3548,6 +3549,7 @@ cc_library( "//ortools/util:strong_integers", "//ortools/util:time_limit", "@abseil-cpp//absl/container:flat_hash_set", + "@abseil-cpp//absl/container:inlined_vector", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:vlog_is_on", diff --git 
a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index dcf164ef21..2928764a79 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1127,7 +1127,9 @@ class FullProblemSolver : public SubSolver { // Note that this is done after the loading, so we will never export // problem clauses. if (shared_->clauses != nullptr) { - const int id = shared_->clauses->RegisterNewId(); + const int id = shared_->clauses->RegisterNewId( + /*may_terminate_early=*/stop_at_first_solution_ && + local_model_.GetOrCreate()->has_objective()); shared_->clauses->SetWorkerNameForId(id, local_model_.Name()); RegisterClausesLevelZeroImport(id, shared_->clauses.get(), diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index d889246bfe..030cf124b5 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -953,27 +953,31 @@ void RegisterClausesExport(int id, SharedClausesManager* shared_clauses_manager, if (!model->GetOrCreate()->share_glue_clauses()) { return; } - auto* clause_stream = shared_clauses_manager->GetClauseStream(id); - const int max_lbd = - model->GetOrCreate()->clause_cleanup_lbd_bound(); - // Note that this callback takes no global locks, everything operates on this - // worker's own clause stream, whose lock is only used by this worker, and - // briefly when generating a batch in SharedClausesManager::Synchronize(). 
- auto share_clause = [mapping, clause_stream, max_lbd, - clause = std::vector()]( + const double share_interval = + model->GetOrCreate()->share_glue_clauses_dtime(); + auto* clause_stream = model->GetOrCreate(); + auto* time_limit = model->GetOrCreate(); + auto share_clause = [mapping, clause_stream, time_limit, id, + shared_clauses_manager, share_interval, + next_batch_dtime = -1.0, clause = std::vector()]( int lbd, absl::Span literals) mutable { - if (lbd <= 0 || lbd > max_lbd || - !clause_stream->CanAccept(literals.size(), lbd)) { - return; + if (literals.size() >= UniqueClauseStream::kMinClauseSize && + literals.size() <= UniqueClauseStream::kMaxClauseSize) { + clause.clear(); + for (const Literal& lit : literals) { + const int var = + mapping->GetProtoVariableFromBooleanVariable(lit.Variable()); + if (var == -1) return; + clause.push_back(lit.IsPositive() ? var : NegatedRef(var)); + } + clause_stream->Add(clause, lbd); } - clause.clear(); - for (const Literal& lit : literals) { - const int var = - mapping->GetProtoVariableFromBooleanVariable(lit.Variable()); - if (var == -1) return; - clause.push_back(lit.IsPositive() ? var : NegatedRef(var)); + const double elapsed_dtime = time_limit->GetElapsedDeterministicTime(); + if (next_batch_dtime < 0) next_batch_dtime = elapsed_dtime + share_interval; + if (elapsed_dtime >= next_batch_dtime) { + shared_clauses_manager->AddBatch(id, clause_stream->NextBatch()); + next_batch_dtime = elapsed_dtime + share_interval; } - clause_stream->Add(clause); }; model->GetOrCreate()->SetAddClauseCallback( std::move(share_clause)); @@ -994,16 +998,16 @@ int RegisterClausesLevelZeroImport(int id, auto* implications = model->GetOrCreate(); const bool share_glue_clauses = model->GetOrCreate()->share_glue_clauses(); + auto* clause_stream = + share_glue_clauses ? model->GetOrCreate() : nullptr; const bool minimize_shared_clauses = model->GetOrCreate()->minimize_shared_clauses(); - auto* clause_stream = share_glue_clauses - ? 
shared_clauses_manager->GetClauseStream(id) - : nullptr; auto* clause_manager = model->GetOrCreate(); const auto& import_level_zero_clauses = [shared_clauses_manager, id, mapping, sat_solver, implications, - clause_stream, clause_manager, - minimize_shared_clauses]() { + minimize_shared_clauses, + clause_stream, + clause_manager]() mutable { std::vector> new_binary_clauses; shared_clauses_manager->GetUnseenBinaryClauses(id, &new_binary_clauses); implications->EnableSharing(false); @@ -1020,28 +1024,27 @@ int RegisterClausesLevelZeroImport(int id, int new_clauses = 0; std::array local_clause; sat_solver->EnsureNewClauseIndexInitialized(); - // Temporarily disable clause sharing so we don't immediately re-export the - // clauses we just imported. + // Temporarily disable clause sharing. auto callback = clause_manager->TakeAddClauseCallback(); - for (const absl::Span shared_clause : - shared_clauses_manager->GetUnseenClauses(id)) { - // Check this clause was not already learned by this worker. - // We can delete the fingerprint because we should not learn an identical - // clause, and the global stream will not emit the same clause while any - // worker hasn't consumed this clause (and thus also shouldn't relearn the - // clause). - if (clause_stream->Delete(shared_clause)) continue; - for (int i = 0; i < shared_clause.size(); ++i) { - local_clause[i] = mapping->Literal(shared_clause[i]); + while (true) { + auto batch = shared_clauses_manager->GetUnseenClauses(id); + if (batch.empty()) break; + for (int clause_index = 0; clause_index < batch.size(); ++clause_index) { + const absl::Span& shared_clause = batch[clause_index]; + // Check this clause was not already learned by this worker. 
+ if (!clause_stream->BlockClause(shared_clause)) continue; + ++new_clauses; + for (int i = 0; i < shared_clause.size(); ++i) { + local_clause[i] = mapping->Literal(shared_clause[i]); + } + if (!sat_solver->AddProblemClause( + absl::MakeSpan(local_clause) + .subspan(0, shared_clause.size()))) { + return false; + } } - if (!sat_solver->AddProblemClause( - absl::MakeSpan(local_clause).subspan(0, shared_clause.size()))) { - return false; - } - ++new_clauses; } clause_manager->SetAddClauseCallback(std::move(callback)); - clause_stream->RemoveWorstClauses(); if (minimize_shared_clauses && new_clauses > 0) { // The new clauses may be subsumed, so try to minimize them to reduce // overhead of sharing. @@ -2110,8 +2113,7 @@ SharedClasses::SharedClasses(const CpModelProto* proto, Model* global_model) !params.interleave_search() || params.num_workers() <= 1; response->SetSynchronizationMode(always_synchronize); if (params.share_binary_clauses() && params.num_workers() > 1) { - clauses = std::make_unique(always_synchronize, - absl::Seconds(1)); + clauses = std::make_unique(always_synchronize); } } diff --git a/ortools/sat/cp_model_solver_test.cc b/ortools/sat/cp_model_solver_test.cc index 205dd64543..96fb307224 100644 --- a/ortools/sat/cp_model_solver_test.cc +++ b/ortools/sat/cp_model_solver_test.cc @@ -87,9 +87,9 @@ TEST(LoadCpModelTest, PureSatProblem) { TEST(LoadCpModelTest, PureSatProblemWithLimit) { const CpModelProto model_proto = Random3SatProblem(500); LOG(INFO) << CpModelStats(model_proto); - Model model; - model.Add(NewSatParameters("max_deterministic_time:0.00001")); - const CpSolverResponse response = SolveCpModel(model_proto, &model); + SatParameters params; + params.set_max_deterministic_time(0.00001); + const CpSolverResponse response = SolveWithParameters(model_proto, params); EXPECT_EQ(response.status(), CpSolverStatus::UNKNOWN); LOG(INFO) << CpSolverResponseStats(response); } @@ -193,7 +193,8 @@ TEST(LoadCpModelTest, SimpleCumulative) { } 
TEST(SolverCpModelTest, EmptyModel) { - const CpModelProto cp_model = ParseTestProto("solution_hint {}"); + CpModelProto cp_model; + cp_model.mutable_solution_hint(); SatParameters params; params.set_debug_crash_if_presolve_breaks_hint(true); @@ -329,6 +330,7 @@ TEST(SolveCpModelTest, TrivialModelWithCore) { response.solution().end()))); } +#if !defined(__EMBEDDED_PLATFORM__) TEST(SolveCpModelTest, TrivialLinearTranslatedModel) { const CpModelProto model_proto = ParseTestProto(R"pb( variables { domain: -10 domain: 10 } @@ -4803,6 +4805,7 @@ TEST(PresolveCpModelTest, CumulativeBug4) { response = SolveWithParameters(cp_model, params); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); } +#endif // !defined(__EMBEDDED_PLATFORM__) } // namespace } // namespace sat diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 1e86f0cd4f..23e85a04ff 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -32,7 +32,7 @@ #include "absl/log/vlog_is_on.h" #include "absl/numeric/bits.h" #include "absl/types/span.h" -#include "ortools/base/stl_util.h" +// #include "ortools/base/stl_util.h" #include "ortools/sat/2d_mandatory_overlap_propagator.h" #include "ortools/sat/2d_orthogonal_packing.h" #include "ortools/sat/2d_try_edge_propagator.h" diff --git a/ortools/sat/parameters_validation.cc b/ortools/sat/parameters_validation.cc index fc889abcf3..36af16ffab 100644 --- a/ortools/sat/parameters_validation.cc +++ b/ortools/sat/parameters_validation.cc @@ -87,6 +87,7 @@ std::string ValidateParameters(const SatParameters& params) { TEST_IS_FINITE(relative_gap_limit); TEST_IS_FINITE(restart_dl_average_ratio); TEST_IS_FINITE(restart_lbd_average_ratio); + TEST_IS_FINITE(share_glue_clauses_dtime); TEST_IS_FINITE(shared_tree_open_leaves_per_worker); TEST_IS_FINITE(shaving_deterministic_time_in_probing_search); TEST_IS_FINITE(shaving_search_deterministic_time); @@ -156,6 +157,7 @@ std::string ValidateParameters(const SatParameters& params) { 
TEST_NON_NEGATIVE(presolve_probing_deterministic_time_limit); TEST_NON_NEGATIVE(probing_deterministic_time_limit); TEST_NON_NEGATIVE(symmetry_detection_deterministic_time_limit); + TEST_POSITIVE(share_glue_clauses_dtime); if (params.enumerate_all_solutions() && (params.num_search_workers() > 1 || params.num_workers() > 1)) { diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 0f28a03181..c42f5a405e 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -24,7 +24,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 322 +// NEXT TAG: 323 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -705,6 +705,9 @@ message SatParameters { // are imported. optional bool minimize_shared_clauses = 300 [default = true]; + // The amount of dtime between each export of shared glue clauses. 
+ optional double share_glue_clauses_dtime = 322 [default = 1.0]; + // ========================================================================== // Debugging parameters // ========================================================================== diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 80a5fea030..5fd29afe20 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -43,6 +43,7 @@ #include "ortools/sat/linear_constraint.h" #include "ortools/sat/linear_constraint_manager.h" #include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_solver.h" #include "ortools/sat/scheduling_helpers.h" @@ -1053,15 +1054,16 @@ CutGenerator CreateNoOverlapPrecedenceCutGenerator( } CompletionTimeEvent::CompletionTimeEvent(int t, - SchedulingConstraintHelper* x_helper, + SchedulingConstraintHelper* helper, SchedulingDemandHelper* demands_helper) : task_index(t), - start_min(x_helper->StartMin(t)), - start_max(x_helper->StartMax(t)), - end_min(x_helper->EndMin(t)), - end_max(x_helper->EndMax(t)), - size_min(x_helper->SizeMin(t)), - end(x_helper->Ends()[t]) { + start_min(helper->StartMin(t)), + start_max(helper->StartMax(t)), + end_min(helper->EndMin(t)), + end_max(helper->EndMax(t)), + size_min(helper->SizeMin(t)), + start(helper->Starts()[t]), + end(helper->Ends()[t]) { if (demands_helper == nullptr) { demand_min = 1; demand_is_fixed = true; @@ -1099,20 +1101,68 @@ std::string CompletionTimeEvent::DebugString() const { "]"); } +void CtExhaustiveHelper::Init( + const absl::Span events, Model* model) { + BinaryRelationsMaps* binary_relations = + model->GetOrCreate(); + max_task_index_ = 0; + for (const auto& event : events) { + max_task_index_ = std::max(max_task_index_, event.task_index); + } + predecessors_.reserve(max_task_index_ + 1); + for (const auto& e1 : events) { + CHECK_LE(predecessors_.size(), e1.task_index); + while (predecessors_.size() <= 
e1.task_index) { + predecessors_.Add({}); + } + + // Cap the number of precedences to avoid O(n^2) time complexity. + if (predecessors_.num_entries() > 20000) break; + + for (const auto& e2 : events) { + if (e2.task_index == e1.task_index) continue; + if (binary_relations->GetPrecedenceStatus(e2.end, e1.start) == + RelationStatus::IS_TRUE) { + predecessors_.AppendToLastVector(e2.task_index); + } + } + } + VLOG(2) << "num_tasks:" << max_task_index_ + 1 + << " num_precedences:" << predecessors_.num_entries(); +} + +bool CtExhaustiveHelper::PermutationIsCompatibleWithPrecedences( + absl::Span events, + absl::Span permutation) { + visited_.assign(max_task_index_ + 1, false); + for (int i = permutation.size() - 1; i >= 0; --i) { + const CompletionTimeEvent& event = events[permutation[i]]; + for (const int predecessor : predecessors_[event.task_index]) { + if (visited_[predecessor]) return false; + } + visited_[event.task_index] = true; + } + return true; +} + namespace { bool ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap( absl::Span events, absl::Span permutation, IntegerValue& sum_of_ends, IntegerValue& sum_of_weighted_ends) { + // Reset the two sums. sum_of_ends = 0; sum_of_weighted_ends = 0; + + // Loop over the permutation. IntegerValue end_min_of_previous_task = kMinIntegerValue; for (const int index : permutation) { const CompletionTimeEvent& event = events[index]; const IntegerValue threshold = std::max(event.start_min, end_min_of_previous_task); if (event.start_max < threshold) return false; // Infeasible. 
+ end_min_of_previous_task = threshold + event.size_min; sum_of_ends += end_min_of_previous_task; sum_of_weighted_ends += event.energy_min * end_min_of_previous_task; @@ -1131,9 +1181,8 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap( bool ComputeWeightedSumOfEndMinsOfOnePermutation( absl::Span events, absl::Span permutation, IntegerValue capacity_max, - IntegerValue& sum_of_ends, IntegerValue& sum_of_weighted_ends, - std::vector>& profile, - std::vector>& new_profile) { + CtExhaustiveHelper& helper, IntegerValue& sum_of_ends, + IntegerValue& sum_of_weighted_ends, bool& cut_use_precedences) { DCHECK_EQ(permutation.size(), events.size()); if (capacity_max == 1) { @@ -1141,11 +1190,11 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( events, permutation, sum_of_ends, sum_of_weighted_ends); } - // Set default values. + // Reset the two sums. sum_of_ends = 0; sum_of_weighted_ends = 0; - // Is the permutation feasible ? + // Quick check to see if the permutation feasible: // ei = events[permutation[i]], ej = events[permutation[j]], i < j // - start_max(ej) >= start_min(ei) IntegerValue demand_min_of_previous_task = 0; @@ -1161,6 +1210,7 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( if (event.start_max < threshold) { return false; } + start_min_of_previous_task = threshold; end_min_of_previous_task = threshold + event.size_min; demand_min_of_previous_task = event.demand_min; @@ -1168,9 +1218,12 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( // The profile (and new profile) is a set of (time, capa_left) pairs, // ordered by increasing time and capa_left. - profile.clear(); - profile.emplace_back(kMinIntegerValue, capacity_max); - profile.emplace_back(kMaxIntegerValue, capacity_max); + helper.profile_.clear(); + helper.profile_.emplace_back(kMinIntegerValue, capacity_max); + helper.profile_.emplace_back(kMaxIntegerValue, capacity_max); + + // Loop over the permutation. 
+ helper.assigned_ends_.assign(helper.max_task_index() + 1, kMinIntegerValue); IntegerValue start_of_previous_task = kMinIntegerValue; for (const int index : permutation) { const CompletionTimeEvent& event = events[index]; @@ -1180,15 +1233,30 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( // Iterate on the profile to find the step that contains start_min. // Then push until we find a step with enough capacity. int current = 0; - while (profile[current + 1].first <= start_min || - profile[current].second < event.demand_min) { + while (helper.profile_[current + 1].first <= start_min || + helper.profile_[current].second < event.demand_min) { ++current; } - const IntegerValue actual_start = - std::max(start_min, profile[current].first); + IntegerValue actual_start = + std::max(start_min, helper.profile_[current].first); + const IntegerValue initial_start_min = actual_start; - start_of_previous_task = actual_start; + // Propagate precedences. + // + // helper.predecessors() can be truncated. We need to be careful here. + if (event.task_index < helper.predecessors().size()) { + for (const int predecessor : helper.predecessors()[event.task_index]) { + if (helper.assigned_ends_[predecessor] == kMinIntegerValue) continue; + actual_start = + std::max(actual_start, helper.assigned_ends_[predecessor]); + } + } + + if (actual_start > initial_start_min) { + cut_use_precedences = true; + VLOG(3) << "push from " << initial_start_min << " to " << actual_start; + } // Compatible with the event.start_max ? if (actual_start > event.start_max) { @@ -1197,33 +1265,37 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( const IntegerValue actual_end = actual_start + event.size_min; + // Bookkeeping. + helper.assigned_ends_[event.task_index] = actual_end; sum_of_ends += actual_end; sum_of_weighted_ends += event.energy_min * actual_end; + start_of_previous_task = actual_start; // No need to update the profile on the last loop. 
if (event.task_index == events[permutation.back()].task_index) break; // Update the profile. - new_profile.clear(); - new_profile.push_back( - {actual_start, profile[current].second - event.demand_min}); + helper.new_profile_.clear(); + helper.new_profile_.push_back( + {actual_start, helper.profile_[current].second - event.demand_min}); ++current; - while (profile[current].first < actual_end) { - new_profile.push_back( - {profile[current].first, profile[current].second - event.demand_min}); + while (helper.profile_[current].first < actual_end) { + helper.new_profile_.push_back( + {helper.profile_[current].first, + helper.profile_[current].second - event.demand_min}); ++current; } - if (profile[current].first > actual_end) { - new_profile.push_back( - {actual_end, new_profile.back().second + event.demand_min}); + if (helper.profile_[current].first > actual_end) { + helper.new_profile_.push_back( + {actual_end, helper.new_profile_.back().second + event.demand_min}); } - while (current < profile.size()) { - new_profile.push_back(profile[current]); + while (current < helper.profile_.size()) { + helper.new_profile_.push_back(helper.profile_[current]); ++current; } - profile.swap(new_profile); + helper.profile_.swap(helper.new_profile_); } return true; } @@ -1232,36 +1304,37 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( bool ComputeMinSumOfWeightedEndMins( absl::Span events, IntegerValue capacity_max, - double sum_of_ends_lp, double sum_of_weighted_ends_lp, - IntegerValue& min_sum_of_end_mins, - IntegerValue& min_sum_of_weighted_end_mins) { + double unweighted_threshold, double weighted_threshold, + CtExhaustiveHelper& helper, double& min_sum_of_ends, + double& min_sum_of_weighted_ends, bool& cut_use_precedences) { + // Reset the events based sums. + min_sum_of_ends = std::numeric_limits::max(); + min_sum_of_weighted_ends = std::numeric_limits::max(); + + // Local stats. 
int num_explored = 0; int num_pruned = 0; - min_sum_of_end_mins = kMaxIntegerValue; - min_sum_of_weighted_end_mins = kMaxIntegerValue; bool aborted = false; - const int64_t unweighted_threshold = - static_cast(std::floor(sum_of_ends_lp + kMinCutViolation)); - const int64_t weighted_threshold = static_cast( - std::floor(sum_of_weighted_ends_lp + kMinCutViolation)); - // Reusable storage for ComputeWeightedSumOfEndMinsOfOnePermutation(). - std::vector> profile; - std::vector> new_profile; std::vector permutation(events.size()); std::iota(permutation.begin(), permutation.end(), 0); do { - IntegerValue sum_of_ends(0); - IntegerValue sum_of_weighted_ends(0); + IntegerValue sum_of_ends = 0; + IntegerValue sum_of_weighted_ends = 0; + if (!helper.PermutationIsCompatibleWithPrecedences(events, permutation)) { + cut_use_precedences = true; + continue; + } + if (ComputeWeightedSumOfEndMinsOfOnePermutation( - events, permutation, capacity_max, sum_of_ends, - sum_of_weighted_ends, profile, new_profile)) { - min_sum_of_end_mins = std::min(sum_of_ends, min_sum_of_end_mins); - min_sum_of_weighted_end_mins = - std::min(sum_of_weighted_ends, min_sum_of_weighted_end_mins); + events, permutation, capacity_max, helper, sum_of_ends, + sum_of_weighted_ends, cut_use_precedences)) { + min_sum_of_ends = std::min(ToDouble(sum_of_ends), min_sum_of_ends); + min_sum_of_weighted_ends = + std::min(ToDouble(sum_of_weighted_ends), min_sum_of_weighted_ends); num_explored++; - if (min_sum_of_end_mins <= unweighted_threshold && - min_sum_of_weighted_end_mins <= weighted_threshold) { + if (min_sum_of_ends <= unweighted_threshold && + min_sum_of_weighted_ends <= weighted_threshold) { aborted = true; break; } @@ -1271,8 +1344,8 @@ bool ComputeMinSumOfWeightedEndMins( } while (std::next_permutation(permutation.begin(), permutation.end())); VLOG(3) << "DP: size=" << events.size() << ", explored = " << num_explored << ", pruned = " << num_pruned << ", aborted = " << aborted - << ", min_sum_of_end_mins = " 
<< min_sum_of_end_mins - << ", min_sum_of_weighted_end_mins = " << min_sum_of_weighted_end_mins + << ", min_sum_of_end_mins = " << min_sum_of_ends + << ", min_sum_of_weighted_end_mins = " << min_sum_of_weighted_ends << ", unweighted_threshold = " << unweighted_threshold << ", weighted_threshold = " << weighted_threshold; return num_explored > 0; @@ -1283,7 +1356,8 @@ bool ComputeMinSumOfWeightedEndMins( // - better caching of explored states ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( const std::string& cut_name, std::vector events, - IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) { + IntegerValue capacity_max, CtExhaustiveHelper& helper, Model* model, + LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); // Sort by start min to bucketize by start_min. std::sort( @@ -1298,6 +1372,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( continue; } + bool cut_use_precedences = false; // Used for naming the cut. 
const IntegerValue sequence_start_min = events[start].start_min; std::vector residual_tasks(events.begin() + start, events.end()); @@ -1321,40 +1396,43 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( double sum_of_ends_lp = 0.0; double sum_of_weighted_ends_lp = 0.0; IntegerValue sum_of_demands = 0; - IntegerValue sum_of_energies = 0; + double sum_of_square_energies = 0; + double min_sum_of_ends = std::numeric_limits::max(); + double min_sum_of_weighted_ends = std::numeric_limits::max(); for (int i = 0; i < std::min(residual_tasks.size(), 7); ++i) { const CompletionTimeEvent& event = residual_tasks[i]; + const double energy = ToDouble(event.energy_min); sum_of_ends_lp += event.lp_end; - sum_of_weighted_ends_lp += event.lp_end * ToDouble(event.energy_min); + sum_of_weighted_ends_lp += event.lp_end * energy; sum_of_demands += event.demand_min; - sum_of_energies += event.energy_min; + sum_of_square_energies += energy * energy; // Both cases with 1 or 2 tasks are trivial and independent of the order. // Also, if capacity is not exceeded, pushing all ends left is a valid LP // assignment. 
if (i <= 1 || sum_of_demands <= capacity_max) continue; - IntegerValue min_sum_of_end_mins = kMaxIntegerValue; - IntegerValue min_sum_of_weighted_end_mins = kMaxIntegerValue; if (!ComputeMinSumOfWeightedEndMins( absl::MakeSpan(residual_tasks).first(i + 1), capacity_max, - sum_of_ends_lp, sum_of_weighted_ends_lp, min_sum_of_end_mins, - min_sum_of_weighted_end_mins)) { + /* unweighted_threshold= */ sum_of_ends_lp + kMinCutViolation, + /* weighted_threshold= */ sum_of_weighted_ends_lp + + kMinCutViolation, + helper, min_sum_of_ends, min_sum_of_weighted_ends, + cut_use_precedences)) { return false; } const double unweigthed_violation = - (ToDouble(min_sum_of_end_mins) - sum_of_ends_lp) / ToDouble(i + 1); + (min_sum_of_ends - sum_of_ends_lp) / std::sqrt(ToDouble(i + 1)); const double weighted_violation = - (ToDouble(min_sum_of_weighted_end_mins) - sum_of_weighted_ends_lp) / - ToDouble(sum_of_energies); + (min_sum_of_weighted_ends - sum_of_weighted_ends_lp) / + std::sqrt(sum_of_square_energies); // Unweighted cuts. if (unweigthed_violation > weighted_violation && unweigthed_violation > kMinCutViolation) { - LinearConstraintBuilder cut(model, min_sum_of_end_mins, - kMaxIntegerValue); + LinearConstraintBuilder cut(model, min_sum_of_ends, kMaxIntegerValue); bool is_lifted = false; for (int j = 0; j <= i; ++j) { const CompletionTimeEvent& event = residual_tasks[j]; @@ -1362,6 +1440,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( cut.AddTerm(event.end, IntegerValue(1)); } std::string full_name = cut_name; + if (cut_use_precedences) full_name.append("_prec"); if (is_lifted) full_name.append("_lifted"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } @@ -1369,7 +1448,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( // Weighted cuts. 
if (weighted_violation >= unweigthed_violation && weighted_violation > kMinCutViolation) { - LinearConstraintBuilder cut(model, min_sum_of_weighted_end_mins, + LinearConstraintBuilder cut(model, min_sum_of_weighted_ends, kMaxIntegerValue); bool is_lifted = false; for (int j = 0; j <= i; ++j) { @@ -1379,6 +1458,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( } std::string full_name = cut_name; if (is_lifted) full_name.append("_lifted"); + if (cut_use_precedences) full_name.append("_prec"); full_name.append("_weighted"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); } @@ -1686,11 +1766,14 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( } } + CtExhaustiveHelper helper; + helper.Init(events, model); + const std::string mirror_str = time_is_forward ? "" : "_mirror"; if (!GenerateShortCompletionTimeCutsWithExactBound( absl::StrCat("NoOverlapCompletionTimeExhaustive", mirror_str), events, - /*capacity_max=*/IntegerValue(1), model, manager)) { + /*capacity_max=*/IntegerValue(1), helper, model, manager)) { return false; } @@ -1748,11 +1831,14 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( } } + CtExhaustiveHelper helper; + helper.Init(events, model); + const IntegerValue capacity_max = integer_trail->UpperBound(capacity); const std::string mirror_str = time_is_forward ? 
"" : "_mirror"; if (!GenerateShortCompletionTimeCutsWithExactBound( absl::StrCat("CumulativeCompletionTimeExhaustive", mirror_str), - events, capacity_max, model, manager)) { + events, capacity_max, helper, model, manager)) { return false; } diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index 53d27f1061..a1fbd5a61c 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -16,6 +16,7 @@ #include #include +#include #include #include "absl/types/span.h" @@ -24,6 +25,7 @@ #include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" #include "ortools/sat/scheduling_helpers.h" +#include "ortools/sat/util.h" namespace operations_research { namespace sat { @@ -117,7 +119,8 @@ struct CompletionTimeEvent { IntegerValue end_max; IntegerValue size_min; - // The lp value of the end of the interval. + // Start and end affine expressions and lp value of the end of the interval. + AffineExpression start; AffineExpression end; double lp_end = 0.0; @@ -147,6 +150,30 @@ struct CompletionTimeEvent { std::string DebugString() const; }; +class CtExhaustiveHelper { + public: + int max_task_index() const { return max_task_index_; } + const CompactVectorVector& predecessors() const { return predecessors_; } + + // Temporary data. + std::vector> profile_; + std::vector> new_profile_; + std::vector assigned_ends_; + + // Collect precedences, set max_task_index. + // TODO(user): Do some transitive closure. + void Init(absl::Span events, Model* model); + + bool PermutationIsCompatibleWithPrecedences( + absl::Span events, + absl::Span permutation); + + private: + CompactVectorVector predecessors_; + int max_task_index_ = 0; + std::vector visited_; +}; + // Computes the minimum sum of the end min and the minimum sum of the end min // weighted by weight of all events. It returns false if no permutation is // valid w.r.t. the range of starts. 
@@ -157,9 +184,9 @@ struct CompletionTimeEvent { // Optim: If both sums are proven <= to the corresponding threshold, we abort. bool ComputeMinSumOfWeightedEndMins( absl::Span events, IntegerValue capacity_max, - double sum_of_ends_lp, double sum_of_weighted_ends_lp, - IntegerValue& min_sum_of_end_mins, - IntegerValue& min_sum_of_weighted_end_mins); + double unweighted_threshold, double weighted_threshold, + CtExhaustiveHelper& helper, double& min_sum_of_ends, + double& min_sum_of_weighted_ends, bool& cut_use_precedences); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc index 9c24ac0afa..f434e296de 100644 --- a/ortools/sat/scheduling_cuts_test.cc +++ b/ortools/sat/scheduling_cuts_test.cc @@ -406,13 +406,17 @@ TEST(ComputeMinSumOfEndMinsTest, CombinationOf3) { CompletionTimeEvent e3(2, helper, demands_helper); const std::vector events = {e1, e2, e3}; - IntegerValue min_sum_of_end_mins = 0; - IntegerValue min_sum_of_weighted_end_mins = 0; - ASSERT_TRUE(ComputeMinSumOfWeightedEndMins(events, two, 0.01, 0.01, - min_sum_of_end_mins, - min_sum_of_weighted_end_mins)); + double min_sum_of_end_mins = 0; + double min_sum_of_weighted_end_mins = 0; + CtExhaustiveHelper ct_helper; + ct_helper.Init(events, &model); + bool cut_use_precedences = false; + ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( + events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences)); EXPECT_EQ(min_sum_of_end_mins, 17); EXPECT_EQ(min_sum_of_weighted_end_mins, 86); + EXPECT_FALSE(cut_use_precedences); } TEST(ComputeMinSumOfEndMinsTest, CombinationOf3ConstraintStart) { @@ -451,11 +455,14 @@ TEST(ComputeMinSumOfEndMinsTest, CombinationOf3ConstraintStart) { CompletionTimeEvent e3(2, helper, demands_helper); const std::vector events = {e1, e2, e3}; - IntegerValue min_sum_of_end_mins = 0; - IntegerValue min_sum_of_weighted_end_mins = 0; - 
ASSERT_TRUE(ComputeMinSumOfWeightedEndMins(events, two, 0.01, 0.01, - min_sum_of_end_mins, - min_sum_of_weighted_end_mins)); + double min_sum_of_end_mins = 0; + double min_sum_of_weighted_end_mins = 0; + CtExhaustiveHelper ct_helper; + ct_helper.Init(events, &model); + bool cut_use_precedences = false; + ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( + events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences)); EXPECT_EQ(min_sum_of_end_mins, 18); EXPECT_EQ(min_sum_of_weighted_end_mins, 86); } @@ -496,15 +503,18 @@ TEST(ComputeMinSumOfEndMinsTest, Infeasible) { CompletionTimeEvent e3(2, helper, demands_helper); const std::vector events = {e1, e2, e3}; - IntegerValue min_sum_of_end_mins = 0; - IntegerValue min_sum_of_weighted_end_mins = 0; - ASSERT_FALSE(ComputeMinSumOfWeightedEndMins(events, two, 0.01, 0.01, - min_sum_of_end_mins, - min_sum_of_weighted_end_mins)); + double min_sum_of_end_mins = 0; + double min_sum_of_weighted_end_mins = 0; + CtExhaustiveHelper ct_helper; + ct_helper.Init(events, &model); + bool cut_use_precedences = false; + ASSERT_FALSE(ComputeMinSumOfWeightedEndMins( + events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences)); } -int64_t ExactMakespan(absl::Span sizes, std::vector& demands, - int capacity) { +double ExactMakespan(absl::Span sizes, std::vector& demands, + int capacity) { const int64_t kHorizon = 1000; CpModelBuilder builder; LinearExpr obj; @@ -519,11 +529,11 @@ int64_t ExactMakespan(absl::Span sizes, std::vector& demands, const CpSolverResponse response = SolveWithParameters(builder.Build(), "num_search_workers:8"); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); - return static_cast(response.objective_value()); + return response.objective_value(); } -int64_t ExactMakespanBruteForce(absl::Span sizes, - std::vector& demands, int capacity) { +double ExactMakespanBruteForce(absl::Span sizes, + std::vector& demands, int 
capacity) { const int64_t kHorizon = 1000; Model model; auto* intervals_repository = model.GetOrCreate(); @@ -555,12 +565,15 @@ int64_t ExactMakespanBruteForce(absl::Span sizes, events.push_back(e); } - IntegerValue min_sum_of_end_mins = 0; - IntegerValue min_sum_of_weighted_end_mins = 0; - EXPECT_TRUE(ComputeMinSumOfWeightedEndMins(events, capacity, 0.01, 0.01, - min_sum_of_end_mins, - min_sum_of_weighted_end_mins)); - return min_sum_of_end_mins.value(); + double min_sum_of_end_mins = 0; + double min_sum_of_weighted_end_mins = 0; + CtExhaustiveHelper ct_helper; + ct_helper.Init(events, &model); + bool cut_use_precedences = false; + EXPECT_TRUE(ComputeMinSumOfWeightedEndMins( + events, capacity, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences)); + return min_sum_of_end_mins; } TEST(ComputeMinSumOfEndMinsTest, RandomCases) { @@ -576,8 +589,8 @@ TEST(ComputeMinSumOfEndMinsTest, RandomCases) { demands.push_back(absl::Uniform(random, 1, capacity)); } - EXPECT_EQ(ExactMakespan(sizes, demands, capacity), - ExactMakespanBruteForce(sizes, demands, capacity)); + EXPECT_NEAR(ExactMakespan(sizes, demands, capacity), + ExactMakespanBruteForce(sizes, demands, capacity), 1e-6); } } diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index af8414e30f..e3cd1cd66d 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -1130,76 +1130,79 @@ int SharedBoundsManager::NumBoundsExported(absl::string_view worker_name) { UniqueClauseStream::UniqueClauseStream() { for (auto& buffer : clauses_by_size_) { - buffer.reserve(kMaxBufferedLiterals); + buffer.reserve(kMaxLiteralsPerBatch); } + fingerprints_.reserve(kMaxFingerprints); } -bool UniqueClauseStream::Add(absl::Span clause) { - absl::MutexLock mutex_lock(&mutex_); - if (clause.size() > kMaxClauseSize || clause.size() <= 2) return false; - // This is just a safety check, the caller should have called CanAccept(). 
- if (NumLiteralsOfSize(clause.size()) + clause.size() > kMaxBufferedLiterals) { - return false; - } - if (BlockClause(clause)) { - std::vector* buffer = MutableBufferForSize(clause.size()); +bool UniqueClauseStream::Add(absl::Span clause, int lbd) { + if (!BlockClause(clause) || lbd > lbd_threshold_) return false; + std::vector* buffer = MutableBufferForSize(clause.size()); + CHECK_NE(buffer, nullptr); + if (buffer->size() + clause.size() <= kMaxLiteralsPerBatch) { buffer->insert(buffer->end(), clause.begin(), clause.end()); - return true; + } else { + // Maybe replace an old buffered clause of the same size if it has a smaller + // hash value. This means that the buffer will contain a deterministic + // sample of the clauses added independent of insertion order. + const int64_t replaced_clause_id = + HashClause(clause, 1) % NumClausesOfSize(clause.size()); + absl::Span replaced_clause = absl::MakeSpan(*buffer).subspan( + replaced_clause_id * clause.size(), clause.size()); + dropped_literals_since_last_batch_ += clause.size(); + if (HashClause(clause, 2) < HashClause(replaced_clause, 2)) { + std::copy(clause.begin(), clause.end(), replaced_clause.begin()); + } } - return false; + return true; } bool UniqueClauseStream::BlockClause(absl::Span clause) { if (clause.size() > kMaxClauseSize) return false; if (clause.size() <= 2) return false; - return fingerprints_.emplace(HashClause(clause)).second; -} - -bool UniqueClauseStream::Delete(absl::Span clause) { - const size_t fingerprint = HashClause(clause); - absl::MutexLock mutex_lock(&mutex_); - // Note a clause with this hash may be buffered, but not yet exported. 
- return fingerprints_.erase(fingerprint) == 1; + const auto hash = HashClause(clause); + return fingerprints_.emplace(hash).second && + !old_fingerprints_.contains(hash); } CompactVectorVector UniqueClauseStream::NextBatch() { - CompactVectorVector buffer; - buffer.reserve(kMaxLiteralsPerBatch / kMinClauseSize, kMaxLiteralsPerBatch); + CompactVectorVector batch; + batch.reserve(kMaxLiteralsPerBatch / kMinClauseSize, kMaxLiteralsPerBatch); int to_fill = kMaxLiteralsPerBatch; - absl::MutexLock mutex_lock(&mutex_); for (int size = kMinClauseSize; size <= kMaxClauseSize; ++size) { CHECK_EQ(NumLiteralsOfSize(size) % size, 0); - while (to_fill >= size && NumLiteralsOfSize(size) > 0) { - absl::Span clause = NextClause(size); - if (fingerprints_.contains(HashClause(clause))) { - buffer.Add(NextClause(size)); - to_fill -= size; - } + std::vector* buffer = MutableBufferForSize(size); + while (to_fill >= size && !buffer->empty()) { + batch.Add(NextClause(size)); + to_fill -= size; PopClause(size); } - } - return buffer; -} - -int UniqueClauseStream::FillUpstreamBuffer(UniqueClauseStream& upstream, - int size, - int max_clauses_to_export) { - int num_exported_clauses = 0; - absl::MutexLock mutex_lock(&mutex_); - while (NumLiteralsOfSize(size) > 0 && - num_exported_clauses < max_clauses_to_export) { - absl::Span clause = NextClause(size); - // Don't emit deleted clauses. 
- if (fingerprints_.contains(HashClause(clause)) && upstream.Add(clause)) { - ++num_exported_clauses; + if (to_fill < size) { + dropped_literals_since_last_batch_ += buffer->size(); + buffer->clear(); } - PopClause(size); } - return num_exported_clauses; + + if (fingerprints_.size() >= kMaxFingerprints / 2) { + VLOG(2) << "Clearing fingerprints: " << fingerprints_.size() / 1024 << "Ki"; + std::swap(fingerprints_, old_fingerprints_); + fingerprints_.clear(); + fingerprints_.reserve(kMaxFingerprints); + } + + if (to_fill > kMaxLiteralsPerBatch / 2 && lbd_threshold_ < kMaxLbd) { + lbd_threshold_ += 1; + VLOG(2) << "Inc lbd: " << lbd_threshold_; + } else if (dropped_literals_since_last_batch_ > 0 && + lbd_threshold_ > kMinLbd) { + lbd_threshold_ -= 1; + VLOG(2) << "Dec lbd: " << lbd_threshold_; + } + dropped_literals_since_last_batch_ = 0; + return batch; } int UniqueClauseStream::NumBufferedLiterals() const { - absl::MutexLock mutex_lock(&mutex_); int result = 0; for (const auto& buffer : clauses_by_size_) { result += buffer.size(); @@ -1207,42 +1210,6 @@ int UniqueClauseStream::NumBufferedLiterals() const { return result; } -bool UniqueClauseStream::CanAccept(int size, int lbd) const { - if (size <= 2 || size > kMaxClauseSize) return false; - absl::MutexLock mutex_lock(&mutex_); - if (lbd > lbd_threshold_) return false; - int num_literals_up_to_size = 0; - for (int i = kMinClauseSize; i <= size; ++i) { - num_literals_up_to_size += NumLiteralsOfSize(i); - } - return num_literals_up_to_size + size <= kMaxBufferedLiterals; -} - -void UniqueClauseStream::RemoveWorstClauses() { - absl::MutexLock mutex_lock(&mutex_); - int literals_to_remove = 0; - for (const auto& buffer : clauses_by_size_) { - literals_to_remove += buffer.size(); - } - literals_to_remove -= kMaxBufferedLiterals; - for (int size = kMaxClauseSize; size >= kMinClauseSize; --size) { - while (NumLiteralsOfSize(size) > 0) { - // Stop if removing one more clause of the current size would - // leave the buffer 
under full. Otherwise we might remove a shorter - // clause later! - if (literals_to_remove < size) return; - fingerprints_.erase(HashClause(NextClause(size))); - PopClause(size); - literals_to_remove -= size; - } - } -} - -void UniqueClauseStream::set_lbd_threshold(int lbd) { - absl::MutexLock mutex_lock(&mutex_); - lbd_threshold_ = lbd; -} - size_t UniqueClauseStream::HashClause(absl::Span clause, size_t hash_seed) { size_t hash = absl::HashOf(hash_seed, clause.size()); @@ -1270,22 +1237,24 @@ int UniqueClauseStream::NumLiteralsOfSize(int size) const { return BufferForSize(size).size(); } -SharedClausesManager::SharedClausesManager(bool always_synchronize, - absl::Duration share_frequency) - : always_synchronize_(always_synchronize), - share_frequency_(share_frequency) {} +SharedClausesManager::SharedClausesManager(bool always_synchronize) + : always_synchronize_(always_synchronize) {} -int SharedClausesManager::RegisterNewId() { +int SharedClausesManager::RegisterNewId(bool may_terminate_early) { absl::MutexLock mutex_lock(&mutex_); + num_full_workers_ += may_terminate_early ? 
0 : 1; const int id = id_to_last_processed_binary_clause_.size(); id_to_last_processed_binary_clause_.resize(id + 1, 0); - id_to_last_returned_batch_.resize(id + 1, 0); - id_to_last_finished_batch_.resize(id + 1, 0); + id_to_last_returned_batch_.resize(id + 1, -1); + id_to_last_finished_batch_.resize(id + 1, -1); id_to_clauses_exported_.resize(id + 1, 0); - id_to_clause_stream_.emplace_back(); return id; } +bool SharedClausesManager::ShouldReadBatch(int reader_id, int writer_id) { + return reader_id != writer_id; +} + void SharedClausesManager::SetWorkerNameForId(int id, absl::string_view worker_name) { absl::MutexLock mutex_lock(&mutex_); @@ -1312,18 +1281,25 @@ void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) { } } -std::vector> SharedClausesManager::GetUnseenClauses( - int id) { - std::vector> result; +void SharedClausesManager::AddBatch(int id, CompactVectorVector batch) { absl::MutexLock mutex_lock(&mutex_); - for (int i = id_to_last_returned_batch_[id]; i < batches_.size(); ++i) { - for (int j = 0; j < batches_[i].size(); ++j) { - result.push_back(batches_[i][j]); + id_to_clauses_exported_[id] += batch.size(); + pending_batches_.push_back(std::move(batch)); +} + +const CompactVectorVector& SharedClausesManager::GetUnseenClauses(int id) { + std::vector> result; + { + absl::MutexLock mutex_lock(&mutex_); + id_to_last_finished_batch_[id] = id_to_last_returned_batch_[id]; + if (id_to_last_returned_batch_[id] + 1 < batches_.size()) { + id_to_last_returned_batch_[id] += 1; + return batches_[id_to_last_returned_batch_[id]]; } } - id_to_last_finished_batch_[id] = id_to_last_returned_batch_[id]; - id_to_last_returned_batch_[id] = batches_.size(); - return result; + static CompactVectorVector* const empty_batch = + new CompactVectorVector(); + return *empty_batch; } void SharedClausesManager::GetUnseenBinaryClauses( @@ -1357,96 +1333,47 @@ void SharedClausesManager::LogStatistics(SolverLogger* logger) { } void SharedClausesManager::Synchronize() { 
- absl::MutexLock mutex_lock(&mutex_); - last_visible_binary_clause_ = added_binary_clauses_.size(); - const int num_workers = id_to_clause_stream_.size(); - if (num_workers <= 1) return; - if (!share_timer_.IsRunning()) share_timer_.Start(); - if (share_timer_.GetDuration() < share_frequency_) return; - share_timer_.Restart(); + std::vector> batches_to_merge; + { + absl::MutexLock mutex_lock(&mutex_); + last_visible_binary_clause_ = added_binary_clauses_.size(); + const int num_workers = id_to_last_processed_binary_clause_.size(); + if (num_workers <= 1) return; - // Tune LBD threshold for individual workers based on how the worker's buffer - // is. We aim to ensure workers can always export their fair share of clauses. - for (int id = 0; id < num_workers; ++id) { - UniqueClauseStream& stream = id_to_clause_stream_[id]; - const int lbd_threshold = stream.lbd_threshold(); - const int num_buffered_literals = stream.NumBufferedLiterals(); - const bool underfull = - num_buffered_literals < - UniqueClauseStream::kMaxLiteralsPerBatch / num_workers; - const bool overfull = - num_buffered_literals > - 2 * UniqueClauseStream::kMaxLiteralsPerBatch / num_workers; - const int new_lbd = std::clamp(lbd_threshold + underfull - overfull, 2, - UniqueClauseStream::kMaxClauseSize); - if (new_lbd != lbd_threshold) { - VLOG(2) << id_to_worker_name_[id] - << " sharing clauses with lbd <= " << new_lbd; - stream.set_lbd_threshold(new_lbd); + if (pending_batches_.size() >= num_full_workers_) { + batches_to_merge = std::move(pending_batches_); } - } - std::vector ids(num_workers); - int literals_to_fill = UniqueClauseStream::kMaxLiteralsPerBatch; - for (int size = UniqueClauseStream::kMinClauseSize; - size <= UniqueClauseStream::kMaxClauseSize; ++size) { - ids.clear(); - for (int id = 0; id < num_workers; ++id) { - if (id_to_clause_stream_[id].NumBufferedLiteralsOfSize(size) > 0) { - ids.push_back(id); + // Delete batches that have been consumed by all workers. 
+ // Keep a few batches around for startup (min finished batch doesn't count + // workers that haven't registered yet). + if (batches_.size() > kMinBatches) { + const int min_finished_batch = + std::min(batches_.size() - kMinBatches, + *absl::c_min_element(id_to_last_finished_batch_) + 1); + for (int i = 0; i < min_finished_batch; ++i) { + VLOG(2) << "Erasing batch"; + batches_.pop_front(); + } + for (int id = 0; id < id_to_last_finished_batch_.size(); ++id) { + id_to_last_returned_batch_[id] -= min_finished_batch; + id_to_last_finished_batch_[id] -= min_finished_batch; } } - // Use progressive filling to attempt to fill the batch with clauses of - // minimum size, this is max-min fair. - while (!ids.empty()) { - const int clauses_to_fill = literals_to_fill / size; - if (clauses_to_fill == 0) break; - // Some workers need to export more clauses to fill the batch due to - // rounding, but we don't want all workers to round up. - const int num_to_round_up = clauses_to_fill % ids.size(); - for (int i = 0; i < ids.size(); ++i) { - const bool round_up = i < num_to_round_up; - const int id = ids[i]; - const int shared = id_to_clause_stream_[id].FillUpstreamBuffer( - all_clauses_, size, clauses_to_fill / ids.size() + round_up); - id_to_clauses_exported_[id] += shared; - if (shared == 0 || - id_to_clause_stream_[id].NumBufferedLiteralsOfSize(size) == 0) { - ids[i] = ids.back(); - ids.pop_back(); - --i; - } - } + // TODO(user): We could cleanup binary clauses that have been consumed. 
+ } + if (batches_to_merge.empty()) return; + UniqueClauseStream next_batch; + for (const auto& batch : batches_to_merge) { + for (int i = 0; i < batch.size(); ++i) { + next_batch.Add(batch[i]); } } - if (all_clauses_.NumBufferedLiterals() > 0) { - batches_.push_back(all_clauses_.NextBatch()); - VLOG(2) << "Batch #" << batches_.size() << " w/ " << batches_.back().size() - << " clauses max size = " - << batches_.back()[batches_.back().size() - 1].size(); + if (next_batch.NumBufferedLiterals() > 0) { + absl::MutexLock mutex_lock(&mutex_); + VLOG(2) << "Merging batch"; + batches_.push_back(next_batch.NextBatch()); } - // Delete batches that have been consumed by all workers. - // Keep a few batches around for startup (min finished batch doesn't count - // workers that haven't registered yet). - // This also ensures that our fingerprint table always contains the last few - // batches, so we reduce the chance of an old buffered duplicate clause on - // a worker being emitted from the global stream multiple times. - if (batches_.size() < kMinBatches) return; - const int min_finished_batch = - std::min(batches_.size() - kMinBatches, - *absl::c_min_element(id_to_last_finished_batch_)); - for (int i = 0; i < min_finished_batch; ++i) { - VLOG(2) << "Erasing batch"; - for (int i = 0; i < batches_.front().size(); ++i) { - all_clauses_.Delete(batches_.front()[i]); - } - batches_.pop_front(); - } - for (int id = 0; id < id_to_last_finished_batch_.size(); ++id) { - id_to_last_returned_batch_[id] -= min_finished_batch; - id_to_last_finished_batch_[id] -= min_finished_batch; - } - // TODO(user): We could cleanup binary clauses that have been consumed. 
} void SharedStatistics::AddStats( diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index afd6be261a..fe6a22fdf8 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -622,108 +622,87 @@ class SharedBoundsManager { // It has a finite size internal buffer that is a small multiple of the batch // size. // -// This class is thread-safe, the idea is to have one per worker plus a -// global one to deduplicate between workers to minimize contention. -// // This uses a finite buffer, so some clauses may be dropped if we generate too -// many more than we export, but that is rarely a problem because we never -// overfill the "global" stream, and if we drop a clause on a worker, one of the -// following will most likely happen: +// many more than we export, but that is rarely a problem because if we drop a +// clause on a worker, one of the following will most likely happen: // 1. Some other worker learns the clause and shares it later. // 2. All other workers also learn and drop the clause. // 3. No other worker learns the clause, so it was not that helpful anyway. // // Note that this uses literals as encoded in a cp_model.proto. Thus, the // literals can be negative numbers. +// +// TODO(user): This class might not want to live in this file now it no +// longer needs to be thread-safe. class UniqueClauseStream { public: static constexpr int kMinClauseSize = 3; static constexpr int kMaxClauseSize = 32; + static constexpr int kMinLbd = 2; + static constexpr int kMaxLbd = 5; // Export 4KiB of clauses per batch. static constexpr int kMaxLiteralsPerBatch = 4096 / sizeof(int); - // Bound the total literals we buffer, approximately enforced so shorter - // clauses can replace longer ones. This can be larger than - // kMaxLiteralsPerBatch (hence the separate constant), but experiments suggest - // that this doesn't help. 
- static constexpr int kMaxBufferedLiterals = kMaxLiteralsPerBatch; UniqueClauseStream(); // Move only - this is an expensive class to copy. UniqueClauseStream(const UniqueClauseStream&) = delete; UniqueClauseStream(UniqueClauseStream&&) = default; - // Adds the clause to a future batch and returns true if the clause was added. - // Otherwise returns false. This may return false if the buffer is full. - // It will not block the clause if it is dropped to avoid unbounded growth of - // the hash table. - bool Add(absl::Span clause) ABSL_LOCKS_EXCLUDED(mutex_); + // Adds the clause to a future batch and returns true if the clause is new, + // otherwise returns false. + bool Add(absl::Span clause, int lbd = 2); - // Lazily deletes a clause with the same hash, returns true if it was present. - // The deleted clause will not be exported (either via NextBatch or - // FillUpstreamBuffer). A clause with the same hash may be re-added after - // calling Delete. If another clause with the same hash is added before the - // deleted clause is emitted then both clauses may be emitted. - bool Delete(absl::Span clause) ABSL_LOCKS_EXCLUDED(mutex_); + // Stop a clause being added to future batches. + // Returns true if the clause is new. + // This is approximate and can have false positives and negatives, it is still + // guaranteed to prevent adding the same clause twice to the next batch. + bool BlockClause(absl::Span clause); - // Returns a set of clauses totalling up to kMaxLiteralsPerBatch and removes - // exported clauses from the internal buffer. - CompactVectorVector NextBatch() ABSL_LOCKS_EXCLUDED(mutex_); + // Returns a set of clauses totalling up to kMaxLiteralsPerBatch and clears + // the internal buffer. + // Increases the LBD threshold if the batch is underfull, and decreases it if + // too many clauses were dropped. 
+ CompactVectorVector NextBatch(); - // Adds up to max_clauses_to_export clauses of a given size to upstream and - // removes them from the internal buffer. - int FillUpstreamBuffer(UniqueClauseStream& upstream, int clause_size, - int max_clauses_to_export) ABSL_LOCKS_EXCLUDED(mutex_); - - // Returns the number of literals in the buffer in clauses with size <= - // max_size. - int NumBufferedLiteralsOfSize(int size) const ABSL_LOCKS_EXCLUDED(mutex_) { - absl::MutexLock lock(&mutex_); - return NumLiteralsOfSize(size); + void ClearFingerprints() { + old_fingerprints_.clear(); + fingerprints_.clear(); + fingerprints_.reserve(kMaxFingerprints); } - int NumBufferedLiterals() const ABSL_LOCKS_EXCLUDED(mutex_); - // Returns true if the stream can accept a clause of the specified size and - // LBD without dropping it. - bool CanAccept(int size, int lbd) const; + // Returns the number of buffered literals in clauses of a given size. + int NumLiteralsOfSize(int size) const; + int NumBufferedLiterals() const; - // Delete longest clauses while keeping at least kMaxBufferedLiterals. - // This guarantees that CanAccept will return the same result as before, and - // at least the next batch will contain the same clauses, but we will emit - // fewer old, long clauses in the future. - void RemoveWorstClauses(); - - int lbd_threshold() const ABSL_LOCKS_EXCLUDED(mutex_) { - absl::MutexLock lock(&mutex_); - return lbd_threshold_; - } - void set_lbd_threshold(int lbd) ABSL_LOCKS_EXCLUDED(mutex_); + int lbd_threshold() const { return lbd_threshold_; } + void set_lbd_threshold(int lbd_threshold) { lbd_threshold_ = lbd_threshold; } // Computes a hash that is independent of the order of literals in the clause. 
static size_t HashClause(absl::Span clause, size_t hash_seed = 0); private: - bool BlockClause(absl::Span clause) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); - std::vector* MutableBufferForSize(int size) - ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { + // This needs to be >> the number of clauses we can plausibly learn in + // a few seconds. + constexpr static size_t kMaxFingerprints = 1024 * 1024 / sizeof(size_t); + constexpr static int kNumSizes = kMaxClauseSize - kMinClauseSize + 1; + + std::vector* MutableBufferForSize(int size) { return &clauses_by_size_[size - kMinClauseSize]; } - absl::Span BufferForSize(int size) const - ABSL_SHARED_LOCKS_REQUIRED(mutex_) { + absl::Span BufferForSize(int size) const { return clauses_by_size_[size - kMinClauseSize]; } - absl::Span NextClause(int size) const - ABSL_SHARED_LOCKS_REQUIRED(mutex_); - void PopClause(int size) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + absl::Span NextClause(int size) const; + void PopClause(int size); // Computes the number of clauses of a given size. - int NumClausesOfSize(int size) const ABSL_SHARED_LOCKS_REQUIRED(mutex_); - int NumLiteralsOfSize(int size) const ABSL_SHARED_LOCKS_REQUIRED(mutex_); + int NumClausesOfSize(int size) const; - mutable absl::Mutex mutex_; - int lbd_threshold_ ABSL_GUARDED_BY(mutex_) = 2; - absl::flat_hash_set fingerprints_ ABSL_GUARDED_BY(mutex_); - std::array, kMaxClauseSize - kMinClauseSize + 1> - clauses_by_size_ ABSL_GUARDED_BY(mutex_); + int lbd_threshold_ = kMinLbd; + int64_t dropped_literals_since_last_batch_ = 0; + + absl::flat_hash_set fingerprints_; + absl::flat_hash_set old_fingerprints_; + std::array, kNumSizes> clauses_by_size_; }; // This class holds clauses found and shared by workers. @@ -735,14 +714,15 @@ class UniqueClauseStream { // literals can be negative numbers. 
class SharedClausesManager { public: - explicit SharedClausesManager(bool always_synchronize, - absl::Duration share_frequency); + explicit SharedClausesManager(bool always_synchronize); void AddBinaryClause(int id, int lit1, int lit2); // Returns new glue clauses. // The spans are guaranteed to remain valid until the next call to // SyncClauses(). - std::vector> GetUnseenClauses(int id); + const CompactVectorVector& GetUnseenClauses(int id); + + void AddBatch(int id, CompactVectorVector batch); // Fills new_clauses with // {{lit1 of clause1, lit2 of clause1}, @@ -752,16 +732,9 @@ class SharedClausesManager { std::vector>* new_clauses); // Ids are used to identify which worker is exporting/importing clauses. - int RegisterNewId(); + int RegisterNewId(bool may_terminate_early); void SetWorkerNameForId(int id, absl::string_view worker_name); - // A worker can add or remove clauses from its own clause set. - // Retains ownership of the returned ClauseFilter. - UniqueClauseStream* GetClauseStream(int id) { - absl::ReaderMutexLock mutex_lock(&mutex_); - return &id_to_clause_stream_[id]; - } - // Search statistics. void LogStatistics(SolverLogger* logger); @@ -770,8 +743,12 @@ class SharedClausesManager { void Synchronize(); private: - static constexpr int kMinBatches = 10; - absl::Mutex mutex_; + // Returns true if `reader_id` should read batches produced by `writer_id`. + bool ShouldReadBatch(int reader_id, int writer_id) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + + static constexpr int kMinBatches = 64; + mutable absl::Mutex mutex_; // Binary clauses: // Cache to avoid adding the same binary clause twice. 
@@ -782,18 +759,22 @@ class SharedClausesManager { std::vector id_to_last_processed_binary_clause_ ABSL_GUARDED_BY(mutex_); int last_visible_binary_clause_ ABSL_GUARDED_BY(mutex_) = 0; - // Longer clauses: - UniqueClauseStream all_clauses_ ABSL_GUARDED_BY(mutex_); // This is slightly subtle - we need to track the batches that might be - // currently being processed by each worker. + // currently being processed by each worker to make sure we don't erase any + // batch that a worker might currently be reading. std::vector id_to_last_returned_batch_ ABSL_GUARDED_BY(mutex_); std::vector id_to_last_finished_batch_ ABSL_GUARDED_BY(mutex_); + std::deque> batches_ ABSL_GUARDED_BY(mutex_); - std::deque id_to_clause_stream_ ABSL_GUARDED_BY(mutex_); - WallTimer share_timer_ ABSL_GUARDED_BY(mutex_); + // pending_batches_ contains clauses produced by individual workers that have + // not yet been merged into batches_, which can be read by other workers. When + // this is long enough they will be merged into a single batch and appended to + // batches_. 
+ std::vector> pending_batches_ + ABSL_GUARDED_BY(mutex_); + int num_full_workers_ ABSL_GUARDED_BY(mutex_) = 0; const bool always_synchronize_ = true; - const absl::Duration share_frequency_; // Stats: std::vector id_to_clauses_exported_; diff --git a/ortools/sat/synchronization_test.cc b/ortools/sat/synchronization_test.cc index 6276f45cfe..00dd4a2550 100644 --- a/ortools/sat/synchronization_test.cc +++ b/ortools/sat/synchronization_test.cc @@ -833,10 +833,9 @@ TEST(SharedResponseManagerTest, Callback) { } TEST(SharedClausesManagerTest, SyncApi) { - SharedClausesManager manager(/*always_synchronize=*/true, - /*share_frequency=*/absl::ZeroDuration()); - EXPECT_EQ(0, manager.RegisterNewId()); - EXPECT_EQ(1, manager.RegisterNewId()); + SharedClausesManager manager(/*always_synchronize=*/true); + EXPECT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); + EXPECT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); manager.AddBinaryClause(/*id=*/0, 1, 2); std::vector> new_clauses; @@ -868,19 +867,6 @@ TEST(UniqueClauseStreamTest, AddIgnoresDuplicates) { EXPECT_EQ(stream.NumBufferedLiterals(), 3); } -TEST(UniqueClauseStreamTest, Delete) { - UniqueClauseStream stream; - - EXPECT_TRUE(stream.Add({3, 2, 1})); - EXPECT_TRUE(stream.Delete({1, 2, 3})); - EXPECT_FALSE(stream.Delete({1, 2, 3, 4})); - EXPECT_THAT(stream.NextBatch(), ::testing::IsEmpty()); - EXPECT_TRUE(stream.Add({2, 3, 1})); - EXPECT_EQ(stream.NumBufferedLiterals(), 3); - stream.NextBatch(); - EXPECT_TRUE(stream.Delete({1, 2, 3})); -} - TEST(UniqueClauseStreamTest, AddIgnoresInvalidSizeClauses) { UniqueClauseStream stream; std::vector long_clause; @@ -905,46 +891,20 @@ TEST(UniqueClauseStreamTest, ExportsShortestClauses) { } // Batch 1 should be filled with size 3 clauses. - EXPECT_EQ(stream.NextBatch().size(), 1024 / 3); - // Batch 2 should be filled with size 4 clauses. - EXPECT_EQ(stream.NextBatch().size(), 1024 / 4); - // Batch 3 should be filled with size 5 clauses. 
- EXPECT_EQ(stream.NextBatch().size(), 1024 / 5); -} - -TEST(UniqueClauseStreamTest, RemoveWorstClauses) { - UniqueClauseStream stream; - // Fill the buffer - for (int i = 0; i < UniqueClauseStream::kMaxBufferedLiterals / 6; ++i) { - stream.Add({i + 1, i + 256, i + 512, -4, -3, -2}); - } - for (int i = 0; i < UniqueClauseStream::kMaxLiteralsPerBatch / 2 / 3; ++i) { - stream.Add({i + 1, i + 256, i + 512}); - } - - stream.RemoveWorstClauses(); - - EXPECT_GE(stream.NumBufferedLiterals(), - UniqueClauseStream::kMaxBufferedLiterals); - EXPECT_LT(stream.NumBufferedLiterals(), - UniqueClauseStream::kMaxBufferedLiterals + 6); - EXPECT_TRUE(stream.CanAccept(3, /*lbd=*/2)); - EXPECT_FALSE(stream.CanAccept(6, /*lbd=*/2)); - // Make sure none of the size 3 clauses were removed. EXPECT_EQ(stream.NextBatch().size(), - UniqueClauseStream::kMaxLiteralsPerBatch / 2 / 3 + - UniqueClauseStream::kMaxBufferedLiterals / 2 / 6); + UniqueClauseStream::kMaxLiteralsPerBatch / 3); + // Batch 2 should be empty. + EXPECT_TRUE(stream.NextBatch().empty()); } TEST(UniqueClauseStreamTest, DropsClauses) { UniqueClauseStream stream; - // We shouldn't drop any clause where Add returns true. 
int literals_successfully_added = 0; for (int i = 0; i < 256 / 4; ++i) { literals_successfully_added += 4 * stream.Add({i + 1, i + 256, i + 512, -4}); } - for (int i = 0; i < 256 / 3; ++i) { + for (int i = 0; i < UniqueClauseStream::kMaxLiteralsPerBatch / 3; ++i) { literals_successfully_added += 3 * stream.Add({i + 1, i + 256, i + 512}); } for (int i = 0; i < 1024 * 1024 / 5; ++i) { @@ -952,26 +912,18 @@ TEST(UniqueClauseStreamTest, DropsClauses) { 5 * stream.Add({i + 1, i + 256, i + 512, i + 1024, -2048}); } - EXPECT_FALSE(stream.CanAccept(3, /*lbd=*/3)); - EXPECT_TRUE(stream.CanAccept(3, /*lbd=*/2)); - EXPECT_TRUE(stream.CanAccept(4, /*lbd=*/2)); - EXPECT_FALSE(stream.CanAccept(5, /*lbd=*/2)); - EXPECT_EQ(stream.NumBufferedLiterals(), literals_successfully_added); - EXPECT_EQ( - literals_successfully_added, - 256 - 256 % 3 + // size 3 clauses - 256 - 256 % 4 + // size 4 clauses - UniqueClauseStream::kMaxBufferedLiterals - - UniqueClauseStream::kMaxBufferedLiterals % 5); // size 5 clauses - // Batch 1 should be filled with size 3 clauses. - EXPECT_EQ(stream.NextBatch().size(), 256 / 3 + 256 / 4 + 512 / 5); + EXPECT_GT(stream.NumBufferedLiterals(), + UniqueClauseStream::kMaxLiteralsPerBatch - 5); + // Batch should be filled with size 3 clauses. 
+ EXPECT_EQ(stream.NextBatch().size(), + UniqueClauseStream::kMaxLiteralsPerBatch / 3); + EXPECT_TRUE(stream.NextBatch().empty()); } TEST(SharedClausesManagerTest, NonSyncApi) { - SharedClausesManager manager(/*always_synchronize=*/false, - /*share_frequency=*/absl::ZeroDuration()); - EXPECT_EQ(0, manager.RegisterNewId()); - EXPECT_EQ(1, manager.RegisterNewId()); + SharedClausesManager manager(/*always_synchronize=*/false); + EXPECT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); + EXPECT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); manager.AddBinaryClause(/*id=*/0, 1, 2); std::vector> new_clauses; @@ -1018,115 +970,92 @@ TEST(SharedClausesManagerTest, NonSyncApi) { } TEST(SharedClausesManagerTest, ShareGlueClauses) { - SharedClausesManager manager(/*always_synchronize=*/true, - absl::ZeroDuration()); - ASSERT_EQ(0, manager.RegisterNewId()); - ASSERT_EQ(1, manager.RegisterNewId()); - auto* stream0 = manager.GetClauseStream(0); - auto* stream1 = manager.GetClauseStream(1); - // Add a bunch of clauses that will be skipped in the first batch. - for (int i = 0; i < 1024 / 8; ++i) { - EXPECT_TRUE(stream0->Add({1, 2, 3, 4, 5, 6, 7, i + 8})); + SharedClausesManager manager(/*always_synchronize=*/true); + ASSERT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); + ASSERT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + UniqueClauseStream stream0; + UniqueClauseStream stream1; + // Add a bunch of clauses that will be skipped batch. + for (int i = 0; i < UniqueClauseStream::kMaxLiteralsPerBatch / 8; ++i) { + EXPECT_TRUE(stream0.Add({1, 2, 3, 4, 5, 6, 7, i + 8})); } - EXPECT_EQ(stream0->NumBufferedLiterals(), 1024); + EXPECT_EQ(stream0.NumBufferedLiterals(), + UniqueClauseStream::kMaxLiteralsPerBatch); // Fill 1 batch of shorter clauses. 
- for (int i = 0; i < 1024 / 4; ++i) { - stream1->Add({1, 2, 3, i + 4}); + for (int i = 0; i < UniqueClauseStream::kMaxLiteralsPerBatch / 4; ++i) { + stream1.Add({1, 2, 3, i + 4}); } - EXPECT_EQ(stream1->NumBufferedLiterals(), 1024); + manager.AddBatch(0, stream0.NextBatch()); + manager.AddBatch(1, stream1.NextBatch()); + manager.Synchronize(); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); - manager.Synchronize(); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::SizeIs(1024 / 4)); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::SizeIs(1024 / 4)); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); - manager.Synchronize(); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::SizeIs(1024 / 8)); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::SizeIs(1024 / 8)); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); - manager.Synchronize(); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); -} - -TEST(SharedClausesManagerTest, ShareFrequency) { - SharedClausesManager manager(/*always_synchronize=*/true, - absl::InfiniteDuration()); - ASSERT_EQ(0, manager.RegisterNewId()); - ASSERT_EQ(1, manager.RegisterNewId()); - auto* stream0 = manager.GetClauseStream(0); - auto* stream1 = manager.GetClauseStream(1); - for (int i = 0; i < 1024 / 5; ++i) { - stream0->Add({i + 1, i + 513, 2048, 2049, -10}); - stream1->Add({i + 1, i + 513, 2048, 2049, -10}); - } - - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); - manager.Synchronize(); + EXPECT_THAT(manager.GetUnseenClauses(0), + ::testing::SizeIs(UniqueClauseStream::kMaxLiteralsPerBatch / 4)); + 
EXPECT_THAT(manager.GetUnseenClauses(1), + ::testing::SizeIs(UniqueClauseStream::kMaxLiteralsPerBatch / 4)); EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); } TEST(SharedClausesManagerTest, LbdThresholdIncrease) { - SharedClausesManager manager(/*always_synchronize=*/true, - absl::ZeroDuration()); - ASSERT_EQ(0, manager.RegisterNewId()); - ASSERT_EQ(1, manager.RegisterNewId()); - auto* stream0 = manager.GetClauseStream(0); - auto* stream1 = manager.GetClauseStream(1); - for (int i = 0; i < 1024 / 5; ++i) { - stream0->Add({i + 1, i + 513, 2048, 2049, -10}); - stream1->Add({i + 1, i + 513, 2048, 2049, -10}); + SharedClausesManager manager(/*always_synchronize=*/true); + ASSERT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); + ASSERT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + UniqueClauseStream stream0; + UniqueClauseStream stream1; + const int kExpectedClauses = UniqueClauseStream::kMaxLiteralsPerBatch / 5; + for (int i = 0; i < kExpectedClauses; ++i) { + stream0.Add({i + 1, i + 513, 2048, 2049, -10}); + stream1.Add({i + 1, i + 513, 2048, 2049, -10}); } + manager.AddBatch(0, stream0.NextBatch()); + manager.AddBatch(1, stream1.NextBatch()); + manager.Synchronize(); + EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::SizeIs(kExpectedClauses)); + EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::SizeIs(kExpectedClauses)); + EXPECT_EQ(stream0.lbd_threshold(), 2); + EXPECT_EQ(stream1.lbd_threshold(), 2); + manager.Synchronize(); + manager.AddBatch(0, stream0.NextBatch()); + manager.AddBatch(1, stream1.NextBatch()); EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); - manager.Synchronize(); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::SizeIs(1024 / 5)); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::SizeIs(1024 / 5)); - EXPECT_EQ(stream0->lbd_threshold(), 2); - 
EXPECT_EQ(stream1->lbd_threshold(), 2); - manager.Synchronize(); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); - EXPECT_EQ(stream0->lbd_threshold(), 3); - EXPECT_EQ(stream1->lbd_threshold(), 3); + EXPECT_EQ(stream0.lbd_threshold(), 3); + EXPECT_EQ(stream1.lbd_threshold(), 3); } TEST(SharedClausesManagerTest, LbdThresholdDecrease) { - SharedClausesManager manager(/*always_synchronize=*/true, - absl::ZeroDuration()); - ASSERT_EQ(0, manager.RegisterNewId()); - ASSERT_EQ(1, manager.RegisterNewId()); - ASSERT_EQ(2, manager.RegisterNewId()); - auto* stream0 = manager.GetClauseStream(0); - auto* stream1 = manager.GetClauseStream(1); + SharedClausesManager manager(/*always_synchronize=*/true); + ASSERT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); + ASSERT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + UniqueClauseStream stream0; + UniqueClauseStream stream1; - // Should increase LBD Threshold. - manager.Synchronize(); - // Then add 1/2 batch of clauses to each worker. - for (int i = 0; i < 1024 / 4 / 2; ++i) { - stream0->Add({i + 1, i + 512, 2048, 2049}); - stream1->Add({i + 1, i + 513, 2048, 2049}); + manager.AddBatch(0, stream0.NextBatch()); + manager.AddBatch(1, stream1.NextBatch()); + const int kSize4Clauses = UniqueClauseStream::kMaxLiteralsPerBatch / 4 / 2; + const int kSize5ClausesAdded = UniqueClauseStream::kMaxLiteralsPerBatch / 5; + // Then add 1/2 batch of size 4 clauses to each worker. + for (int i = 0; i < kSize4Clauses; ++i) { + stream0.Add({i + 1, i + 512, 2048, 2049}); + stream1.Add({i + 1, i + 513, 2048, -123}); } // Than add loads of longer clauses to just stream0. 
- for (int i = 1024 / 5 / 2; i < 3 * 1024 / 5; ++i) { - stream0->Add({i + 1, 2, 3, -10}); + for (int i = 0; i < kSize5ClausesAdded; ++i) { + stream0.Add({i + 1, 2, 3, -10, 12}); } - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::IsEmpty()); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::IsEmpty()); - EXPECT_EQ(stream0->lbd_threshold(), 3); - EXPECT_EQ(stream1->lbd_threshold(), 3); + EXPECT_EQ(stream0.lbd_threshold(), 3); + EXPECT_EQ(stream1.lbd_threshold(), 3); + manager.AddBatch(0, stream0.NextBatch()); + manager.AddBatch(1, stream1.NextBatch()); manager.Synchronize(); - EXPECT_THAT(manager.GetUnseenClauses(0), ::testing::SizeIs(1024 / 4)); - EXPECT_THAT(manager.GetUnseenClauses(1), ::testing::SizeIs(1024 / 4)); - EXPECT_EQ(stream0->lbd_threshold(), 2); - EXPECT_EQ(stream1->lbd_threshold(), 3); + EXPECT_THAT(manager.GetUnseenClauses(0), + ::testing::SizeIs(2 * kSize4Clauses)); + EXPECT_THAT(manager.GetUnseenClauses(1), + ::testing::SizeIs(2 * kSize4Clauses)); + EXPECT_EQ(stream0.lbd_threshold(), 2); } } // namespace } // namespace sat From af52d341549a30b3162fa24610cb5ed787b9b204 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 16 May 2025 16:25:02 +0200 Subject: [PATCH 010/509] cleanup * remove swig_python.bzl * sync cmake/README.md --- Dependencies.txt | 2 +- bazel/swig_python.bzl | 89 ------------------------------------------- cmake/README.md | 4 +- 3 files changed, 4 insertions(+), 91 deletions(-) delete mode 100644 bazel/swig_python.bzl diff --git a/Dependencies.txt b/Dependencies.txt index 85bcc6682a..c9ebed0a22 100644 --- a/Dependencies.txt +++ b/Dependencies.txt @@ -9,7 +9,7 @@ Clp=1.17.10 Cgl=0.60.9 Cbc=2.10.12 GLPK=5.0 -HiGHS=v1.9.0 +HiGHS=v1.10.0 Scip=v922 # Python pybind11=v2.13.6 diff --git a/bazel/swig_python.bzl b/bazel/swig_python.bzl deleted file mode 100644 index 67067c6fee..0000000000 --- a/bazel/swig_python.bzl +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 
2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" Bazel rules for building swig files.""" - -def _py_wrap_cc_impl(ctx): - srcs = ctx.files.srcs - if len(srcs) != 1: - fail("Exactly one SWIG source file label must be specified.", "srcs") - module_name = ctx.attr.module_name - src = ctx.files.srcs[0] - inputs = set([src]) - inputs += ctx.files.i_includes - for dep in ctx.attr.deps: - inputs += dep.cc.transitive_headers - inputs += ctx.files._swiglib - inputs += ctx.files.toolchain_deps - swig_include_dirs = set(_get_repository_roots(ctx, inputs)) - swig_include_dirs += sorted([f.dirname for f in ctx.files._swiglib]) - args = [ - "-c++", - "-python", - "-module", - module_name, - "-o", - ctx.outputs.cc_out.path, - "-outdir", - ctx.outputs.py_out.dirname, - ] - args += ["-l" + f.path for f in ctx.files.i_includes] - args += ["-I" + i for i in swig_include_dirs] - args.append(src.path) - outputs = [ctx.outputs.cc_out, ctx.outputs.py_out] - ctx.actions.run( - executable = ctx.executable._swig, - arguments = args, - inputs = list(inputs), - outputs = outputs, - mnemonic = "PythonSwig", - progress_message = "SWIGing " + src.path, - ) - return [ - DefaultInfo(files = depset(outputs)), - ] - -_py_wrap_cc = rule( - attrs = { - "srcs": attr.label_list( - mandatory = True, - allow_files = True, - ), - "swig_includes": attr.label_list( - allow_files = True, - ), - "deps": attr.label_list( - allow_files = True, - providers = ["cc"], - ), - "toolchain_deps": attr.label_list( - allow_files = True, - ), - "module_name": 
attr.string(mandatory = True), - "py_module_name": attr.string(mandatory = True), - "_swig": attr.label( - default = Label("@swig//:swig"), - executable = True, - cfg = "exec", - ), - "_swiglib": attr.label( - default = Label("@swig//:templates"), - allow_files = True, - ), - }, - outputs = { - "cc_out": "%{module_name}.cc", - "py_out": "%{py_module_name}.py", - }, - implementation = _py_wrap_cc_impl, -) diff --git a/cmake/README.md b/cmake/README.md index 9d18ecbd56..a233fb8142 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -7,6 +7,7 @@ | amd64 MacOS | [![Status][amd64_macos_cpp_svg]][amd64_macos_cpp_link] | [![Status][amd64_macos_python_svg]][amd64_macos_python_link] | [![Status][amd64_macos_java_svg]][amd64_macos_java_link] | [![Status][amd64_macos_dotnet_svg]][amd64_macos_dotnet_link] | | amd64 Windows | [![Status][windows_cpp_svg]][windows_cpp_link] | [![Status][windows_python_svg]][windows_python_link] | [![Status][windows_java_svg]][windows_java_link] | [![Status][windows_dotnet_svg]][windows_dotnet_link] | + [linux_cpp_svg]: ./../../../actions/workflows/amd64_linux_cmake_cpp.yml/badge.svg?branch=main [linux_cpp_link]: ./../../../actions/workflows/amd64_linux_cmake_cpp.yml [linux_python_svg]: ./../../../actions/workflows/amd64_linux_cmake_python.yml/badge.svg?branch=main @@ -43,7 +44,8 @@ [windows_dotnet_svg]: ./../../../actions/workflows/amd64_windows_cmake_dotnet.yml/badge.svg?branch=main [windows_dotnet_link]: ./../../../actions/workflows/amd64_windows_cmake_dotnet.yml -Dockers \[AlmaLinux, Alpine, Archlinux, Debian, Fedora, OpenSuse, RockyLinux, Ubuntu\]x +Dockers: +\[AlmaLinux, Alpine, Archlinux, Debian, Fedora, OpenSuse, RockyLinux, Ubuntu\]x \[C++, Python, Java, .Net\]: [![Status][docker_svg]][docker_link] [docker_svg]: ./../../../actions/workflows/amd64_docker_cmake.yml/badge.svg?branch=main From 92e3c1e7d047510045925a5c9f0f40bf7aa28d7f Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 19 May 2025 09:55:42 +0200 Subject: [PATCH 
011/509] python: target typing-extensions >= 4.12 --- ortools/python/setup.py.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index 1c785eac1c..d7fe709b5d 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -47,7 +47,7 @@ setup( 'numpy >= 1.13.3', 'pandas >= 2.0.0', 'protobuf >= 6.30.2,<6.31', - 'typing-extensions >= 4.13.1', + 'typing-extensions >= 4.12', 'immutabledict >= 3.0.0', ], package_data={ From 4177426b57644c117e57851f4109d265c05842e1 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 19 May 2025 11:57:15 +0200 Subject: [PATCH 012/509] sat: update internal docs --- ortools/sat/docs/README.md | 7 +++++- ortools/sat/docs/boolean_logic.md | 24 ++++++++++++++----- ortools/sat/docs/channeling.md | 16 +++++++++---- ortools/sat/docs/integer_arithmetic.md | 24 ++++++++++++++----- ortools/sat/docs/model.md | 16 +++++++++---- ortools/sat/docs/scheduling.md | 32 +++++++++++++++++++------- ortools/sat/docs/solver.md | 32 +++++++++++++++++++------- ortools/sat/docs/troubleshooting.md | 7 +++++- 8 files changed, 120 insertions(+), 38 deletions(-) diff --git a/ortools/sat/docs/README.md b/ortools/sat/docs/README.md index d5f6111098..481c23e144 100644 --- a/ortools/sat/docs/README.md +++ b/ortools/sat/docs/README.md @@ -75,6 +75,9 @@ and some metrics. 
```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -111,7 +114,9 @@ void SimpleSatProgram() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SimpleSatProgram(); return EXIT_SUCCESS; } diff --git a/ortools/sat/docs/boolean_logic.md b/ortools/sat/docs/boolean_logic.md index bef7d6a775..c0bcf27e36 100644 --- a/ortools/sat/docs/boolean_logic.md +++ b/ortools/sat/docs/boolean_logic.md @@ -43,6 +43,9 @@ literal_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" @@ -60,9 +63,10 @@ void LiteralSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::LiteralSampleSat(); - return EXIT_SUCCESS; } ``` @@ -173,7 +177,10 @@ bool_or_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/sat/cp_model.h" namespace operations_research { @@ -192,9 +199,10 @@ void BoolOrSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::BoolOrSampleSat(); - return EXIT_SUCCESS; } ``` @@ -319,7 +327,10 @@ reified_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" 
+#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/sat/cp_model.h" namespace operations_research { @@ -347,9 +358,10 @@ void ReifiedSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::ReifiedSampleSat(); - return EXIT_SUCCESS; } ``` diff --git a/ortools/sat/docs/channeling.md b/ortools/sat/docs/channeling.md index 6feed79c20..c36c1e939a 100644 --- a/ortools/sat/docs/channeling.md +++ b/ortools/sat/docs/channeling.md @@ -95,7 +95,10 @@ channeling_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -148,9 +151,10 @@ void ChannelingSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::ChannelingSampleSat(); - return EXIT_SUCCESS; } ``` @@ -591,6 +595,9 @@ binpacking_problem_sat() #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -667,9 +674,10 @@ void BinpackingProblemSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::BinpackingProblemSat(); - return EXIT_SUCCESS; } ``` diff --git a/ortools/sat/docs/integer_arithmetic.md 
b/ortools/sat/docs/integer_arithmetic.md index 7fac0f7d1e..fe2dae192b 100644 --- a/ortools/sat/docs/integer_arithmetic.md +++ b/ortools/sat/docs/integer_arithmetic.md @@ -154,6 +154,9 @@ rabbits_and_pheasants_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -186,9 +189,10 @@ void RabbitsAndPheasantsSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::RabbitsAndPheasantsSat(); - return EXIT_SUCCESS; } ``` @@ -440,7 +444,10 @@ earliness_tardiness_cost_sample_sat() #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -496,9 +503,10 @@ void EarlinessTardinessCostSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::EarlinessTardinessCostSampleSat(); - return EXIT_SUCCESS; } ``` @@ -863,7 +871,10 @@ step_function_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -934,9 +945,10 @@ void StepFunctionSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + 
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::StepFunctionSampleSat(); - return EXIT_SUCCESS; } ``` diff --git a/ortools/sat/docs/model.md b/ortools/sat/docs/model.md index 42cb8f758d..02d6459830 100644 --- a/ortools/sat/docs/model.md +++ b/ortools/sat/docs/model.md @@ -120,6 +120,9 @@ solution_hinting_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -165,9 +168,10 @@ void SolutionHintingSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SolutionHintingSampleSat(); - return EXIT_SUCCESS; } ``` @@ -433,6 +437,9 @@ clone_model_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -474,9 +481,10 @@ void CloneModelSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::CloneModelSampleSat(); - return EXIT_SUCCESS; } ``` diff --git a/ortools/sat/docs/scheduling.md b/ortools/sat/docs/scheduling.md index 29f27adfcd..d1c60c7e42 100644 --- a/ortools/sat/docs/scheduling.md +++ b/ortools/sat/docs/scheduling.md @@ -65,6 +65,9 @@ interval_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include 
"ortools/util/sorted_interval_list.h" @@ -111,9 +114,10 @@ void IntervalSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::IntervalSampleSat(); - return EXIT_SUCCESS; } ``` @@ -289,6 +293,9 @@ optional_interval_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/util/sorted_interval_list.h" @@ -332,9 +339,10 @@ void OptionalIntervalSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::OptionalIntervalSampleSat(); - return EXIT_SUCCESS; } ``` @@ -624,7 +632,10 @@ no_overlap_sample_sat() #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -694,9 +705,10 @@ void NoOverlapSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::NoOverlapSampleSat(); - return EXIT_SUCCESS; } ``` @@ -1378,7 +1390,10 @@ ranking_sample_sat() #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -1517,9 +1532,10 @@ void RankingSampleSat() { } // namespace sat } 
// namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::RankingSampleSat(); - return EXIT_SUCCESS; } ``` diff --git a/ortools/sat/docs/solver.md b/ortools/sat/docs/solver.md index c538ad9b6b..7eaa5311ec 100644 --- a/ortools/sat/docs/solver.md +++ b/ortools/sat/docs/solver.md @@ -52,6 +52,9 @@ solve_with_time_limit_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -95,9 +98,10 @@ void SolveWithTimeLimitSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SolveWithTimeLimitSampleSat(); - return EXIT_SUCCESS; } ``` @@ -318,6 +322,9 @@ solve_and_print_intermediate_solutions_sample_sat() ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -359,9 +366,10 @@ void SolveAndPrintIntermediateSolutionsSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SolveAndPrintIntermediateSolutionsSampleSat(); - return EXIT_SUCCESS; } ``` @@ -675,6 +683,9 @@ To search for all solutions, a parameter of the SAT solver must be changed. 
```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -719,9 +730,10 @@ void SearchAllSolutionsSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::SearchAllSolutionsSampleSat(); - return EXIT_SUCCESS; } ``` @@ -998,6 +1010,9 @@ limit, and setting that bool to true. #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -1046,9 +1061,10 @@ void StopAfterNSolutionsSampleSat() { } // namespace sat } // namespace operations_research -int main() { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::StopAfterNSolutionsSampleSat(); - return EXIT_SUCCESS; } ``` diff --git a/ortools/sat/docs/troubleshooting.md b/ortools/sat/docs/troubleshooting.md index 548d7c1bbd..a281557f7b 100644 --- a/ortools/sat/docs/troubleshooting.md +++ b/ortools/sat/docs/troubleshooting.md @@ -144,7 +144,10 @@ if __name__ == "__main__": ```cpp #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/types/span.h" +#include "ortools/base/init_google.h" #include "ortools/base/logging.h" #include "ortools/sat/cp_model.h" #include "ortools/sat/cp_model.pb.h" @@ -187,7 +190,9 @@ void AssumptionsSampleSat() { } // namespace sat } // namespace operations_research -int main(int argc, char** argv) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + 
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::sat::AssumptionsSampleSat(); return EXIT_SUCCESS; } From 243365417c55de4c67c5fa7a53aa9ad173e7f9c7 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 19 May 2025 11:58:23 +0200 Subject: [PATCH 013/509] fix indent --- ortools/base/int_type.h | 16 ++++++++-------- ortools/util/strong_integers.h | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ortools/base/int_type.h b/ortools/base/int_type.h index 246376def8..57a990a716 100644 --- a/ortools/base/int_type.h +++ b/ortools/base/int_type.h @@ -250,14 +250,14 @@ class IntType { // -- ASSIGNMENT OPERATORS --------------------------------------------------- // We support the following assignment operators: =, +=, -=, *=, /=, <<=, >>= // and %= for both ThisType and ValueType. -#define INT_TYPE_ASSIGNMENT_OP(op) \ - ThisType& operator op(const ThisType & arg_value) { \ - value_ op arg_value.value(); \ - return *this; \ - } \ - ThisType& operator op(ValueType arg_value) { \ - value_ op arg_value; \ - return *this; \ +#define INT_TYPE_ASSIGNMENT_OP(op) \ + ThisType& operator op(const ThisType& arg_value) { \ + value_ op arg_value.value(); \ + return *this; \ + } \ + ThisType& operator op(ValueType arg_value) { \ + value_ op arg_value; \ + return *this; \ } INT_TYPE_ASSIGNMENT_OP(+=); INT_TYPE_ASSIGNMENT_OP(-=); diff --git a/ortools/util/strong_integers.h b/ortools/util/strong_integers.h index 678e6abafd..7b0d45360b 100644 --- a/ortools/util/strong_integers.h +++ b/ortools/util/strong_integers.h @@ -96,7 +96,7 @@ namespace operations_research { // index and int64_t type. 
#define STRONG_ASSIGNMENT_OP(StrongClass, IntType, op) \ - ThisType& operator op(const ThisType & arg_value) { \ + ThisType& operator op(const ThisType& arg_value) { \ value_ op arg_value.value(); \ return *this; \ } \ From 327263e61910724f89422bf042cc7030118defe3 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 19 May 2025 13:47:28 +0200 Subject: [PATCH 014/509] replace some const string by string_view --- ortools/constraint_solver/assignment.cc | 5 +++-- ortools/math_opt/core/model_summary.cc | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ortools/constraint_solver/assignment.cc b/ortools/constraint_solver/assignment.cc index 43a108f4ad..04a79115ce 100644 --- a/ortools/constraint_solver/assignment.cc +++ b/ortools/constraint_solver/assignment.cc @@ -22,6 +22,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" +#include "absl/strings/string_view.h" #include "ortools/base/file.h" #include "ortools/base/logging.h" #include "ortools/base/map_util.h" @@ -465,7 +466,7 @@ void IdToElementMap(AssignmentContainer* container, template void LoadElement(const absl::flat_hash_map& id_to_element_map, const P& proto) { - const std::string& var_id = proto.var_id(); + absl::string_view var_id = proto.var_id(); CHECK(!var_id.empty()); E* element = nullptr; if (gtl::FindCopy(id_to_element_map, var_id, &element)) { @@ -539,7 +540,7 @@ void Assignment::Load(const AssignmentProto& assignment_proto) { &AssignmentProto::sequence_var_assignment); for (int i = 0; i < assignment_proto.objective_size(); ++i) { const IntVarAssignment& objective = assignment_proto.objective(i); - const std::string& objective_id = objective.var_id(); + absl::string_view objective_id = objective.var_id(); DCHECK(!objective_id.empty()); if (HasObjectiveFromIndex(i) && objective_id == ObjectiveFromIndex(i)->name()) { diff --git a/ortools/math_opt/core/model_summary.cc b/ortools/math_opt/core/model_summary.cc index 
5e689de9f8..c6c16654a3 100644 --- a/ortools/math_opt/core/model_summary.cc +++ b/ortools/math_opt/core/model_summary.cc @@ -137,7 +137,7 @@ absl::StatusOr ModelSummary::Create(const ModelProto& model, {}, model.auxiliary_objectives(), summary.auxiliary_objectives)) << "ModelProto.auxiliary_objectives are invalid"; { - const std::string& objective_name = model.objective().name(); + absl::string_view objective_name = model.objective().name(); if (summary.auxiliary_objectives.HasName(objective_name)) { return util::InvalidArgumentErrorBuilder() << "duplicate objective name: " << objective_name; From 1cd3c86e36d6769227ed26ee7f0f87cffdddafbb Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 19 May 2025 13:47:54 +0200 Subject: [PATCH 015/509] update internal doc --- ortools/constraint_solver/docs/CP.md | 7 ++++++- ortools/routing/docs/ROUTING.md | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ortools/constraint_solver/docs/CP.md b/ortools/constraint_solver/docs/CP.md index df4d86d0ba..2865706a0f 100644 --- a/ortools/constraint_solver/docs/CP.md +++ b/ortools/constraint_solver/docs/CP.md @@ -15,6 +15,9 @@ Java and .Net. Each language have different requirements for the code samples. #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" namespace operations_research { @@ -59,7 +62,9 @@ void SimpleCpProgram() { } // namespace operations_research -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::SimpleCpProgram(); return EXIT_SUCCESS; } diff --git a/ortools/routing/docs/ROUTING.md b/ortools/routing/docs/ROUTING.md index 764a54d8f2..b8b23b2270 100644 --- a/ortools/routing/docs/ROUTING.md +++ b/ortools/routing/docs/ROUTING.md @@ -24,7 +24,10 @@ and .Net. 
Each language have different requirements for the code samples. #include #include +#include "absl/base/log_severity.h" +#include "absl/log/globals.h" #include "absl/log/log.h" +#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -83,7 +86,9 @@ void SimpleRoutingProgram() { } // namespace operations_research::routing -int main(int /*argc*/, char* /*argv*/[]) { +int main(int argc, char* argv[]) { + InitGoogle(argv[0], &argc, &argv, true); + absl::SetStderrThreshold(absl::LogSeverityAtLeast::kInfo); operations_research::routing::SimpleRoutingProgram(); return EXIT_SUCCESS; } From 04d55fc5cd30d4944a7fd34ae603d6f2dbf72541 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 19 May 2025 14:56:30 +0200 Subject: [PATCH 016/509] bazel: use eigen 3.4.0.bcr.3 note: eigen 4.0.0-20241125.bcr.1 is not an official release ref: https://gitlab.com/libeigen/eigen/-/issues/2907#note_2417354006 --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index ddb4471e4f..ba7820d175 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -17,7 +17,7 @@ bazel_dep(name = "abseil-cpp", version = "20250127.1") bazel_dep(name = "bazel_skylib", version = "1.7.1") bazel_dep(name = "bzip2", version = "1.0.8.bcr.2") bazel_dep(name = "contrib_rules_jvm", version = "0.28.0") -bazel_dep(name = "eigen", version = "4.0.0-20241125.bcr.1") +bazel_dep(name = "eigen", version = "3.4.0.bcr.3") bazel_dep(name = "fuzztest", version = "20250214.0", repo_name = "com_google_fuzztest") bazel_dep(name = "riegeli", version = "0.0.0-20241218-3385e3c") # otherwise fuzztest use a borken version bazel_dep(name = "gazelle", version = "0.42.0", repo_name = "bazel_gazelle") From 45d24e2bd7a827191a0b0a3998d6d9bc8ceb1ca8 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 20 May 2025 14:37:40 +0200 Subject: [PATCH 017/509] add gurobi 12.0.2 
--- ortools/gurobi/environment.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ortools/gurobi/environment.cc b/ortools/gurobi/environment.cc index 29b3cd925d..65485c7e27 100644 --- a/ortools/gurobi/environment.cc +++ b/ortools/gurobi/environment.cc @@ -347,9 +347,9 @@ void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { std::vector GurobiDynamicLibraryPotentialPaths() { std::vector potential_paths; const std::vector kGurobiVersions = { - "1201", "1200", "1103", "1102", "1101", "1100", "1003", - "1002", "1001", "1000", "952", "951", "950", "911", - "910", "903", "902", "811", "801", "752"}; + "1202", "1201", "1200", "1103", "1102", "1101", "1100", + "1003", "1002", "1001", "1000", "952", "951", "950", + "911", "910", "903", "902", "811", "801", "752"}; potential_paths.reserve(kGurobiVersions.size() * 3); // Look for libraries pointed by GUROBI_HOME first. @@ -408,8 +408,8 @@ std::vector GurobiDynamicLibraryPotentialPaths() { #if defined(__GNUC__) // path in linux64 gurobi/optimizer docker image. 
for (const absl::string_view version : - {"12.0.1", "12.0.0", "11.0.3", "11.0.2", "11.0.1", "11.0.0", "10.0.3", - "10.0.2", "10.0.1", "10.0.0", "9.5.2", "9.5.1", "9.5.0"}) { + {"12.0.2", "12.0.1", "12.0.0", "11.0.3", "11.0.2", "11.0.1", "11.0.0", + "10.0.3", "10.0.2", "10.0.1", "10.0.0", "9.5.2", "9.5.1", "9.5.0"}) { potential_paths.push_back( absl::StrCat("/opt/gurobi/linux64/lib/libgurobi.so.", version)); } From d65609dbcfcca10317d6f5df044d91470ca85e4f Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 20 May 2025 14:43:35 +0200 Subject: [PATCH 018/509] cmake: disable shortest_paths_test (timeout) --- ortools/graph/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/ortools/graph/CMakeLists.txt b/ortools/graph/CMakeLists.txt index 98564ae041..976654252c 100644 --- a/ortools/graph/CMakeLists.txt +++ b/ortools/graph/CMakeLists.txt @@ -46,6 +46,7 @@ target_link_libraries(${NAME} PRIVATE if(BUILD_TESTING) file(GLOB _TEST_SRCS "*_test.cc") + list(FILTER _TEST_SRCS EXCLUDE REGEX "shortest_paths_test.cc") # timeout list(FILTER _TEST_SRCS EXCLUDE REGEX "max_flow_test.cc") foreach(_FULL_FILE_NAME IN LISTS _TEST_SRCS) get_filename_component(_NAME ${_FULL_FILE_NAME} NAME_WE) From cf96d6b9f3f4b0d6937310affc95a862fc199ac6 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 20 May 2025 14:44:52 +0200 Subject: [PATCH 019/509] bump go protobuf to 1.36.5 --- MODULE.bazel | 4 ++-- go.sum | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index ba7820d175..0d6e05df49 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -152,8 +152,8 @@ go_deps.module( ) go_deps.module( path = "google.golang.org/protobuf", - sum = "h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=", - version = "v1.34.2", + sum = "h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=", + version = "v1.36.5", ) go_deps.module( path = "golang.org/x/xerrors", diff --git a/go.sum b/go.sum index 044a27a2f3..71f8fa3afa 100644 --- a/go.sum +++ b/go.sum @@ -2,7 +2,5 @@ 
github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= From 6dc688876d8230bd5d16b6cc92d7c9700b27a6bd Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 21 May 2025 12:37:44 +0200 Subject: [PATCH 020/509] fix #4654 --- ortools/sat/python/cp_model_helper.cc | 11 +++++++++-- ortools/sat/python/cp_model_helper_test.py | 3 +++ ortools/sat/python/cp_model_test.py | 8 ++++++++ ortools/sat/python/linear_expr.cc | 17 +++++++++++++---- ortools/sat/python/linear_expr.h | 6 ++++-- ortools/sat/python/linear_expr_doc.h | 7 +++++-- 6 files changed, 42 insertions(+), 10 deletions(-) diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index a8632552e5..c2aae7fd4c 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -674,12 +674,14 @@ PYBIND11_MODULE(cp_model_helper, m) { DOC(operations_research, sat, python, LinearExpr, AddInt)) .def("__radd__", &LinearExpr::AddFloat, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddFloat)) - .def("__sub__", &LinearExpr::Sub, py::arg("other").none(false), + .def("__sub__", &LinearExpr::Sub, py::arg("h").none(false), DOC(operations_research, sat, python, LinearExpr, Sub)) .def("__sub__", &LinearExpr::SubInt, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubInt)) .def("__sub__", &LinearExpr::SubFloat, 
py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubFloat)) + .def("__rsub__", &LinearExpr::RSub, py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, RSub)) .def("__rsub__", &LinearExpr::RSubInt, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, RSubInt)) .def("__rsub__", &LinearExpr::RSubFloat, py::arg("cst"), @@ -938,7 +940,7 @@ PYBIND11_MODULE(cp_model_helper, m) { } return expr->AddInt(cst); }, - py::arg("other").none(false), + py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddInt)) .def( "__radd__", @@ -952,6 +954,7 @@ PYBIND11_MODULE(cp_model_helper, m) { } return expr->AddFloat(cst); }, + py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddFloat)) .def( "__sub__", @@ -980,6 +983,7 @@ PYBIND11_MODULE(cp_model_helper, m) { } return expr->SubInt(cst); }, + py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubInt)) .def( "__sub__", @@ -993,6 +997,7 @@ PYBIND11_MODULE(cp_model_helper, m) { } return expr->SubFloat(cst); }, + py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubFloat)) .def_property_readonly("num_exprs", &SumArray::num_exprs) .def_property_readonly("int_offset", &SumArray::int_offset) @@ -1028,6 +1033,8 @@ PYBIND11_MODULE(cp_model_helper, m) { DOC(operations_research, sat, python, LinearExpr, SubInt)) .def("__sub__", &LinearExpr::SubFloat, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubFloat)) + .def("__rsub__", &LinearExpr::RSub, py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, RSub)) .def("__rsub__", &IntAffine::RSubInt, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, RSubInt)) .def("__rsub__", &LinearExpr::SubFloat, py::arg("cst"), diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index db2e5efead..e8ee7c4695 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ 
b/ortools/sat/python/cp_model_helper_test.py @@ -319,6 +319,9 @@ class CpModelHelperTest(absltest.TestCase): e11 = cmh.LinearExpr.weighted_sum([x, y, z, 5], [1, 2, 3, -1]) self.assertEqual(str(e11), "(x + 2 * y + 3 * z - 5)") + e12 = x - y - 2 * z + self.assertEqual(str(e12), "(-(2 * z) + (x - y))") + def test_float_lin_expr(self): x = TestIntVar(0, "x") self.assertTrue(x.is_integer()) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index aa88cfd5c5..7d16abcf42 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -246,6 +246,14 @@ class CpModelTest(absltest.TestCase): self.assertEqual(nb.index, -b.index - 1) self.assertRaises(TypeError, x.negated) + def test_issue_4654(self) -> None: + model = cp_model.CpModel() + x = model.NewIntVar(0, 1, "x") + y = model.NewIntVar(0, 2, "y") + z = model.NewIntVar(0, 3, "z") + expr = x - y - 2 * z + self.assertEqual(str(expr), "(-(2 * z) + (x - y))") + def test_equality_overload(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") diff --git a/ortools/sat/python/linear_expr.cc b/ortools/sat/python/linear_expr.cc index e9c58c3aae..b158fb9343 100644 --- a/ortools/sat/python/linear_expr.cc +++ b/ortools/sat/python/linear_expr.cc @@ -69,10 +69,10 @@ std::shared_ptr LinearExpr::ConstantFloat(double value) { return std::make_shared(value); } -std::shared_ptr LinearExpr::Add(std::shared_ptr expr) { +std::shared_ptr LinearExpr::Add(std::shared_ptr other) { std::vector> exprs; exprs.push_back(shared_from_this()); - exprs.push_back(expr); + exprs.push_back(other); return std::make_shared(exprs); } @@ -86,10 +86,10 @@ std::shared_ptr LinearExpr::AddFloat(double cst) { return std::make_shared(shared_from_this(), 1.0, cst); } -std::shared_ptr LinearExpr::Sub(std::shared_ptr expr) { +std::shared_ptr LinearExpr::Sub(std::shared_ptr other) { std::vector> exprs; exprs.push_back(shared_from_this()); - exprs.push_back(expr); + 
exprs.push_back(other); const std::vector coeffs = {1, -1}; return std::make_shared(exprs, coeffs, 0); } @@ -104,6 +104,15 @@ std::shared_ptr LinearExpr::SubFloat(double cst) { return std::make_shared(shared_from_this(), 1.0, -cst); } +std::shared_ptr LinearExpr::RSub( + std::shared_ptr other) { + std::vector> exprs; + exprs.push_back(shared_from_this()); + exprs.push_back(other); + const std::vector coeffs = {-1, 1}; + return std::make_shared(exprs, coeffs, 0); +} + std::shared_ptr LinearExpr::RSubInt(int64_t cst) { return std::make_shared(shared_from_this(), -1, cst); } diff --git a/ortools/sat/python/linear_expr.h b/ortools/sat/python/linear_expr.h index 2f021d9114..631f17f05f 100644 --- a/ortools/sat/python/linear_expr.h +++ b/ortools/sat/python/linear_expr.h @@ -101,17 +101,19 @@ class LinearExpr : public std::enable_shared_from_this { static std::shared_ptr ConstantFloat(double value); /// Returns (this) + (expr). - std::shared_ptr Add(std::shared_ptr expr); + std::shared_ptr Add(std::shared_ptr other); /// Returns (this) + (cst). std::shared_ptr AddInt(int64_t cst); /// Returns (this) + (cst). std::shared_ptr AddFloat(double cst); /// Returns (this) - (expr). - std::shared_ptr Sub(std::shared_ptr expr); + std::shared_ptr Sub(std::shared_ptr other); /// Returns (this) - (cst). std::shared_ptr SubInt(int64_t cst); /// Returns (this) - (cst). std::shared_ptr SubFloat(double cst); + /// Returns (expr) - (this). + std::shared_ptr RSub(std::shared_ptr other); /// Returns (cst) - (this). std::shared_ptr RSubInt(int64_t cst); /// Returns (cst) - (this). 
diff --git a/ortools/sat/python/linear_expr_doc.h b/ortools/sat/python/linear_expr_doc.h index e26ddff411..d36484d457 100644 --- a/ortools/sat/python/linear_expr_doc.h +++ b/ortools/sat/python/linear_expr_doc.h @@ -644,7 +644,7 @@ model.add(cp_model.LinearExpr.weighted_sum(expressions, coefficients) >= 0) ```)doc"; static const char* __doc_operations_research_sat_python_LinearExpr_Add = - R"doc(Returns (this) + (expr).)doc"; + R"doc(Returns (this) + (other).)doc"; static const char* __doc_operations_research_sat_python_LinearExpr_AddFloat = R"doc(Returns (this) + (cst).)doc"; @@ -728,6 +728,9 @@ static const char* __doc_operations_research_sat_python_LinearExpr_NeCst = static const char* __doc_operations_research_sat_python_LinearExpr_Neg = R"doc(Returns -(this).)doc"; +static const char* __doc_operations_research_sat_python_LinearExpr_RSub = + R"doc(Returns (other) - (this).)doc"; + static const char* __doc_operations_research_sat_python_LinearExpr_RSubFloat = R"doc(Returns (cst) - (this).)doc"; @@ -735,7 +738,7 @@ static const char* __doc_operations_research_sat_python_LinearExpr_RSubInt = R"doc(Returns (cst) - (this).)doc"; static const char* __doc_operations_research_sat_python_LinearExpr_Sub = - R"doc(Returns (this) - (expr).)doc"; + R"doc(Returns (this) - (other).)doc"; static const char* __doc_operations_research_sat_python_LinearExpr_SubFloat = R"doc(Returns (this) - (cst).)doc"; From f8549c6f68ed208dc4d57f4d06abaff7e7df9138 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 21 May 2025 17:37:51 +0200 Subject: [PATCH 021/509] cmake: use GLPK 5.0.1 (#4656) --- cmake/dependencies/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 7d2ea4b967..dacf587bd5 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -264,7 +264,7 @@ if(BUILD_GLPK) FetchContent_Declare( glpk GIT_REPOSITORY 
"https://github.com/Mizux/GLPK.git" - GIT_TAG "5.0" + GIT_TAG "5.0.1" GIT_SHALLOW TRUE ) FetchContent_MakeAvailable(glpk) From ed1b05d1d823968e5160dfc5bf20794867c1c6c7 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 20 May 2025 22:50:51 +0200 Subject: [PATCH 022/509] bazel: update deps * don't use repo name * Bump abseil-cpp to 20250512.0 * Bump Protobuf to 31.0 * Bump benchmark to 1.9.2 * Bump googletest to 1.17.0 --- BUILD.bazel | 2 +- Dependencies.txt | 10 ++--- MODULE.bazel | 20 ++++----- bazel/notebook_requirements.in | 18 ++++---- bazel/notebook_requirements.txt | 20 ++++----- bazel/ortools_requirements.in | 8 ++-- bazel/ortools_requirements.txt | 6 +-- deps.bzl | 2 +- examples/contrib/BUILD.bazel | 2 +- examples/cpp/BUILD.bazel | 28 ++++++------ ortools/algorithms/BUILD.bazel | 8 ++-- ortools/base/BUILD.bazel | 38 ++++++++-------- ortools/bop/BUILD.bazel | 14 +++--- ortools/constraint_solver/BUILD.bazel | 8 ++-- ortools/glop/BUILD.bazel | 6 +-- ortools/graph/BUILD.bazel | 36 +++++++-------- ortools/gscip/BUILD.bazel | 6 +-- ortools/linear_solver/BUILD.bazel | 6 +-- .../linear_solver/proto_solver/BUILD.bazel | 2 +- ortools/lp_data/BUILD.bazel | 2 +- ortools/math_opt/BUILD.bazel | 14 +++--- ortools/math_opt/core/BUILD.bazel | 4 +- ortools/math_opt/cpp/BUILD.bazel | 6 +-- ortools/math_opt/elemental/BUILD.bazel | 10 ++--- ortools/math_opt/python/BUILD.bazel | 2 +- ortools/math_opt/python/ipc/BUILD.bazel | 4 +- ortools/math_opt/python/testing/BUILD.bazel | 4 +- ortools/math_opt/solvers/BUILD.bazel | 10 ++--- ortools/math_opt/storage/BUILD.bazel | 4 +- ortools/math_opt/validators/BUILD.bazel | 6 +-- ortools/packing/BUILD.bazel | 4 +- ortools/pdlp/BUILD.bazel | 8 ++-- ortools/port/BUILD.bazel | 2 +- ortools/routing/BUILD.bazel | 14 +++--- ortools/routing/parsers/BUILD.bazel | 22 +++++----- ortools/sat/BUILD.bazel | 44 +++++++++---------- ortools/sat/go/cpmodel/BUILD.bazel | 2 +- ortools/sat/java/BUILD.bazel | 2 +- ortools/sat/python/BUILD.bazel | 2 +- 
ortools/sat/samples/code_samples.bzl | 2 +- ortools/scheduling/BUILD.bazel | 10 ++--- ortools/service/v1/BUILD.bazel | 6 +-- ortools/service/v1/mathopt/BUILD.bazel | 12 ++--- ortools/set_cover/BUILD.bazel | 10 ++--- ortools/util/BUILD.bazel | 12 ++--- patches/BUILD.bazel | 4 +- 46 files changed, 232 insertions(+), 230 deletions(-) diff --git a/BUILD.bazel b/BUILD.bazel index 6023c2479d..d76269a5a5 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@bazel_gazelle//:def.bzl", "gazelle") +load("@gazelle//:def.bzl", "gazelle") load("@rules_license//rules:license.bzl", "license") # Expose license for external usage through bazel. diff --git a/Dependencies.txt b/Dependencies.txt index c9ebed0a22..b1dc1ed9b7 100644 --- a/Dependencies.txt +++ b/Dependencies.txt @@ -1,8 +1,8 @@ ZLIB=1.3.1 -abseil-cpp=20250127.1 -Protobuf=v30.2 +abseil-cpp=20250512.0 +Protobuf=v31.0 Eigen=3.4.0 -Re2=2024-04-01 +Re2=2024-07-02 CoinUtils=2.11.12 Osi=0.108.11 Clp=1.17.10 @@ -16,5 +16,5 @@ pybind11=v2.13.6 pybind11_abseil=v202402.0 pybind11_protobuf=84653a591aea5df482dc2bde42c19efafbd53a57 # Testing -googletest=v1.16.0 -benchmark=v1.9.1 +googletest=v1.17.0 +benchmark=v1.9.2 diff --git a/MODULE.bazel b/MODULE.bazel index 0d6e05df49..97c5a55401 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -13,26 +13,26 @@ module( ) # see https://registry.bazel.build/ -bazel_dep(name = "abseil-cpp", version = "20250127.1") +bazel_dep(name = "abseil-cpp", version = "20250512.0") bazel_dep(name = "bazel_skylib", version = "1.7.1") bazel_dep(name = "bzip2", version = "1.0.8.bcr.2") bazel_dep(name = "contrib_rules_jvm", version = "0.28.0") bazel_dep(name = "eigen", version = "3.4.0.bcr.3") -bazel_dep(name = "fuzztest", version = "20250214.0", repo_name = "com_google_fuzztest") +bazel_dep(name = "fuzztest", version = "20250214.0") bazel_dep(name = "riegeli", version = "0.0.0-20241218-3385e3c") # 
otherwise fuzztest use a borken version -bazel_dep(name = "gazelle", version = "0.42.0", repo_name = "bazel_gazelle") +bazel_dep(name = "gazelle", version = "0.43.0") bazel_dep(name = "glpk", version = "5.0.bcr.3") -bazel_dep(name = "google_benchmark", version = "1.9.1", repo_name = "com_google_benchmark") -bazel_dep(name = "googletest", version = "1.16.0", repo_name = "com_google_googletest") +bazel_dep(name = "google_benchmark", version = "1.9.2") +bazel_dep(name = "googletest", version = "1.17.0") bazel_dep(name = "highs", version = "1.10.0") bazel_dep(name = "platforms", version = "0.0.11") -bazel_dep(name = "protobuf", version = "30.2", repo_name = "com_google_protobuf") +bazel_dep(name = "protobuf", version = "31.0") bazel_dep(name = "pybind11_abseil", version = "202402.0") bazel_dep(name = "pybind11_bazel", version = "2.13.6") bazel_dep(name = "pybind11_protobuf", version = "0.0.0-20240524-1d7a729") -bazel_dep(name = "re2", version = "2024-07-02.bcr.1", repo_name = "com_google_re2") +bazel_dep(name = "re2", version = "2024-07-02.bcr.1") bazel_dep(name = "rules_cc", version = "0.1.1") -bazel_dep(name = "rules_go", version = "0.53.0", repo_name = "io_bazel_rules_go") +bazel_dep(name = "rules_go", version = "0.53.0") bazel_dep(name = "rules_java", version = "8.11.0") bazel_dep(name = "rules_jvm_external", version = "6.7") bazel_dep(name = "rules_license", version = "1.0.0") @@ -131,10 +131,10 @@ maven.install( ) use_repo(maven, "maven") -go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") +go_sdk = use_extension("@rules_go//go:extensions.bzl", "go_sdk") go_sdk.download(version = "1.22.4") -go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") +go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps") go_deps.module( path = "github.com/golang/glog", sum = "h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=", diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index 56402811ad..af64100600 100644 
--- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -1,8 +1,8 @@ # OR-Tools code dependencies -absl-py==2.1.0 -immutabledict==3.0.0 +absl-py==2.2.2 +immutabledict==4.2.1 numpy==2.2.0 -protobuf==6.30.2 +protobuf==6.31.0 requests==2.32.3 scipy==1.14.1 typing-extensions==4.13.1 @@ -21,10 +21,10 @@ svgwrite==1.4.3 plotly==5.15.0 # Notebook -jupyterlab==4.4.1 -notebook==7.4.1 -jupyter-server==2.15.0 -tornado==6.4.2 -Pygments==2.15.0 -jsonschema==4.19.0 +jupyterlab==4.4.2 +notebook==7.4.2 +jupyter-server==2.16.0 +tornado==6.5.0 +Pygments==2.19.1 +jsonschema==4.23.0 jinja2==3.1.6 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index 88cdc1bd3f..7fbe03c9bc 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -4,7 +4,7 @@ # # bazel run //bazel:notebook_requirements.update # -absl-py==2.1.0 +absl-py==2.2.2 # via -r bazel/notebook_requirements.in anyio==4.0.0 # via @@ -75,7 +75,7 @@ idna==3.7 # httpx # jsonschema # requests -immutabledict==3.0.0 +immutabledict==4.2.1 # via -r bazel/notebook_requirements.in ipykernel==6.25.2 # via jupyterlab @@ -96,7 +96,7 @@ json5==0.9.14 # via jupyterlab-server jsonpointer==2.4 # via jsonschema -jsonschema[format-nongpl]==4.19.0 +jsonschema[format-nongpl]==4.23.0 # via # -r bazel/notebook_requirements.in # jupyter-events @@ -122,7 +122,7 @@ jupyter-events==0.12.0 # via jupyter-server jupyter-lsp==2.2.2 # via jupyterlab -jupyter-server==2.15.0 +jupyter-server==2.16.0 # via # -r bazel/notebook_requirements.in # jupyter-lsp @@ -132,7 +132,7 @@ jupyter-server==2.15.0 # notebook-shim jupyter-server-terminals==0.4.4 # via jupyter-server -jupyterlab==4.4.1 +jupyterlab==4.4.2 # via # -r bazel/notebook_requirements.in # notebook @@ -171,7 +171,7 @@ nbformat==5.9.2 # nbconvert nest-asyncio==1.5.7 # via ipykernel -notebook==7.4.1 +notebook==7.4.2 # via -r bazel/notebook_requirements.in notebook-shim==0.2.3 # via @@ -217,7 +217,7 @@ prometheus-client==0.17.1 # via 
jupyter-server prompt-toolkit==3.0.39 # via ipython -protobuf==6.30.2 +protobuf==6.31.0 # via # -r bazel/notebook_requirements.in # mypy-protobuf @@ -231,7 +231,7 @@ pure-eval==0.2.2 # via stack-data pycparser==2.21 # via cffi -pygments==2.15.0 +pygments==2.19.1 # via # -r bazel/notebook_requirements.in # ipython @@ -301,7 +301,7 @@ terminado==0.17.1 # jupyter-server-terminals tinycss2==1.2.1 # via nbconvert -tornado==6.4.2 +tornado==6.5 # via # -r bazel/notebook_requirements.in # ipykernel @@ -340,7 +340,7 @@ virtualenv==20.28.1 # via -r bazel/notebook_requirements.in wcwidth==0.2.6 # via prompt-toolkit -webcolors==1.13 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via diff --git a/bazel/ortools_requirements.in b/bazel/ortools_requirements.in index 5dc2da0ffe..0b4c89ab40 100644 --- a/bazel/ortools_requirements.in +++ b/bazel/ortools_requirements.in @@ -1,8 +1,8 @@ # OR-Tools code dependencies -absl-py==2.1.0 -immutabledict==3.0.0 +absl-py==2.2.2 +immutabledict==4.2.1 numpy==2.2.0 -protobuf==6.30.2 +protobuf==6.31.0 requests==2.32.3 scipy==1.14.1 typing-extensions==4.13.1 @@ -15,4 +15,6 @@ black==24.8.0 # Example dependencies pandas==2.2.3 + +# Visualization dependencies svgwrite==1.4.3 diff --git a/bazel/ortools_requirements.txt b/bazel/ortools_requirements.txt index 2c6f29d454..820668e036 100644 --- a/bazel/ortools_requirements.txt +++ b/bazel/ortools_requirements.txt @@ -4,7 +4,7 @@ # # bazel run //bazel:ortools_requirements.update # -absl-py==2.1.0 +absl-py==2.2.2 # via -r bazel/ortools_requirements.in black==24.8.0 # via -r bazel/ortools_requirements.in @@ -20,7 +20,7 @@ filelock==3.12.2 # via virtualenv idna==3.7 # via requests -immutabledict==3.0.0 +immutabledict==4.2.1 # via -r bazel/ortools_requirements.in mypy==1.6.1 # via -r bazel/ortools_requirements.in @@ -45,7 +45,7 @@ platformdirs==3.10.0 # via # black # virtualenv -protobuf==6.30.2 +protobuf==6.31.0 # via # -r bazel/ortools_requirements.in # mypy-protobuf diff --git a/deps.bzl 
b/deps.bzl index 9f3ef22a53..0df804bae9 100644 --- a/deps.bzl +++ b/deps.bzl @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@bazel_gazelle//:deps.bzl", "go_repository") +load("@gazelle//:deps.bzl", "go_repository") def go_dependencies(): go_repository( diff --git a/examples/contrib/BUILD.bazel b/examples/contrib/BUILD.bazel index b6f58fc0c8..e29c6af2d0 100644 --- a/examples/contrib/BUILD.bazel +++ b/examples/contrib/BUILD.bazel @@ -11,6 +11,6 @@ cc_binary( "@abseil-cpp//absl/container:btree", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/examples/cpp/BUILD.bazel b/examples/cpp/BUILD.bazel index 93647cc9ec..51716946da 100644 --- a/examples/cpp/BUILD.bazel +++ b/examples/cpp/BUILD.bazel @@ -33,7 +33,7 @@ cc_binary( "@abseil-cpp//absl/container:btree", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -52,7 +52,7 @@ cc_test( "//ortools/packing:multiple_dimensions_bin_packing_cc_proto", "//ortools/sat:cp_model", "@abseil-cpp//absl/flags:flag", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -200,7 +200,7 @@ cc_binary( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/flags:parse", "@abseil-cpp//absl/strings:str_format", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -216,7 +216,7 @@ cc_test( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/flags:parse", "@abseil-cpp//absl/strings:str_format", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -232,7 +232,7 @@ cc_binary( "//ortools/sat:cp_model", "//ortools/sat:cp_model_solver", "@abseil-cpp//absl/flags:flag", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -250,7 +250,7 @@ cc_test( "//ortools/packing:multiple_dimensions_bin_packing_cc_proto", "//ortools/sat:cp_model", 
"@abseil-cpp//absl/flags:flag", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -405,7 +405,7 @@ cc_binary( "//ortools/util:filelineiter", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -433,7 +433,7 @@ cc_test( "//ortools/util:filelineiter", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -607,7 +607,7 @@ cc_binary( "//ortools/routing", "//ortools/util:random_engine", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -623,7 +623,7 @@ cc_binary( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -766,7 +766,7 @@ cc_binary( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -787,7 +787,7 @@ cc_test( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -943,7 +943,7 @@ cc_binary( "//ortools/util:qap_reader", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -977,7 +977,7 @@ cc_binary( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/flags:parse", "@abseil-cpp//absl/random", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index 2d520cb7da..c1dacf06bc 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -86,7 +86,7 @@ cc_test( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", - "@com_google_benchmark//:benchmark", + 
"@google_benchmark//:benchmark", ], ) @@ -127,7 +127,7 @@ cc_test( "@abseil-cpp//absl/random:bit_gen_ref", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -141,7 +141,7 @@ cc_library( "@abseil-cpp//absl/random", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -439,6 +439,6 @@ cc_test( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index 97cb61d29d..1e53dc3b5e 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -162,8 +162,8 @@ cc_test( "@abseil-cpp//absl/random", "@abseil-cpp//absl/random:bit_gen_ref", "@abseil-cpp//absl/random:distributions", - "@com_google_benchmark//:benchmark", - "@com_google_googletest//:gtest_main", + "@google_benchmark//:benchmark", + "@googletest//:gtest_main", ], ) @@ -200,7 +200,7 @@ cc_test( deps = [ ":dump_vars", "@abseil-cpp//absl/strings", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -253,7 +253,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@bzip2//:bz2", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", "@zlib", ], ) @@ -264,10 +264,10 @@ cc_library( hdrs = ["fuzztest.h"], deps = [ "@abseil-cpp//absl/log:check", - "@com_google_fuzztest//fuzztest", - "@com_google_fuzztest//fuzztest:googletest_fixture_adapter", - "@com_google_fuzztest//fuzztest:init_fuzztest", - "@com_google_protobuf//:protobuf", + "@fuzztest//fuzztest", + "@fuzztest//fuzztest:googletest_fixture_adapter", + "@fuzztest//fuzztest:init_fuzztest", + "@protobuf//:protobuf", ], ) @@ -277,7 +277,7 @@ cc_library( deps = [ ":message_matchers", ":status_matchers", - 
"@com_google_googletest//:gtest", + "@googletest//:gtest", ], ) @@ -285,7 +285,7 @@ cc_library( name = "gmock_main", deps = [ ":gmock", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -423,8 +423,8 @@ cc_library( hdrs = ["message_matchers.h"], deps = [ "@abseil-cpp//absl/strings", - "@com_google_googletest//:gtest", - "@com_google_protobuf//:protobuf", + "@googletest//:gtest", + "@protobuf//:protobuf", ], ) @@ -459,7 +459,7 @@ cc_library( hdrs = ["parse_text_proto.h"], deps = [ "@abseil-cpp//absl/log:check", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -469,7 +469,7 @@ cc_library( deps = [ ":gmock", "@abseil-cpp//absl/log:check", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -495,7 +495,7 @@ cc_library( ":timer", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -504,7 +504,7 @@ cc_library( hdrs = ["proto_enum_utils.h"], deps = [ "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -523,7 +523,7 @@ cc_library( ":logging", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", "@zlib", ], ) @@ -558,7 +558,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", - "@com_google_googletest//:gtest", + "@googletest//:gtest", ], ) @@ -605,7 +605,7 @@ cc_test( "@abseil-cpp//absl/numeric:int128", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) diff --git a/ortools/bop/BUILD.bazel b/ortools/bop/BUILD.bazel index ae847c0fd4..fcab0d1ee3 100644 --- a/ortools/bop/BUILD.bazel +++ b/ortools/bop/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) @@ -119,7 +119,7 @@ cc_library( "//ortools/util:stats", "//ortools/util:time_limit", "@abseil-cpp//absl/random", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -149,7 +149,7 @@ cc_library( "//ortools/util:time_limit", "@abseil-cpp//absl/random", "@abseil-cpp//absl/cleanup", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -258,7 +258,7 @@ cc_library( "//ortools/base:threadpool", "//ortools/util:bitset", "//ortools/util:time_limit", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", "//ortools/util:stats", ], ) @@ -291,6 +291,6 @@ cc_library( "//ortools/util:bitset", "//ortools/util:stats", "//ortools/util:time_limit", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/constraint_solver/BUILD.bazel b/ortools/constraint_solver/BUILD.bazel index bf15499a69..33bf13b813 100644 --- a/ortools/constraint_solver/BUILD.bazel +++ b/ortools/constraint_solver/BUILD.bazel @@ -13,10 +13,10 @@ # Home of constraint solver constraint_solver -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:java_proto_library.bzl", "java_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:java_proto_library.bzl", 
"java_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") load("@rules_python//python:defs.bzl", "py_library") diff --git a/ortools/glop/BUILD.bazel b/ortools/glop/BUILD.bazel index ac1b3756ca..95860dd0c0 100644 --- a/ortools/glop/BUILD.bazel +++ b/ortools/glop/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package( diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index ce0aeeba3c..9c04dee929 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -11,8 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) @@ -56,7 +56,7 @@ cc_test( "@abseil-cpp//absl/random", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -111,7 +111,7 @@ cc_test( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random", "@abseil-cpp//absl/random:distributions", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -132,7 +132,7 @@ cc_test( ":minimum_vertex_cover", "//ortools/base:gmock_main", "@abseil-cpp//absl/algorithm:container", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -229,7 +229,7 @@ cc_test( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -290,7 +290,7 @@ cc_test( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -310,7 +310,7 @@ cc_test( ":graph", "//ortools/base:gmock_main", "@abseil-cpp//absl/base:core_headers", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -337,7 +337,7 @@ cc_test( "@abseil-cpp//absl/base:core_headers", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -430,7 +430,7 @@ cc_test( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", - "@com_google_benchmark//:benchmark", + 
"@google_benchmark//:benchmark", ], ) @@ -467,7 +467,7 @@ cc_test( "//ortools/base:gmock_main", "//ortools/base:path", "//ortools/util:file_util", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -498,7 +498,7 @@ cc_test( "@abseil-cpp//absl/random:bit_gen_ref", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -541,7 +541,7 @@ cc_test( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ":graph", # Using CLP because GLOP is too slow in non-opt mode. "//ortools/algorithms:binary_search", @@ -598,7 +598,7 @@ cc_test( "//ortools/base:gmock_main", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random:distributions", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -641,7 +641,7 @@ cc_test( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -754,7 +754,7 @@ cc_test( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random", "@abseil-cpp//absl/status", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -790,7 +790,7 @@ cc_test( "@abseil-cpp//absl/random", "@abseil-cpp//absl/status", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -810,7 +810,7 @@ cc_test( "@abseil-cpp//absl/random", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) diff --git a/ortools/gscip/BUILD.bazel b/ortools/gscip/BUILD.bazel index 690ab7ccb8..1c7e016f10 100644 --- a/ortools/gscip/BUILD.bazel +++ b/ortools/gscip/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language 
governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package( diff --git a/ortools/linear_solver/BUILD.bazel b/ortools/linear_solver/BUILD.bazel index f561c458c9..4a7ed5e0ad 100644 --- a/ortools/linear_solver/BUILD.bazel +++ b/ortools/linear_solver/BUILD.bazel @@ -13,9 +13,9 @@ load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") load("@bazel_skylib//rules:copy_file.bzl", "copy_file") -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/linear_solver/proto_solver/BUILD.bazel b/ortools/linear_solver/proto_solver/BUILD.bazel index 4f8587c492..3693aa44d6 100644 --- a/ortools/linear_solver/proto_solver/BUILD.bazel +++ b/ortools/linear_solver/proto_solver/BUILD.bazel @@ -20,7 +20,7 @@ cc_library( deps = [ "//ortools/port:proto_utils", "@abseil-cpp//absl/log:check", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/lp_data/BUILD.bazel b/ortools/lp_data/BUILD.bazel index 5cfd838912..84b4b74471 100644 --- a/ortools/lp_data/BUILD.bazel +++ b/ortools/lp_data/BUILD.bazel @@ -250,7 
+250,7 @@ cc_library( "//ortools/linear_solver:linear_solver_cc_proto", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", - "@com_google_re2//:re2", + "@re2//:re2", ], ) diff --git a/ortools/math_opt/BUILD.bazel b/ortools/math_opt/BUILD.bazel index 5ee4a2102d..808fb161c6 100644 --- a/ortools/math_opt/BUILD.bazel +++ b/ortools/math_opt/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -22,7 +22,7 @@ proto_library( srcs = ["callback.proto"], deps = [ ":sparse_containers_proto", - "@com_google_protobuf//:duration_proto", + "@protobuf//:duration_proto", ], ) @@ -64,7 +64,7 @@ proto_library( deps = [ ":solution_proto", ":sparse_containers_proto", - "@com_google_protobuf//:duration_proto", + "@protobuf//:duration_proto", ], ) @@ -113,7 +113,7 @@ proto_library( "//ortools/math_opt/solvers:osqp_proto", "//ortools/pdlp:solvers_proto", "//ortools/sat:sat_parameters_proto", - "@com_google_protobuf//:duration_proto", + "@protobuf//:duration_proto", ], ) @@ -137,7 +137,7 @@ proto_library( "//ortools/gscip:gscip_proto", "//ortools/math_opt/solvers:osqp_proto", "//ortools/pdlp:solve_log_proto", - "@com_google_protobuf//:duration_proto", + "@protobuf//:duration_proto", ], ) diff --git a/ortools/math_opt/core/BUILD.bazel b/ortools/math_opt/core/BUILD.bazel index 5f0d2ff780..a914190703 100644 --- a/ortools/math_opt/core/BUILD.bazel +++ b/ortools/math_opt/core/BUILD.bazel @@ -51,7 +51,7 @@ cc_library( 
"//ortools/math_opt:sparse_containers_cc_proto", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -228,7 +228,7 @@ cc_library( "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/container:flat_hash_set", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/math_opt/cpp/BUILD.bazel b/ortools/math_opt/cpp/BUILD.bazel index 04799b7d48..22bbe52c84 100644 --- a/ortools/math_opt/cpp/BUILD.bazel +++ b/ortools/math_opt/cpp/BUILD.bazel @@ -63,7 +63,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -296,7 +296,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/time", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -326,7 +326,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/synchronization", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/math_opt/elemental/BUILD.bazel b/ortools/math_opt/elemental/BUILD.bazel index 1fbe3dbf27..039c7eafc8 100644 --- a/ortools/math_opt/elemental/BUILD.bazel +++ b/ortools/math_opt/elemental/BUILD.bazel @@ -78,7 +78,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:string_view", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -98,7 +98,7 @@ cc_test( "//ortools/base:gmock_main", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/status", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -152,7 +152,7 @@ cc_test( ":element_storage", "//ortools/base:gmock_main", "@abseil-cpp//absl/status", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], 
) @@ -221,7 +221,7 @@ cc_test( ":attr_storage", ":symmetry", "//ortools/base:gmock_main", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -278,7 +278,7 @@ cc_test( "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) diff --git a/ortools/math_opt/python/BUILD.bazel b/ortools/math_opt/python/BUILD.bazel index a97fa830be..facc0b7e87 100644 --- a/ortools/math_opt/python/BUILD.bazel +++ b/ortools/math_opt/python/BUILD.bazel @@ -560,7 +560,7 @@ py_library( name = "normalize", srcs = ["normalize.py"], visibility = ["//ortools/math_opt/python:__subpackages__"], - deps = ["@com_google_protobuf//:protobuf_python"], + deps = ["@protobuf//:protobuf_python"], ) py_test( diff --git a/ortools/math_opt/python/ipc/BUILD.bazel b/ortools/math_opt/python/ipc/BUILD.bazel index ccf05ed987..79d3dd9911 100644 --- a/ortools/math_opt/python/ipc/BUILD.bazel +++ b/ortools/math_opt/python/ipc/BUILD.bazel @@ -24,7 +24,7 @@ py_library( requirement("requests"), "//ortools/math_opt:rpc_py_pb2", "//ortools/math_opt/python:mathopt", - "@com_google_protobuf//:protobuf_python", + "@protobuf//:protobuf_python", ], ) @@ -35,6 +35,6 @@ py_library( "//ortools/math_opt:rpc_py_pb2", "//ortools/math_opt/python:normalize", "//ortools/service/v1:optimization_py_pb2", - "@com_google_protobuf//:protobuf_python", + "@protobuf//:protobuf_python", ], ) diff --git a/ortools/math_opt/python/testing/BUILD.bazel b/ortools/math_opt/python/testing/BUILD.bazel index ef4cbc954f..289d42b5b1 100644 --- a/ortools/math_opt/python/testing/BUILD.bazel +++ b/ortools/math_opt/python/testing/BUILD.bazel @@ -23,7 +23,7 @@ py_library( deps = [ requirement("absl-py"), "//ortools/math_opt/python:normalize", - "@com_google_protobuf//:protobuf_python", + "@protobuf//:protobuf_python", ], ) @@ -44,7 +44,7 @@ py_library( srcs = ["proto_matcher.py"], deps = [ 
"//ortools/math_opt/python:normalize", - "@com_google_protobuf//:protobuf_python", + "@protobuf//:protobuf_python", ], ) diff --git a/ortools/math_opt/solvers/BUILD.bazel b/ortools/math_opt/solvers/BUILD.bazel index f007b60c52..d01605400e 100644 --- a/ortools/math_opt/solvers/BUILD.bazel +++ b/ortools/math_opt/solvers/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:cc_library.bzl", "cc_library") load("@rules_cc//cc:cc_test.bzl", "cc_test") @@ -66,7 +66,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", "@scip", ], alwayslink = 1, @@ -187,7 +187,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], alwayslink = 1, ) diff --git a/ortools/math_opt/storage/BUILD.bazel b/ortools/math_opt/storage/BUILD.bazel index 605fcbe9ae..17878bd805 100644 --- a/ortools/math_opt/storage/BUILD.bazel +++ b/ortools/math_opt/storage/BUILD.bazel @@ -130,7 +130,7 @@ cc_library( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/strings:string_view", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -170,7 +170,7 @@ cc_library( "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/log:check", - "@com_google_protobuf//:protobuf", + 
"@protobuf//:protobuf", ], ) diff --git a/ortools/math_opt/validators/BUILD.bazel b/ortools/math_opt/validators/BUILD.bazel index c14541ff38..f2e725a7a8 100644 --- a/ortools/math_opt/validators/BUILD.bazel +++ b/ortools/math_opt/validators/BUILD.bazel @@ -122,7 +122,7 @@ cc_library( "//ortools/math_opt/core:model_summary", "//ortools/port:proto_utils", "@abseil-cpp//absl/status", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -184,7 +184,7 @@ cc_library( "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -205,7 +205,7 @@ cc_library( "//ortools/util:status_macros", "@abseil-cpp//absl/status", "@abseil-cpp//absl/time", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/packing/BUILD.bazel b/ortools/packing/BUILD.bazel index c6b75b43fb..a2ce3c9cf0 100644 --- a/ortools/packing/BUILD.bazel +++ b/ortools/packing/BUILD.bazel @@ -11,8 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/pdlp/BUILD.bazel b/ortools/pdlp/BUILD.bazel index 8c3bb36edc..233b229c81 100644 --- a/ortools/pdlp/BUILD.bazel +++ b/ortools/pdlp/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) @@ -149,7 +149,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", "@eigen", ], ) diff --git a/ortools/port/BUILD.bazel b/ortools/port/BUILD.bazel index e5eedc3da9..59dffc25bb 100644 --- a/ortools/port/BUILD.bazel +++ b/ortools/port/BUILD.bazel @@ -32,7 +32,7 @@ cc_library( "//ortools/util:parse_proto", "@abseil-cpp//absl/log", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/routing/BUILD.bazel b/ortools/routing/BUILD.bazel index 83675e15cc..bbe029185c 100644 --- a/ortools/routing/BUILD.bazel +++ b/ortools/routing/BUILD.bazel @@ -11,10 +11,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:java_proto_library.bzl", "java_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:java_proto_library.bzl", "java_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) @@ -79,7 +79,7 @@ proto_library( "//ortools/constraint_solver:solver_parameters_proto", "//ortools/sat:sat_parameters_proto", "//ortools/util:optional_boolean_proto", - "@com_google_protobuf//:duration_proto", + "@protobuf//:duration_proto", ], ) @@ -125,7 +125,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -289,7 +289,7 @@ cc_library( "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/routing/parsers/BUILD.bazel b/ortools/routing/parsers/BUILD.bazel index 359020913f..925717864c 100644 --- a/ortools/routing/parsers/BUILD.bazel +++ b/ortools/routing/parsers/BUILD.bazel @@ -11,8 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) @@ -43,7 +43,7 @@ cc_test( deps = [ ":simple_graph", "@abseil-cpp//absl/hash:hash_testing", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -60,7 +60,7 @@ cc_library( "//ortools/base:zipfile", "//ortools/util:filelineiter", "@abseil-cpp//absl/strings", - "@com_google_re2//:re2", + "@re2//:re2", ], ) @@ -77,7 +77,7 @@ cc_test( "//ortools/base", "//ortools/base:file", "//ortools/base:gmock", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -108,7 +108,7 @@ cc_test( ":lilim_parser", "//ortools/base:path", "@abseil-cpp//absl/flags:flag", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -184,7 +184,7 @@ cc_test( "//ortools/base:path", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/hash:hash_testing", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -218,7 +218,7 @@ cc_test( ":pdtsp_parser", "//ortools/base", "//ortools/base:path", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -239,7 +239,7 @@ cc_library( "//ortools/util:filelineiter", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/strings", - "@com_google_re2//:re2", + "@re2//:re2", ], ) @@ -266,7 +266,7 @@ cc_test( "@abseil-cpp//absl/container:btree", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) @@ -300,7 +300,7 @@ cc_test( deps = [ ":tsptw_parser", "//ortools/base", - "@com_google_googletest//:gtest_main", + "@googletest//:gtest_main", ], ) diff --git a/ortools/sat/BUILD.bazel 
b/ortools/sat/BUILD.bazel index b9ad165420..e7094dc654 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -13,11 +13,11 @@ # Home of CP/SAT solver (which includes SAT, max-SAT and PB problems). -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:java_proto_library.bzl", "java_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:java_proto_library.bzl", "java_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@rules_go//proto:def.bzl", "go_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") package(default_visibility = ["//visibility:public"]) @@ -149,7 +149,7 @@ cc_library( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -236,7 +236,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -496,7 +496,7 @@ cc_test( ":parameters_validation", ":sat_parameters_cc_proto", "//ortools/base:gmock_main", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -632,7 +632,7 @@ cc_library( "@abseil-cpp//absl/synchronization", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -760,7 +760,7 @@ cc_library( "@abseil-cpp//absl/synchronization", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -1114,7 +1114,7 @@ 
cc_library( "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -1230,7 +1230,7 @@ cc_library( "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -2099,7 +2099,7 @@ cc_test( "//ortools/util:strong_integers", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -2554,7 +2554,7 @@ cc_library( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -3154,7 +3154,7 @@ cc_library( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -3180,7 +3180,7 @@ cc_test( "@abseil-cpp//absl/random", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -3361,7 +3361,7 @@ cc_test( "@abseil-cpp//absl/random:bit_gen_ref", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -3442,7 +3442,7 @@ cc_test( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -3514,8 +3514,8 @@ cc_test( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", - "@com_google_fuzztest//fuzztest:fuzztest_gtest_main", + "@google_benchmark//:benchmark", + "@fuzztest//fuzztest:fuzztest_gtest_main", ], ) @@ -3996,7 +3996,7 @@ cc_binary( 
"@abseil-cpp//absl/log:flags", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -4072,7 +4072,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/sat/go/cpmodel/BUILD.bazel b/ortools/sat/go/cpmodel/BUILD.bazel index 3b12cff8df..8fdf87ee0c 100644 --- a/ortools/sat/go/cpmodel/BUILD.bazel +++ b/ortools/sat/go/cpmodel/BUILD.bazel @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "cpmodel", diff --git a/ortools/sat/java/BUILD.bazel b/ortools/sat/java/BUILD.bazel index 7eb15d49fa..dc1589eb1e 100644 --- a/ortools/sat/java/BUILD.bazel +++ b/ortools/sat/java/BUILD.bazel @@ -24,7 +24,7 @@ ortools_java_wrap_cc( "//ortools/sat:cp_model_java_proto", "//ortools/sat:sat_parameters_java_proto", "//ortools/util/java:sorted_interval_list", - "@com_google_protobuf//java/core", + "@protobuf//java/core", ], package = "com.google.ortools.sat", swig_includes = [ diff --git a/ortools/sat/python/BUILD.bazel b/ortools/sat/python/BUILD.bazel index 25b9f6fe8e..d73f0fa96f 100644 --- a/ortools/sat/python/BUILD.bazel +++ b/ortools/sat/python/BUILD.bazel @@ -73,7 +73,7 @@ py_library( deps = [ ":cp_model_helper", requirement("numpy"), - "@com_google_protobuf//:protobuf_python", + "@protobuf//:protobuf_python", ], ) diff --git a/ortools/sat/samples/code_samples.bzl b/ortools/sat/samples/code_samples.bzl index 5629b3c152..a1b1d3ce3f 100644 --- a/ortools/sat/samples/code_samples.bzl +++ b/ortools/sat/samples/code_samples.bzl @@ -13,7 +13,7 @@ """Helper macro to compile and test code samples.""" -load("@io_bazel_rules_go//go:def.bzl", "go_test") 
+load("@rules_go//go:def.bzl", "go_test") load("@pip_deps//:requirements.bzl", "requirement") load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") load("@rules_java//java:defs.bzl", "java_test") diff --git a/ortools/scheduling/BUILD.bazel b/ortools/scheduling/BUILD.bazel index 4a3f0c93f0..4a3394ef61 100644 --- a/ortools/scheduling/BUILD.bazel +++ b/ortools/scheduling/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) @@ -22,7 +22,7 @@ proto_library( name = "jobshop_scheduling_proto", srcs = ["jobshop_scheduling.proto"], deps = [ - "@com_google_protobuf//:wrappers_proto", + "@protobuf//:wrappers_proto", ], ) @@ -41,7 +41,7 @@ cc_library( "//ortools/base:path", "//ortools/util:filelineiter", "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/ortools/service/v1/BUILD.bazel b/ortools/service/v1/BUILD.bazel index 018e3dbcd8..83bcc12f15 100644 --- a/ortools/service/v1/BUILD.bazel +++ b/ortools/service/v1/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") package(default_visibility = [ "//ortools/math_opt:__subpackages__", diff --git a/ortools/service/v1/mathopt/BUILD.bazel b/ortools/service/v1/mathopt/BUILD.bazel index 7498077bb2..9e685616b8 100644 --- a/ortools/service/v1/mathopt/BUILD.bazel +++ b/ortools/service/v1/mathopt/BUILD.bazel @@ -11,10 +11,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:java_proto_library.bzl", "java_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:java_proto_library.bzl", "java_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") package(default_visibility = [ "//ortools/math_opt:__subpackages__", @@ -68,7 +68,7 @@ proto_library( srcs = ["result.proto"], deps = [ ":solution_proto", - "@com_google_protobuf//:duration_proto", + "@protobuf//:duration_proto", ], ) @@ -134,7 +134,7 @@ py_proto_library( proto_library( name = "parameters_proto", srcs = ["parameters.proto"], - deps = ["@com_google_protobuf//:duration_proto"], + deps = ["@protobuf//:duration_proto"], ) proto_library( diff --git a/ortools/set_cover/BUILD.bazel b/ortools/set_cover/BUILD.bazel index b4a82fa92f..00386cafe3 100644 --- 
a/ortools/set_cover/BUILD.bazel +++ b/ortools/set_cover/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") package(default_visibility = ["//visibility:public"]) @@ -59,7 +59,7 @@ cc_test( ":base_types", "//ortools/base:gmock_main", "@abseil-cpp//absl/types:span", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) @@ -241,7 +241,7 @@ cc_test( "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/strings", - "@com_google_benchmark//:benchmark", + "@google_benchmark//:benchmark", ], ) diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index 2818939d83..43ecadd813 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -11,9 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@com_google_protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") -load("@com_google_protobuf//bazel:proto_library.bzl", "proto_library") -load("@com_google_protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") +load("@protobuf//bazel:proto_library.bzl", "proto_library") +load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") load("@rules_cc//cc:defs.bzl", "cc_library") package(default_visibility = ["//visibility:public"]) @@ -289,7 +289,7 @@ cc_library( # "//net/proto2/io/public:io", # "//net/proto2/public", # "//net/proto2/util/public:json", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -398,7 +398,7 @@ cc_library( "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) @@ -522,7 +522,7 @@ cc_library( hdrs = ["parse_proto.h"], deps = [ "@abseil-cpp//absl/strings", - "@com_google_protobuf//:protobuf", + "@protobuf//:protobuf", ], ) diff --git a/patches/BUILD.bazel b/patches/BUILD.bazel index 6ca758d659..22f2795c84 100644 --- a/patches/BUILD.bazel +++ b/patches/BUILD.bazel @@ -12,9 +12,9 @@ # limitations under the License. exports_files([ - "abseil-cpp-20250127.1.patch", + "abseil-cpp-20250512.0.patch", "highs-v1.10.patch", - "protobuf-v30.2.patch", + "protobuf-v31.0.patch", "pybind11_bazel.patch", "pybind11_abseil.patch", "pybind11_protobuf.patch", From cfefe417a056a3cadb459b9ec87abea79ebf3ed2 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 21 May 2025 17:44:15 +0200 Subject: [PATCH 023/509] cmake: bump dependencies * abseil-cpp 20250512.0 * protobuf v31.0 * re2 2024-07-02 * googletest 1.17.0 * benchmark 1.9.2 devNote: to fix a patch not working ```sh cd ~/.../ortools_repo cmake -S. 
-Bbuild -DBUILD_DEPS=ON -DBUILD_EXAMPLES=OFF -DBUILD_SAMPLES=OFF -DBUILD_TESTING=ON pushd build/_deps/absl-src git apply --3way ~/.../ortools_repo/patches/abseil-cpp-20250512.0.patch git mergetool git diff HEAD -u > ~/.../ortools_repo/patches/abseil-cpp-20250512.0.patch cd .. rm -rf absl-* popd ``` --- cmake/dependencies/CMakeLists.txt | 20 ++-- cmake/host.CMakeLists.txt | 4 +- ortools/dotnet/Google.OrTools-full.csproj.in | 2 +- ortools/dotnet/Google.OrTools-local.csproj.in | 2 +- ortools/java/pom-full.xml.in | 2 +- ortools/java/pom-local.xml.in | 2 +- ortools/python/setup.py.in | 2 +- patches/abseil-cpp-20250127.1.patch | 99 ------------------- patches/abseil-cpp-20250512.0.patch | 20 ++++ patches/fuzztest-2025-02-14.patch | 8 +- ...v1.16.0.patch => googletest-v1.17.0.patch} | 0 ...tobuf-v30.2.patch => protobuf-v31.0.patch} | 0 ...-2024-04-01.patch => re2-2024-07-02.patch} | 0 13 files changed, 41 insertions(+), 120 deletions(-) delete mode 100644 patches/abseil-cpp-20250127.1.patch create mode 100644 patches/abseil-cpp-20250512.0.patch rename patches/{googletest-v1.16.0.patch => googletest-v1.17.0.patch} (100%) rename patches/{protobuf-v30.2.patch => protobuf-v31.0.patch} (100%) rename patches/{re2-2024-04-01.patch => re2-2024-07-02.patch} (100%) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index dacf587bd5..d0f14a71a0 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -111,10 +111,10 @@ if(BUILD_absl) FetchContent_Declare( absl GIT_REPOSITORY "https://github.com/abseil/abseil-cpp.git" - GIT_TAG "20250127.1" + GIT_TAG "20250512.0" GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/../../patches/abseil-cpp-20250127.1.patch" + "${CMAKE_CURRENT_LIST_DIR}/../../patches/abseil-cpp-20250512.0.patch" OVERRIDE_FIND_PACKAGE ) FetchContent_MakeAvailable(absl) @@ -136,10 +136,11 @@ if(BUILD_Protobuf) FetchContent_Declare( Protobuf GIT_REPOSITORY 
"https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v30.2" + GIT_TAG "v31.0" GIT_SHALLOW TRUE GIT_SUBMODULES "" - PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v30.2.patch" + PATCH_COMMAND git apply --ignore-whitespace + "${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v31.0.patch" ) FetchContent_MakeAvailable(Protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) @@ -156,9 +157,9 @@ if(BUILD_re2) FetchContent_Declare( re2 GIT_REPOSITORY "https://github.com/google/re2.git" - GIT_TAG "2024-04-01" + GIT_TAG "2024-07-02" GIT_SHALLOW TRUE - PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/re2-2024-04-01.patch" + PATCH_COMMAND git apply --ignore-whitespace "${CMAKE_CURRENT_LIST_DIR}/../../patches/re2-2024-07-02.patch" ) FetchContent_MakeAvailable(re2) list(POP_BACK CMAKE_MESSAGE_INDENT) @@ -491,11 +492,10 @@ if(BUILD_googletest) FetchContent_Declare( googletest GIT_REPOSITORY https://github.com/google/googletest.git - GIT_TAG v1.16.0 + GIT_TAG v1.17.0 GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/../../patches/googletest-v1.16.0.patch" - #PATCH_COMMAND git apply --ignore-whitespace "" + "${CMAKE_CURRENT_LIST_DIR}/../../patches/googletest-v1.17.0.patch" ) set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) set(INSTALL_GTEST OFF) @@ -511,7 +511,7 @@ if(BUILD_benchmark) FetchContent_Declare( benchmark GIT_REPOSITORY https://github.com/google/benchmark.git - GIT_TAG v1.9.1 + GIT_TAG v1.9.2 GIT_SHALLOW TRUE #PATCH_COMMAND git apply --ignore-whitespace "" ) diff --git a/cmake/host.CMakeLists.txt b/cmake/host.CMakeLists.txt index c354feb7a2..9d5b83c421 100644 --- a/cmake/host.CMakeLists.txt +++ b/cmake/host.CMakeLists.txt @@ -125,11 +125,11 @@ set(protobuf_WITH_ZLIB OFF) FetchContent_Declare( protobuf GIT_REPOSITORY "https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v30.2" + GIT_TAG "v31.0" GIT_SHALLOW TRUE GIT_SUBMODULES "" PATCH_COMMAND 
git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v30.2.patch" + "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v31.0.patch" ) FetchContent_MakeAvailable(protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/ortools/dotnet/Google.OrTools-full.csproj.in b/ortools/dotnet/Google.OrTools-full.csproj.in index ffdaf5860c..b4619f4627 100644 --- a/ortools/dotnet/Google.OrTools-full.csproj.in +++ b/ortools/dotnet/Google.OrTools-full.csproj.in @@ -193,7 +193,7 @@ - + diff --git a/ortools/dotnet/Google.OrTools-local.csproj.in b/ortools/dotnet/Google.OrTools-local.csproj.in index e6576d74ea..02b9da4443 100644 --- a/ortools/dotnet/Google.OrTools-local.csproj.in +++ b/ortools/dotnet/Google.OrTools-local.csproj.in @@ -181,7 +181,7 @@ - + diff --git a/ortools/java/pom-full.xml.in b/ortools/java/pom-full.xml.in index 69a4547aa6..ffde245eac 100644 --- a/ortools/java/pom-full.xml.in +++ b/ortools/java/pom-full.xml.in @@ -109,7 +109,7 @@ com.google.protobuf protobuf-java - 4.30.2 + 4.31.0 diff --git a/ortools/java/pom-local.xml.in b/ortools/java/pom-local.xml.in index 93f38eac79..d03b19413b 100644 --- a/ortools/java/pom-local.xml.in +++ b/ortools/java/pom-local.xml.in @@ -81,7 +81,7 @@ com.google.protobuf protobuf-java - 4.30.2 + 4.31.0 diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index d7fe709b5d..23b6654505 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -46,7 +46,7 @@ setup( 'absl-py >= 2.0.0', 'numpy >= 1.13.3', 'pandas >= 2.0.0', - 'protobuf >= 6.30.2,<6.31', + 'protobuf >= 6.31.0,<6.32', 'typing-extensions >= 4.12', 'immutabledict >= 3.0.0', ], diff --git a/patches/abseil-cpp-20250127.1.patch b/patches/abseil-cpp-20250127.1.patch deleted file mode 100644 index d63e105477..0000000000 --- a/patches/abseil-cpp-20250127.1.patch +++ /dev/null @@ -1,99 +0,0 @@ -diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake -index 32cc28f..e51d6df 100644 ---- a/CMake/AbseilDll.cmake -+++ 
b/CMake/AbseilDll.cmake -@@ -699,31 +699,44 @@ set(ABSL_INTERNAL_TEST_DLL_TARGETS - - include(CheckCXXSourceCompiles) - --check_cxx_source_compiles( -- [==[ --#ifdef _MSC_VER --# if _MSVC_LANG < 201703L --# error "The compiler defaults or is configured for C++ < 17" --# endif --#elif __cplusplus < 201703L --# error "The compiler defaults or is configured for C++ < 17" --#endif --int main() { return 0; } --]==] -+message(WARNING "ABSL_CXX_STANDARD: ${ABSL_CXX_STANDARD}") -+message(WARNING "CMAKE_CXX_STANDARD: ${CMAKE_CXX_STANDARD}") -+message(WARNING "CMAKE_CXX_STANDARD_REQUIRED: ${CMAKE_CXX_STANDARD_REQUIRED}") -+message(WARNING "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}") -+ -+if(CMAKE_CXX_STANDARD GREATER_EQUAL 17) -+ set(ABSL_INTERNAL_AT_LEAST_CXX17 TRUE) -+else() -+ check_cxx_source_compiles( -+ [==[ -+ #ifdef _MSC_VER -+ # if _MSVC_LANG < 201703L -+ # error "The compiler defaults or is configured for C++ < 17" -+ # endif -+ #elif __cplusplus < 201703L -+ # error "The compiler defaults or is configured for C++ < 17" -+ #endif -+ int main() { return 0; } -+ ]==] - ABSL_INTERNAL_AT_LEAST_CXX17) -+endif() - --check_cxx_source_compiles( -- [==[ --#ifdef _MSC_VER --# if _MSVC_LANG < 202002L --# error "The compiler defaults or is configured for C++ < 20" --# endif --#elif __cplusplus < 202002L --# error "The compiler defaults or is configured for C++ < 20" --#endif --int main() { return 0; } --]==] -+if(CMAKE_CXX_STANDARD GREATER_EQUAL 20) -+ set(ABSL_INTERNAL_AT_LEAST_CXX20 TRUE) -+else() -+ check_cxx_source_compiles( -+ [==[ -+ #ifdef _MSC_VER -+ # if _MSVC_LANG < 202002L -+ # error "The compiler defaults or is configured for C++ < 20" -+ # endif -+ #elif __cplusplus < 202002L -+ # error "The compiler defaults or is configured for C++ < 20" -+ #endif -+ int main() { return 0; } -+ ]==] - ABSL_INTERNAL_AT_LEAST_CXX20) -+endif() - - if(ABSL_INTERNAL_AT_LEAST_CXX20) - set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_20) -@@ -731,6 +744,7 @@ elseif(ABSL_INTERNAL_AT_LEAST_CXX17) - 
set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_17) - else() - set(ABSL_INTERNAL_CXX_STD_FEATURE cxx_std_14) -+ message(FATAL_ERROR "Should not pass here !!!") - endif() - - function(absl_internal_dll_contains) -diff --git a/absl/flags/declare.h b/absl/flags/declare.h -index 8d2a856..a154046 100644 ---- a/absl/flags/declare.h -+++ b/absl/flags/declare.h -@@ -59,10 +59,15 @@ ABSL_NAMESPACE_END - - // Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its - // arguments. Clients must use ABSL_DECLARE_FLAG instead. -+#if defined(_MSC_VER) -+#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ -+ extern absl::Flag FLAGS_##name -+#else - #define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ - extern absl::Flag FLAGS_##name; \ - namespace absl /* block flags in namespaces */ {} \ - /* second redeclaration is to allow applying attributes */ \ - extern absl::Flag FLAGS_##name -+#endif // _MSC_VER - - #endif // ABSL_FLAGS_DECLARE_H_ diff --git a/patches/abseil-cpp-20250512.0.patch b/patches/abseil-cpp-20250512.0.patch new file mode 100644 index 0000000000..78af605623 --- /dev/null +++ b/patches/abseil-cpp-20250512.0.patch @@ -0,0 +1,20 @@ +diff --git a/absl/flags/declare.h b/absl/flags/declare.h +index 8d2a856..a154046 100644 +--- a/absl/flags/declare.h ++++ b/absl/flags/declare.h +@@ -59,10 +59,15 @@ ABSL_NAMESPACE_END + + // Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its + // arguments. Clients must use ABSL_DECLARE_FLAG instead. 
++#if defined(_MSC_VER) ++#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ ++ extern absl::Flag FLAGS_##name ++#else + #define ABSL_DECLARE_FLAG_INTERNAL(type, name) \ + extern absl::Flag FLAGS_##name; \ + namespace absl /* block flags in namespaces */ {} \ + /* second redeclaration is to allow applying attributes */ \ + extern absl::Flag FLAGS_##name ++#endif // _MSC_VER + + #endif // ABSL_FLAGS_DECLARE_H_ diff --git a/patches/fuzztest-2025-02-14.patch b/patches/fuzztest-2025-02-14.patch index e803bbe2a7..053736fbb7 100644 --- a/patches/fuzztest-2025-02-14.patch +++ b/patches/fuzztest-2025-02-14.patch @@ -20,15 +20,15 @@ index 1f4f08d..cc4d0ba 100644 set(absl_URL https://github.com/abseil/abseil-cpp.git) -set(absl_TAG 20240116.0) -+set(absl_TAG 20250127.1) ++set(absl_TAG 20250512.0) set(re2_URL https://github.com/google/re2.git) -set(re2_TAG 2024-02-01) -+set(re2_TAG 2024-04-01) ++set(re2_TAG 2024-07-02) set(gtest_URL https://github.com/google/googletest.git) -set(gtest_TAG v1.14.0) -+set(gtest_TAG v1.16.0) ++set(gtest_TAG v1.17.0) # From https://www.antlr.org/download.html set(antlr_cpp_URL https://www.antlr.org/download/antlr4-cpp-runtime-4.12.0-source.zip) @@ -36,7 +36,7 @@ index 1f4f08d..cc4d0ba 100644 set(proto_URL https://github.com/protocolbuffers/protobuf.git) -set(proto_TAG v28.2) -+set(proto_TAG v30.2) ++set(proto_TAG v31.0) set(nlohmann_json_URL https://github.com/nlohmann/json.git) set(nlohmann_json_TAG v3.11.2) diff --git a/patches/googletest-v1.16.0.patch b/patches/googletest-v1.17.0.patch similarity index 100% rename from patches/googletest-v1.16.0.patch rename to patches/googletest-v1.17.0.patch diff --git a/patches/protobuf-v30.2.patch b/patches/protobuf-v31.0.patch similarity index 100% rename from patches/protobuf-v30.2.patch rename to patches/protobuf-v31.0.patch diff --git a/patches/re2-2024-04-01.patch b/patches/re2-2024-07-02.patch similarity index 100% rename from patches/re2-2024-04-01.patch rename to patches/re2-2024-07-02.patch From 
f9e9b55cc117865dc48a510fe35532b47a5b4513 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Thu, 22 May 2025 01:20:34 +0200 Subject: [PATCH 024/509] cmake: fixup abseil-cpp bump `absl::bad_any_cast_impl` has been removed ref: https://github.com/abseil/abseil-cpp/commit/8ce0c88d6aae3d234f2ae4dd361a634beb072ff9 --- cmake/java.cmake | 5 ----- cmake/python.cmake | 5 ----- ortools/dotnet/Google.OrTools.runtime.csproj.in | 5 ----- 3 files changed, 15 deletions(-) diff --git a/cmake/java.cmake b/cmake/java.cmake index c6a80fb29e..f77615bd99 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -327,9 +327,6 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E $,copy,true> $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> @@ -365,7 +362,6 @@ add_custom_command( $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> @@ -385,7 +381,6 @@ add_custom_command( $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> diff --git a/cmake/python.cmake b/cmake/python.cmake index 9d37cad5c1..b7655de61c 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -509,9 +509,6 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E $,copy,true> $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> @@ -547,7 +544,6 @@ add_custom_command( $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> @@ -567,7 +563,6 @@ 
add_custom_command( $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> - $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> diff --git a/ortools/dotnet/Google.OrTools.runtime.csproj.in b/ortools/dotnet/Google.OrTools.runtime.csproj.in index 62c8e1db01..ce12ecded4 100644 --- a/ortools/dotnet/Google.OrTools.runtime.csproj.in +++ b/ortools/dotnet/Google.OrTools.runtime.csproj.in @@ -33,9 +33,6 @@ $<@need_windows_bzip2_lib@:;$> $<@need_unix_absl_lib@:;$> - $<@need_unix_absl_lib@:;$> - $<@need_unix_absl_lib@:;$> - $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> @@ -71,7 +68,6 @@ $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> - $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> @@ -91,7 +87,6 @@ $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> - $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> From 965c619f65494161085faa66fb551331a4aa4b32 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Thu, 22 May 2025 09:53:23 +0200 Subject: [PATCH 025/509] deps: add missing absl::random_internal_entropy_pool --- cmake/java.cmake | 1 + cmake/python.cmake | 1 + ortools/dotnet/Google.OrTools.runtime.csproj.in | 1 + 3 files changed, 3 insertions(+) diff --git a/cmake/java.cmake b/cmake/java.cmake index f77615bd99..ac4ca6bdae 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -380,6 +380,7 @@ add_custom_command( $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> + $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> diff --git a/cmake/python.cmake b/cmake/python.cmake index b7655de61c..3240539174 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -562,6 +562,7 @@ 
add_custom_command( $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> + $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> $<${need_unix_absl_lib}:$> diff --git a/ortools/dotnet/Google.OrTools.runtime.csproj.in b/ortools/dotnet/Google.OrTools.runtime.csproj.in index ce12ecded4..cb57d49ab2 100644 --- a/ortools/dotnet/Google.OrTools.runtime.csproj.in +++ b/ortools/dotnet/Google.OrTools.runtime.csproj.in @@ -86,6 +86,7 @@ $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> + $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> $<@need_unix_absl_lib@:;$> From cf373aeb7e794a96766ee40a189d11b60a2fb50b Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Thu, 22 May 2025 17:44:52 +0200 Subject: [PATCH 026/509] bazel: bump GLPK to 5.0.bcr.4 (#4656) --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 97c5a55401..2454934bb8 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -21,7 +21,7 @@ bazel_dep(name = "eigen", version = "3.4.0.bcr.3") bazel_dep(name = "fuzztest", version = "20250214.0") bazel_dep(name = "riegeli", version = "0.0.0-20241218-3385e3c") # otherwise fuzztest use a borken version bazel_dep(name = "gazelle", version = "0.43.0") -bazel_dep(name = "glpk", version = "5.0.bcr.3") +bazel_dep(name = "glpk", version = "5.0.bcr.4") bazel_dep(name = "google_benchmark", version = "1.9.2") bazel_dep(name = "googletest", version = "1.17.0") bazel_dep(name = "highs", version = "1.10.0") From 7518c717ed00722760b6d30868be1a5f7603a1ce Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 26 May 2025 15:07:03 +0200 Subject: [PATCH 027/509] bazel: cleanup BUILD.bazel --- .allstar/BUILD.bazel | 17 ----------- examples/cpp/BUILD.bazel | 28 +++++++++---------- ortools/algorithms/BUILD.bazel | 2 +- ortools/base/BUILD.bazel | 16 +++++------ ortools/bop/BUILD.bazel | 8 +++--- ortools/graph/BUILD.bazel | 2 
+- .../linear_solver/proto_solver/BUILD.bazel | 2 +- ortools/lp_data/BUILD.bazel | 2 +- ortools/math_opt/core/BUILD.bazel | 4 +-- ortools/math_opt/cpp/BUILD.bazel | 6 ++-- ortools/math_opt/elemental/BUILD.bazel | 2 +- ortools/math_opt/solvers/BUILD.bazel | 4 +-- ortools/math_opt/storage/BUILD.bazel | 4 +-- ortools/math_opt/validators/BUILD.bazel | 6 ++-- ortools/pdlp/BUILD.bazel | 2 +- ortools/port/BUILD.bazel | 2 +- ortools/routing/BUILD.bazel | 4 +-- ortools/routing/parsers/BUILD.bazel | 4 +-- ortools/sat/BUILD.bazel | 22 +++++++-------- ortools/sat/samples/code_samples.bzl | 2 +- ortools/scheduling/BUILD.bazel | 2 +- ortools/util/BUILD.bazel | 6 ++-- 22 files changed, 65 insertions(+), 82 deletions(-) delete mode 100644 .allstar/BUILD.bazel diff --git a/.allstar/BUILD.bazel b/.allstar/BUILD.bazel deleted file mode 100644 index c6f182d9ae..0000000000 --- a/.allstar/BUILD.bazel +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -exports_files( - glob(["**"]), - visibility = ["//ortools/open_source:__subpackages__"], -) diff --git a/examples/cpp/BUILD.bazel b/examples/cpp/BUILD.bazel index 51716946da..20d51c78f7 100644 --- a/examples/cpp/BUILD.bazel +++ b/examples/cpp/BUILD.bazel @@ -33,7 +33,7 @@ cc_binary( "@abseil-cpp//absl/container:btree", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -52,7 +52,7 @@ cc_test( "//ortools/packing:multiple_dimensions_bin_packing_cc_proto", "//ortools/sat:cp_model", "@abseil-cpp//absl/flags:flag", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -200,7 +200,7 @@ cc_binary( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/flags:parse", "@abseil-cpp//absl/strings:str_format", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -216,7 +216,7 @@ cc_test( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/flags:parse", "@abseil-cpp//absl/strings:str_format", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -232,7 +232,7 @@ cc_binary( "//ortools/sat:cp_model", "//ortools/sat:cp_model_solver", "@abseil-cpp//absl/flags:flag", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -250,7 +250,7 @@ cc_test( "//ortools/packing:multiple_dimensions_bin_packing_cc_proto", "//ortools/sat:cp_model", "@abseil-cpp//absl/flags:flag", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -405,7 +405,7 @@ cc_binary( "//ortools/util:filelineiter", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -433,7 +433,7 @@ cc_test( "//ortools/util:filelineiter", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -607,7 +607,7 @@ cc_binary( "//ortools/routing", "//ortools/util:random_engine", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -623,7 +623,7 @@ cc_binary( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - 
"@protobuf//:protobuf", + "@protobuf", ], ) @@ -766,7 +766,7 @@ cc_binary( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -787,7 +787,7 @@ cc_test( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -943,7 +943,7 @@ cc_binary( "//ortools/util:qap_reader", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -977,7 +977,7 @@ cc_binary( "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/flags:parse", "@abseil-cpp//absl/random", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index c1dacf06bc..4f4def32e7 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -141,7 +141,7 @@ cc_library( "@abseil-cpp//absl/random", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index 1e53dc3b5e..10318be069 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -253,7 +253,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@bzip2//:bz2", - "@protobuf//:protobuf", + "@protobuf", "@zlib", ], ) @@ -267,7 +267,7 @@ cc_library( "@fuzztest//fuzztest", "@fuzztest//fuzztest:googletest_fixture_adapter", "@fuzztest//fuzztest:init_fuzztest", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -424,7 +424,7 @@ cc_library( deps = [ "@abseil-cpp//absl/strings", "@googletest//:gtest", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -459,7 +459,7 @@ cc_library( hdrs = ["parse_text_proto.h"], deps = [ "@abseil-cpp//absl/log:check", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -469,7 +469,7 @@ cc_library( deps = [ ":gmock", "@abseil-cpp//absl/log:check", - 
"@protobuf//:protobuf", + "@protobuf", ], ) @@ -495,7 +495,7 @@ cc_library( ":timer", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -504,7 +504,7 @@ cc_library( hdrs = ["proto_enum_utils.h"], deps = [ "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -523,7 +523,7 @@ cc_library( ":logging", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", "@zlib", ], ) diff --git a/ortools/bop/BUILD.bazel b/ortools/bop/BUILD.bazel index fcab0d1ee3..2e9bcc42d0 100644 --- a/ortools/bop/BUILD.bazel +++ b/ortools/bop/BUILD.bazel @@ -119,7 +119,7 @@ cc_library( "//ortools/util:stats", "//ortools/util:time_limit", "@abseil-cpp//absl/random", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -149,7 +149,7 @@ cc_library( "//ortools/util:time_limit", "@abseil-cpp//absl/random", "@abseil-cpp//absl/cleanup", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -258,7 +258,7 @@ cc_library( "//ortools/base:threadpool", "//ortools/util:bitset", "//ortools/util:time_limit", - "@protobuf//:protobuf", + "@protobuf", "//ortools/util:stats", ], ) @@ -291,6 +291,6 @@ cc_library( "//ortools/util:bitset", "//ortools/util:stats", "//ortools/util:time_limit", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index 9c04dee929..d8d0a5c07d 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -467,7 +467,7 @@ cc_test( "//ortools/base:gmock_main", "//ortools/base:path", "//ortools/util:file_util", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/linear_solver/proto_solver/BUILD.bazel b/ortools/linear_solver/proto_solver/BUILD.bazel index 3693aa44d6..8b01c79af5 100644 --- a/ortools/linear_solver/proto_solver/BUILD.bazel +++ b/ortools/linear_solver/proto_solver/BUILD.bazel @@ -20,7 +20,7 @@ cc_library( deps = [ "//ortools/port:proto_utils", 
"@abseil-cpp//absl/log:check", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/lp_data/BUILD.bazel b/ortools/lp_data/BUILD.bazel index 84b4b74471..6e3ad9bd97 100644 --- a/ortools/lp_data/BUILD.bazel +++ b/ortools/lp_data/BUILD.bazel @@ -250,7 +250,7 @@ cc_library( "//ortools/linear_solver:linear_solver_cc_proto", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", - "@re2//:re2", + "@re2", ], ) diff --git a/ortools/math_opt/core/BUILD.bazel b/ortools/math_opt/core/BUILD.bazel index a914190703..bdd9db9758 100644 --- a/ortools/math_opt/core/BUILD.bazel +++ b/ortools/math_opt/core/BUILD.bazel @@ -51,7 +51,7 @@ cc_library( "//ortools/math_opt:sparse_containers_cc_proto", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -228,7 +228,7 @@ cc_library( "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/container:flat_hash_set", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/math_opt/cpp/BUILD.bazel b/ortools/math_opt/cpp/BUILD.bazel index 22bbe52c84..2a62a12df6 100644 --- a/ortools/math_opt/cpp/BUILD.bazel +++ b/ortools/math_opt/cpp/BUILD.bazel @@ -63,7 +63,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -296,7 +296,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/time", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -326,7 +326,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/synchronization", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/math_opt/elemental/BUILD.bazel b/ortools/math_opt/elemental/BUILD.bazel index 039c7eafc8..2ebdec571a 100644 --- a/ortools/math_opt/elemental/BUILD.bazel +++ b/ortools/math_opt/elemental/BUILD.bazel @@ -78,7 +78,7 @@ 
cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:string_view", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/math_opt/solvers/BUILD.bazel b/ortools/math_opt/solvers/BUILD.bazel index d01605400e..1c4a899094 100644 --- a/ortools/math_opt/solvers/BUILD.bazel +++ b/ortools/math_opt/solvers/BUILD.bazel @@ -66,7 +66,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", "@scip", ], alwayslink = 1, @@ -187,7 +187,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], alwayslink = 1, ) diff --git a/ortools/math_opt/storage/BUILD.bazel b/ortools/math_opt/storage/BUILD.bazel index 17878bd805..50b2b98e7c 100644 --- a/ortools/math_opt/storage/BUILD.bazel +++ b/ortools/math_opt/storage/BUILD.bazel @@ -130,7 +130,7 @@ cc_library( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/strings:string_view", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -170,7 +170,7 @@ cc_library( "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/log:check", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/math_opt/validators/BUILD.bazel b/ortools/math_opt/validators/BUILD.bazel index f2e725a7a8..f9b4cb4ac4 100644 --- a/ortools/math_opt/validators/BUILD.bazel +++ b/ortools/math_opt/validators/BUILD.bazel @@ -122,7 +122,7 @@ cc_library( "//ortools/math_opt/core:model_summary", "//ortools/port:proto_utils", "@abseil-cpp//absl/status", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -184,7 +184,7 @@ cc_library( "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -205,7 +205,7 @@ cc_library( "//ortools/util:status_macros", 
"@abseil-cpp//absl/status", "@abseil-cpp//absl/time", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/pdlp/BUILD.bazel b/ortools/pdlp/BUILD.bazel index 233b229c81..7a19ebc39a 100644 --- a/ortools/pdlp/BUILD.bazel +++ b/ortools/pdlp/BUILD.bazel @@ -149,8 +149,8 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", - "@protobuf//:protobuf", "@eigen", + "@protobuf", ], ) diff --git a/ortools/port/BUILD.bazel b/ortools/port/BUILD.bazel index 59dffc25bb..20219982d7 100644 --- a/ortools/port/BUILD.bazel +++ b/ortools/port/BUILD.bazel @@ -32,7 +32,7 @@ cc_library( "//ortools/util:parse_proto", "@abseil-cpp//absl/log", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/routing/BUILD.bazel b/ortools/routing/BUILD.bazel index bbe029185c..5d62812372 100644 --- a/ortools/routing/BUILD.bazel +++ b/ortools/routing/BUILD.bazel @@ -125,7 +125,7 @@ cc_library( "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -289,7 +289,7 @@ cc_library( "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/routing/parsers/BUILD.bazel b/ortools/routing/parsers/BUILD.bazel index 925717864c..883228d58e 100644 --- a/ortools/routing/parsers/BUILD.bazel +++ b/ortools/routing/parsers/BUILD.bazel @@ -60,7 +60,7 @@ cc_library( "//ortools/base:zipfile", "//ortools/util:filelineiter", "@abseil-cpp//absl/strings", - "@re2//:re2", + "@re2", ], ) @@ -239,7 +239,7 @@ cc_library( "//ortools/util:filelineiter", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/strings", - "@re2//:re2", + "@re2", ], ) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index e7094dc654..06106c2ad1 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -149,7 +149,7 @@ 
cc_library( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -236,7 +236,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -496,7 +496,7 @@ cc_test( ":parameters_validation", ":sat_parameters_cc_proto", "//ortools/base:gmock_main", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -632,7 +632,7 @@ cc_library( "@abseil-cpp//absl/synchronization", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -760,7 +760,7 @@ cc_library( "@abseil-cpp//absl/synchronization", "@abseil-cpp//absl/time", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -1114,7 +1114,7 @@ cc_library( "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -1230,7 +1230,7 @@ cc_library( "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -2554,7 +2554,7 @@ cc_library( "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -3154,7 +3154,7 @@ cc_library( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -3996,7 +3996,7 @@ cc_binary( "@abseil-cpp//absl/log:flags", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -4072,7 +4072,7 @@ cc_library( "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/sat/samples/code_samples.bzl b/ortools/sat/samples/code_samples.bzl index 
a1b1d3ce3f..e613b56ffd 100644 --- a/ortools/sat/samples/code_samples.bzl +++ b/ortools/sat/samples/code_samples.bzl @@ -13,9 +13,9 @@ """Helper macro to compile and test code samples.""" -load("@rules_go//go:def.bzl", "go_test") load("@pip_deps//:requirements.bzl", "requirement") load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_test") +load("@rules_go//go:def.bzl", "go_test") load("@rules_java//java:defs.bzl", "java_test") load("@rules_python//python:defs.bzl", "py_binary", "py_test") diff --git a/ortools/scheduling/BUILD.bazel b/ortools/scheduling/BUILD.bazel index 4a3394ef61..e8503bcc37 100644 --- a/ortools/scheduling/BUILD.bazel +++ b/ortools/scheduling/BUILD.bazel @@ -41,7 +41,7 @@ cc_library( "//ortools/base:path", "//ortools/util:filelineiter", "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index 43ecadd813..b12e21b60a 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -289,7 +289,7 @@ cc_library( # "//net/proto2/io/public:io", # "//net/proto2/public", # "//net/proto2/util/public:json", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -398,7 +398,7 @@ cc_library( "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", - "@protobuf//:protobuf", + "@protobuf", ], ) @@ -522,7 +522,7 @@ cc_library( hdrs = ["parse_proto.h"], deps = [ "@abseil-cpp//absl/strings", - "@protobuf//:protobuf", + "@protobuf", ], ) From 548497bbe1638666e852146212594fda66e24a75 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 26 May 2025 15:07:20 +0200 Subject: [PATCH 028/509] base: export from google3 --- ortools/base/numbers.cc | 5 ----- ortools/base/zipfile.cc | 4 ++-- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/ortools/base/numbers.cc b/ortools/base/numbers.cc index deacf7f3e4..6a35091869 100644 --- a/ortools/base/numbers.cc +++ b/ortools/base/numbers.cc @@ -18,16 +18,11 @@ #include // for errno 
-#include #include #include #include -#include -#include -// #include "ortools/base/logging.h" #include "absl/strings/ascii.h" -#include "ortools/base/strtoint.h" namespace strings { diff --git a/ortools/base/zipfile.cc b/ortools/base/zipfile.cc index 8186a50899..cc75dc984a 100644 --- a/ortools/base/zipfile.cc +++ b/ortools/base/zipfile.cc @@ -19,10 +19,10 @@ #include "ortools/base/zipfile.h" +#include + #include "absl/strings/string_view.h" -#include "ortools/base/file.h" #include "ortools/base/logging.h" -#include "zlib.h" namespace zipfile { From 17b8187693835a52cc83c80dd6c34790f036863e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 26 May 2025 15:07:31 +0200 Subject: [PATCH 029/509] scheduling: export from google3 --- ortools/scheduling/rcpsp_parser.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ortools/scheduling/rcpsp_parser.cc b/ortools/scheduling/rcpsp_parser.cc index 65c3ff156b..31d15a5b52 100644 --- a/ortools/scheduling/rcpsp_parser.cc +++ b/ortools/scheduling/rcpsp_parser.cc @@ -17,6 +17,8 @@ #include #include +#include "absl/log/check.h" +#include "absl/log/log.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" From 2bcdb104ee6ad1adb76a1a9a0a7aab6621bea31d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 26 May 2025 15:21:43 +0200 Subject: [PATCH 030/509] math_opt: Fix elemental python on windows ssize_t is not part of the STL, python provides `Py_ssize_t` ref: https://peps.python.org/pep-0353/ --- ortools/math_opt/elemental/python/elemental.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/math_opt/elemental/python/elemental.cc b/ortools/math_opt/elemental/python/elemental.cc index bd21f2f286..dc221f8bb8 100644 --- a/ortools/math_opt/elemental/python/elemental.cc +++ b/ortools/math_opt/elemental/python/elemental.cc @@ -177,7 +177,7 @@ class AttrKeyArrayView { } } - ssize_t size() const { return array_.shape(0); } + Py_ssize_t size() 
const { return array_.shape(0); } AttrKeyT operator[](const int64_t i) const { std::array key_ids; @@ -565,7 +565,7 @@ PYBIND11_MODULE(cpp_elemental, py_module) { const int64_t num_elements = static_cast(names.shape(0)); const char* unicode_data = static_cast(names.request().ptr); - const ssize_t itemsize_bytes = names.request().itemsize; + const Py_ssize_t itemsize_bytes = names.request().itemsize; py::array_t result(names.size()); auto ids = result.mutable_unchecked<1>(); for (int i = 0; i < num_elements; ++i) { From 0bf24b2fb66a70a1839fe3aa688abbb89a7e48f9 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 26 May 2025 16:00:20 +0200 Subject: [PATCH 031/509] tools/release: remove Python 3.8 support note: BTW Protobuf v31.0 do not support Python 3.8 --- tools/release/build_delivery_macos.sh | 4 +-- tools/release/build_delivery_win.cmd | 6 ++--- tools/release/test_delivery_macos.sh | 4 +-- tools/release/test_delivery_win.cmd | 39 +++------------------------ 4 files changed, 11 insertions(+), 42 deletions(-) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index d6516660ed..31aa8d1d47 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -213,9 +213,9 @@ function build_python() { command -v swig | xargs echo "swig: " | tee -a build.log if [[ ${PLATFORM} == "arm64" ]]; then - local -r PY=(3.8 3.9 3.10 3.11 3.12 3.13) + local -r PY=(3.9 3.10 3.11 3.12 3.13) else - local -r PY=(3.8 3.9 3.10 3.11 3.12 3.13) + local -r PY=(3.9 3.10 3.11 3.12 3.13) fi # Check Python env diff --git a/tools/release/build_delivery_win.cmd b/tools/release/build_delivery_win.cmd index a5638aaf6c..d50a83f831 100644 --- a/tools/release/build_delivery_win.cmd +++ b/tools/release/build_delivery_win.cmd @@ -269,7 +269,7 @@ set PATH=%userprofile%\AppData\Roaming\Python\Python3%1\Scripts;%PATH% ::echo "python path: %PATH%" GOTO :eof -REM PYTHON 3.8, 3.9, 3.10, 3.11, 3.12, 3.13 +REM PYTHON 3.9, 3.10, 3.11, 
3.12, 3.13 :BUILD_PYTHON title Build Python set HASH= @@ -279,7 +279,7 @@ echo Python build seems up to date, skipping exit /B 0 ) -FOR %%v IN (8 9 10 11 12 13) DO ( +FOR %%v IN (9 10 11 12 13) DO ( title Build Python 3.%%v echo Check python3.%%v... | tee.exe -a build.log which.exe "C:\python3%%v-64\python.exe" || exit 1 @@ -342,7 +342,7 @@ del /s /f /q temp_dotnet rmdir /s /q temp_dotnet del /s /f /q temp_java rmdir /s /q temp_java -FOR %%v IN (8 9 10 11 12) do ( +FOR %%v IN (9 10 11 12 13) do ( del /s /f /q temp_python3%%v rmdir /s /q temp_python3%%v ) diff --git a/tools/release/test_delivery_macos.sh b/tools/release/test_delivery_macos.sh index 4cf1f798d0..d5d09ff23e 100755 --- a/tools/release/test_delivery_macos.sh +++ b/tools/release/test_delivery_macos.sh @@ -23,9 +23,9 @@ command -v make | xargs echo "make: " | tee -a test.log command -v swig | xargs echo "swig: " | tee -a test.log # python if [[ ${PLATFORM} == "arm64" ]]; then - local -r PY=(3.8 3.9 3.10 3.11 3.12 3.13) + local -r PY=(3.9 3.10 3.11 3.12 3.13) else - local -r PY=(3.8 3.9 3.10 3.11 3.12 3.13) + local -r PY=(3.9 3.10 3.11 3.12 3.13) fi for i in "${PY[@]}"; do diff --git a/tools/release/test_delivery_win.cmd b/tools/release/test_delivery_win.cmd index 71d98191ae..2cd35cdd76 100755 --- a/tools/release/test_delivery_win.cmd +++ b/tools/release/test_delivery_win.cmd @@ -11,50 +11,19 @@ make.exe print-OR_TOOLS_VERSION | tee.exe test.log which.exe cmake || exit 1 which.exe cmake | tee.exe -a test.log REM Python -which.exe C:\python38-64\python.exe || exit 1 -echo C:\python38-64\python.exe: FOUND | tee.exe -a test.log which.exe C:\python39-64\python.exe || exit 1 echo C:\python39-64\python.exe: FOUND | tee.exe -a test.log which.exe C:\python310-64\python.exe || exit 1 echo C:\python310-64\python.exe: FOUND | tee.exe -a test.log which.exe C:\python311-64\python.exe || exit 1 echo C:\python311-64\python.exe: FOUND | tee.exe -a test.log +which.exe C:\python312-64\python.exe || exit 1 +echo 
C:\python312-64\python.exe: FOUND | tee.exe -a test.log +which.exe C:\python313-64\python.exe || exit 1 +echo C:\python313-64\python.exe: FOUND | tee.exe -a test.log set LOCAL_PATH=%PATH% -REM ################## -REM ## PYTHON 3.8 ## -REM ################## -echo Cleaning Python... | tee.exe -a test.log -make.exe clean_python WINDOWS_PATH_TO_PYTHON=c:\python38-64 -echo Cleaning Python...DONE | tee.exe -a test.log - -REM make.exe python WINDOWS_PATH_TO_PYTHON=c:\python38-64 || exit 1 -REM echo make python3.8: DONE | tee.exe -a build.log -REM make.exe test_python WINDOWS_PATH_TO_PYTHON=c:\python38-64 || exit 1 -REM echo make test_python3.8: DONE | tee.exe -a build.log -echo Rebuild Python3.8 pypi archive... | tee.exe -a test.log -make.exe package_python WINDOWS_PATH_TO_PYTHON=c:\python38-64 || exit 1 -echo Rebuild Python3.8 pypi archive...DONE | tee.exe -a test.log - -echo Creating Python3.8 venv... | tee.exe -a test.log -set PATH=c:\python38-64;c:\python38-64\Scripts;%PATH% -python -m pip install virtualenv -set TEMP_DIR=temp_python38 -python -m virtualenv %TEMP_DIR%\venv -set PATH=%LOCAL_PATH% -echo Creating Python3.8 venv...DONE | tee.exe -a test.log - -echo Installing ortools Python3.8 venv... | tee.exe -a test.log -FOR %%i IN (%TEMP_DIR%\ortools\dist\*.whl) DO %TEMP_DIR%\venv\Scripts\python -m pip install %%i -echo Installing ortools Python3.8 venv...DONE | tee.exe -a test.log - -echo Testing ortools Python3.8... | tee.exe -a test.log -%TEMP_DIR%\venv\Scripts\python cmake\samples\python\sample.py 2>&1 | tee.exe -a test.log -echo Testing ortools Python3.8...DONE | tee.exe -a test.log - -FOR %%i IN (%TEMP_DIR%\ortools\dist\*.whl) DO copy %%i . 
- REM ################## REM ## PYTHON 3.9 ## REM ################## From 75d5d21395b84945915ac997fd1da959aa845012 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 26 May 2025 16:02:10 +0200 Subject: [PATCH 032/509] tools/docker: remove python 3.8 support --- tools/docker/Makefile | 13 ++++++------- tools/docker/python/build-manylinux.sh | 6 +++--- tools/docker/python/build-musllinux.sh | 6 +++--- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/tools/docker/Makefile b/tools/docker/Makefile index d0803291e6..dbc2101ee9 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile @@ -33,10 +33,10 @@ help: @echo -e "\t${BOLD}test_archives${RESET}: Test each OR-Tools archives for all ${BOLD}${RESET} and ${BOLD}${RESET}." @echo @echo -e "${BOLD}PYTHON TARGETS${RESET}" - @echo -e "\t${BOLD}python${RESET}: Build musllinux and manylinux python 'ortools' wheel packages (3.8+)." - @echo -e "\t${BOLD}python_${RESET}: Build all python 'ortools' wheel packages (3.8+) for a specific platform." - @echo -e "\t${BOLD}python__${RESET}: Build all python 'ortools' wheel packages (3.8+) for a specific platform." - @echo -e "\t${BOLD}python__${RESET}: Build python 'ortools' wheel packages (3.8+) for a specific target." + @echo -e "\t${BOLD}python${RESET}: Build musllinux and manylinux python 'ortools' wheel packages (3.9+)." + @echo -e "\t${BOLD}python_${RESET}: Build all python 'ortools' wheel packages (3.9+) for a specific platform." + @echo -e "\t${BOLD}python__${RESET}: Build all python 'ortools' wheel packages (3.9+) for a specific platform." + @echo -e "\t${BOLD}python__${RESET}: Build python 'ortools' wheel packages (3.9+) for a specific target." @echo -e "\t${BOLD}save_python_${RESET}: Save python 'ortools' image." @echo -e "\t${BOLD}clean_python_${RESET}: Clean manylinux and musllinux python 'ortools' wheel packages." @echo -e "\t${BOLD}sh_python_${RESET}: Run a container using the python 'ortools' image." 
@@ -54,7 +54,6 @@ help: @echo -e "\t\t${BOLD}manylinux${RESET} (manylinux_2_28)" @echo @echo -e "\t${BOLD}${RESET}:" - @echo -e "\t\t${BOLD}38${RESET} Python3.8" @echo -e "\t\t${BOLD}39${RESET} Python3.9" @echo -e "\t\t${BOLD}310${RESET} Python3.10" @echo -e "\t\t${BOLD}311${RESET} Python3.11" @@ -118,7 +117,7 @@ help: @echo -e "\t\t${BOLD}cpp${RESET} C++" @echo -e "\t\t${BOLD}dotnet${RESET} .Net Core 3.1 and/or .Net 6.0 wrappers" @echo -e "\t\t${BOLD}java${RESET} Java (JDK 8.0) wrappers" - @echo -e "\t\t${BOLD}python${RESET} Python 3.8+ wrappers" + @echo -e "\t\t${BOLD}python${RESET} Python 3.9+ wrappers" @echo @echo -e "\te.g. 'make sh_amd64_ubuntu-22.04_cpp_build'" @echo -e "\te.g. 'make amd64_ubuntu-22.04_cpp_archive'" @@ -204,7 +203,7 @@ cache/python: | cache -mkdir $@ ## MANYLINUX ## -PYTHON_VERSIONS := 38 39 310 311 312 313 +PYTHON_VERSIONS := 39 310 311 312 313 export/python/manylinux: | export/python -mkdir -p $@ diff --git a/tools/docker/python/build-manylinux.sh b/tools/docker/python/build-manylinux.sh index c3b86c770a..3bbf69e7c7 100755 --- a/tools/docker/python/build-manylinux.sh +++ b/tools/docker/python/build-manylinux.sh @@ -35,7 +35,7 @@ DESCRIPTION \tYou MUST define the following variables before running this script: \t* PLATFORM: x86_64 aarch64 -\t* PYTHON_VERSION: 3 38 39 310 311 312 313 +\t* PYTHON_VERSION: 3 39 310 311 312 313 note: PYTHON_VERSION=3 will generate for all pythons which could take time... 
OPTIONS @@ -123,7 +123,6 @@ function check_wheel() { # Check mypy files declare -a MYPY_FILES=( "ortools/algorithms/python/knapsack_solver.pyi" - "ortools/algorithms/python/set_cover.pyi" "ortools/constraint_solver/pywrapcp.pyi" "ortools/graph/python/linear_sum_assignment.pyi" "ortools/graph/python/max_flow.pyi" @@ -134,6 +133,7 @@ function check_wheel() { "ortools/pdlp/python/pdlp.pyi" "ortools/sat/python/cp_model_helper.pyi" "ortools/scheduling/python/rcpsp.pyi" + "ortools/set_cover/python/set_cover.pyi" "ortools/util/python/sorted_interval_list.pyi" ) for FILE in "${MYPY_FILES[@]}"; do @@ -264,7 +264,7 @@ function main() { assert_defined PYTHON_VERSION # Setup - declare -a SKIPS=( "pp37-pypy37_pp73" ) + declare -a SKIPS=( "cp38-cp38" ) case ${1} in build) diff --git a/tools/docker/python/build-musllinux.sh b/tools/docker/python/build-musllinux.sh index f404912565..846e636c3f 100755 --- a/tools/docker/python/build-musllinux.sh +++ b/tools/docker/python/build-musllinux.sh @@ -35,7 +35,7 @@ DESCRIPTION \tYou MUST define the following variables before running this script: \t* PLATFORM: x86_64 aarch64 -\t* PYTHON_VERSION: 3 38 39 310 311 312 313 +\t* PYTHON_VERSION: 3 39 310 311 312 313 note: PYTHON_VERSION=3 will generate for all pythons which could take time... 
OPTIONS @@ -118,7 +118,6 @@ function check_wheel() { # Check mypy files declare -a MYPY_FILES=( "ortools/algorithms/python/knapsack_solver.pyi" - "ortools/algorithms/python/set_cover.pyi" "ortools/constraint_solver/pywrapcp.pyi" "ortools/graph/python/linear_sum_assignment.pyi" "ortools/graph/python/max_flow.pyi" @@ -129,6 +128,7 @@ function check_wheel() { "ortools/pdlp/python/pdlp.pyi" "ortools/sat/python/cp_model_helper.pyi" "ortools/scheduling/python/rcpsp.pyi" + "ortools/set_cover/python/set_cover.pyi" "ortools/util/python/sorted_interval_list.pyi" ) for FILE in "${MYPY_FILES[@]}"; do @@ -255,7 +255,7 @@ function main() { assert_defined PYTHON_VERSION # Setup - declare -a SKIPS=( "cp36-cp36m" "cp37-cp37m" ) + declare -a SKIPS=( "cp38-cp38" ) case ${1} in build) From 6390cc1d37a32a6ab119d2be673df60e6cdc051b Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 27 May 2025 13:07:06 +0200 Subject: [PATCH 033/509] cmake: Fix host tools build when using a toolchain --- cmake/host.CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/host.CMakeLists.txt b/cmake/host.CMakeLists.txt index 9d5b83c421..970c4e0e40 100644 --- a/cmake/host.CMakeLists.txt +++ b/cmake/host.CMakeLists.txt @@ -106,10 +106,10 @@ set(ABSL_BUILD_TESTING OFF) FetchContent_Declare( absl GIT_REPOSITORY "https://github.com/abseil/abseil-cpp.git" - GIT_TAG "20250127.1" + GIT_TAG "20250512.0" GIT_SHALLOW TRUE PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/abseil-cpp-20250127.1.patch" + "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/abseil-cpp-20250512.0.patch" ) FetchContent_MakeAvailable(absl) list(POP_BACK CMAKE_MESSAGE_INDENT) From 4e2d73ae3f908a591a6025a6e258b016dcd9a614 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 27 May 2025 13:25:21 +0200 Subject: [PATCH 034/509] python: bump last remaining 3.8 to 3.9 note: Protobuf v31.0 has dropped support of Python 3.8 --- ortools/python/README.md | 2 +- 
ortools/python/setup.py.in | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ortools/python/README.md b/ortools/python/README.md index b149ac78f6..e95202f480 100644 --- a/ortools/python/README.md +++ b/ortools/python/README.md @@ -1,6 +1,6 @@ # Introduction -This is the documentation page for the Python 3.8+ wrapper of OR-Tools. +This is the documentation page for the Python 3.9+ wrapper of OR-Tools. This project aim to explain how you build a Python native wheel package using [`setup.py`](https://packaging.python.org/tutorials/packaging-projects/). diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index 23b6654505..b38b5fd708 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -41,7 +41,7 @@ setup( name='@PYTHON_PROJECT@', version='@PROJECT_VERSION@', packages=find_packages(), - python_requires='>= 3.8', + python_requires='>= 3.9', install_requires=[ 'absl-py >= 2.0.0', 'numpy >= 1.13.3', @@ -171,7 +171,6 @@ setup( 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', From 46a97de1c894cee95a55409a9b104543e1951c13 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 27 May 2025 13:47:22 +0200 Subject: [PATCH 035/509] Use Fractional everywhere --- ortools/glop/lp_solver.cc | 21 +++++----- ortools/glop/preprocessor.cc | 9 ++-- ortools/glop/preprocessor.h | 4 +- ortools/glop/primal_edge_norms.cc | 11 +++-- ortools/glop/reduced_costs.cc | 69 ++++++++++--------------------- ortools/glop/revised_simplex.cc | 12 +++--- ortools/glop/update_row.cc | 7 +++- ortools/glop/variable_values.cc | 2 +- ortools/lp_data/lp_parser.cc | 19 ++++++++- ortools/lp_data/lp_types.h | 6 +-- 10 files changed, 82 insertions(+), 78 deletions(-) diff --git a/ortools/glop/lp_solver.cc 
b/ortools/glop/lp_solver.cc index 2c9efc41c6..4e8abe93b5 100644 --- a/ortools/glop/lp_solver.cc +++ b/ortools/glop/lp_solver.cc @@ -325,7 +325,7 @@ Fractional ProblemObjectiveValue(const LinearProgram& lp, Fractional value) { // Returns the allowed error magnitude for something that should evaluate to // value under the given tolerance. Fractional AllowedError(Fractional tolerance, Fractional value) { - return tolerance * std::max(1.0, std::abs(value)); + return tolerance * std::max(Fractional(1.0), std::abs(value)); } } // namespace @@ -492,15 +492,15 @@ bool LPSolver::IsOptimalSolutionOnFacet(const LinearProgram& lp) { // primal values. // TODO(user): investigate whether to use the tolerances defined in // parameters.proto. - const double kReducedCostTolerance = 1e-9; - const double kBoundTolerance = 1e-7; + const Fractional kReducedCostTolerance = 1e-9; + const Fractional kBoundTolerance = 1e-7; const ColIndex num_cols = lp.num_variables(); for (ColIndex col(0); col < num_cols; ++col) { if (variable_statuses_[col] == VariableStatus::FIXED_VALUE) continue; const Fractional lower_bound = lp.variable_lower_bounds()[col]; const Fractional upper_bound = lp.variable_upper_bounds()[col]; const Fractional value = primal_values_[col]; - if (AreWithinAbsoluteTolerance(reduced_costs_[col], 0.0, + if (AreWithinAbsoluteTolerance(reduced_costs_[col], Fractional(0.0), kReducedCostTolerance) && (AreWithinAbsoluteTolerance(value, lower_bound, kBoundTolerance) || AreWithinAbsoluteTolerance(value, upper_bound, kBoundTolerance))) { @@ -513,7 +513,7 @@ bool LPSolver::IsOptimalSolutionOnFacet(const LinearProgram& lp) { const Fractional lower_bound = lp.constraint_lower_bounds()[row]; const Fractional upper_bound = lp.constraint_upper_bounds()[row]; const Fractional activity = constraint_activities_[row]; - if (AreWithinAbsoluteTolerance(dual_values_[row], 0.0, + if (AreWithinAbsoluteTolerance(dual_values_[row], Fractional(0.0), kReducedCostTolerance) && 
(AreWithinAbsoluteTolerance(activity, lower_bound, kBoundTolerance) || AreWithinAbsoluteTolerance(activity, upper_bound, kBoundTolerance))) { @@ -739,7 +739,8 @@ bool LPSolver::IsProblemSolutionConsistent( case VariableStatus::AT_UPPER_BOUND: // TODO(user): revert to an exact comparison once the bug causing this // to fail has been fixed. - if (!AreWithinAbsoluteTolerance(value, ub, 1e-7) || lb == ub) { + if (!AreWithinAbsoluteTolerance(value, ub, Fractional(1e-7)) || + lb == ub) { LogVariableStatusError(col, value, status, lb, ub); return false; } @@ -1002,7 +1003,7 @@ double LPSolver::ComputeMaxExpectedObjectiveError(const LinearProgram& lp) { double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram& lp, bool* is_too_large) { - double infeasibility = 0.0; + Fractional infeasibility = 0.0; const Fractional tolerance = parameters_.solution_feasibility_tolerance(); const ColIndex num_cols = lp.num_variables(); for (ColIndex col(0); col < num_cols; ++col) { @@ -1032,7 +1033,7 @@ double LPSolver::ComputePrimalValueInfeasibility(const LinearProgram& lp, double LPSolver::ComputeActivityInfeasibility(const LinearProgram& lp, bool* is_too_large) { - double infeasibility = 0.0; + Fractional infeasibility = 0.0; int num_problematic_rows(0); const RowIndex num_rows = lp.num_constraints(); const Fractional tolerance = parameters_.solution_feasibility_tolerance(); @@ -1085,7 +1086,7 @@ double LPSolver::ComputeDualValueInfeasibility(const LinearProgram& lp, bool* is_too_large) { const Fractional allowed_error = parameters_.solution_feasibility_tolerance(); const Fractional optimization_sign = lp.IsMaximizationProblem() ? 
-1.0 : 1.0; - double infeasibility = 0.0; + Fractional infeasibility = 0.0; const RowIndex num_rows = lp.num_constraints(); for (RowIndex row(0); row < num_rows; ++row) { const Fractional dual_value = dual_values_[row]; @@ -1108,7 +1109,7 @@ double LPSolver::ComputeDualValueInfeasibility(const LinearProgram& lp, double LPSolver::ComputeReducedCostInfeasibility(const LinearProgram& lp, bool* is_too_large) { const Fractional optimization_sign = lp.IsMaximizationProblem() ? -1.0 : 1.0; - double infeasibility = 0.0; + Fractional infeasibility = 0.0; const ColIndex num_cols = lp.num_variables(); const Fractional tolerance = parameters_.solution_feasibility_tolerance(); for (ColIndex col(0); col < num_cols; ++col) { diff --git a/ortools/glop/preprocessor.cc b/ortools/glop/preprocessor.cc index 41e00bbcfa..b39a461f86 100644 --- a/ortools/glop/preprocessor.cc +++ b/ortools/glop/preprocessor.cc @@ -2430,7 +2430,8 @@ bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable( const Fractional coefficient_ratio = coefficient / matrix_entry.coeff; // Check if coefficient_ratio is integer. 
if (!IsIntegerWithinTolerance( - coefficient_ratio, parameters_.solution_feasibility_tolerance())) { + coefficient_ratio, + Fractional(parameters_.solution_feasibility_tolerance()))) { return false; } } @@ -2439,7 +2440,8 @@ bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable( if (IsFinite(constraint_lb)) { const Fractional lower_bound_ratio = constraint_lb / matrix_entry.coeff; if (!IsIntegerWithinTolerance( - lower_bound_ratio, parameters_.solution_feasibility_tolerance())) { + lower_bound_ratio, + Fractional(parameters_.solution_feasibility_tolerance()))) { return false; } } @@ -2448,7 +2450,8 @@ bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable( if (IsFinite(constraint_ub)) { const Fractional upper_bound_ratio = constraint_ub / matrix_entry.coeff; if (!IsIntegerWithinTolerance( - upper_bound_ratio, parameters_.solution_feasibility_tolerance())) { + upper_bound_ratio, + Fractional(parameters_.solution_feasibility_tolerance()))) { return false; } } diff --git a/ortools/glop/preprocessor.h b/ortools/glop/preprocessor.h index 14c36103c0..dc171cb91d 100644 --- a/ortools/glop/preprocessor.h +++ b/ortools/glop/preprocessor.h @@ -83,13 +83,13 @@ class Preprocessor { // tolerance). bool IsSmallerWithinFeasibilityTolerance(Fractional a, Fractional b) const { return ::operations_research::IsSmallerWithinTolerance( - a, b, parameters_.solution_feasibility_tolerance()); + a, b, Fractional(parameters_.solution_feasibility_tolerance())); } bool IsSmallerWithinPreprocessorZeroTolerance(Fractional a, Fractional b) const { // TODO(user): use an absolute tolerance here to be even more defensive? 
return ::operations_research::IsSmallerWithinTolerance( - a, b, parameters_.preprocessor_zero_tolerance()); + a, b, Fractional(parameters_.preprocessor_zero_tolerance())); } ProblemStatus status_; diff --git a/ortools/glop/primal_edge_norms.cc b/ortools/glop/primal_edge_norms.cc index d0eeb1683e..5a39d73899 100644 --- a/ortools/glop/primal_edge_norms.cc +++ b/ortools/glop/primal_edge_norms.cc @@ -238,11 +238,14 @@ void PrimalEdgeNorms::UpdateEdgeSquaredNorms(ColIndex entering_col, const Fractional pivot = -direction[leaving_row]; DCHECK_NE(pivot, 0.0); + const ColIndex first_slack = + compact_matrix_.num_cols() - RowToColIndex(compact_matrix_.num_rows()); + // Note that this should be precise because of the call to // TestEnteringEdgeNormPrecision(). const Fractional entering_squared_norm = edge_squared_norms_[entering_col]; const Fractional leaving_squared_norm = - std::max(1.0, entering_squared_norm / Square(pivot)); + std::max(Fractional(1.0), entering_squared_norm / Square(pivot)); int stat_lower_bounded_norms = 0; const Fractional factor = 2.0 / pivot; @@ -253,7 +256,9 @@ void PrimalEdgeNorms::UpdateEdgeSquaredNorms(ColIndex entering_col, for (const ColIndex col : update_row.GetNonZeroPositions()) { const Fractional coeff = update_row.GetCoefficient(col); const Fractional scalar_product = - view.ColumnScalarProduct(col, direction_left_inverse); + col >= first_slack + ? direction_left_inverse_[col - first_slack] + : view.ColumnScalarProduct(col, direction_left_inverse); num_operations_ += view.ColumnNumEntries(col).value(); // Update the edge squared norm of this column. 
Note that the update @@ -288,7 +293,7 @@ void PrimalEdgeNorms::UpdateDevexWeights( const Fractional entering_norm = sqrt(PreciseSquaredNorm(direction)); const Fractional pivot_magnitude = std::abs(direction[leaving_row]); const Fractional leaving_norm = - std::max(1.0, entering_norm / pivot_magnitude); + std::max(Fractional(1.0), entering_norm / pivot_magnitude); for (const ColIndex col : update_row.GetNonZeroPositions()) { const Fractional coeff = update_row.GetCoefficient(col); const Fractional update_vector_norm = std::abs(coeff) * leaving_norm; diff --git a/ortools/glop/reduced_costs.cc b/ortools/glop/reduced_costs.cc index 4542c3ff03..9eb8bc098d 100644 --- a/ortools/glop/reduced_costs.cc +++ b/ortools/glop/reduced_costs.cc @@ -26,17 +26,12 @@ #include "ortools/glop/update_row.h" #include "ortools/glop/variables_info.h" #include "ortools/lp_data/lp_types.h" +#include "ortools/lp_data/lp_utils.h" #include "ortools/lp_data/scattered_vector.h" #include "ortools/lp_data/sparse.h" #include "ortools/util/bitset.h" #include "ortools/util/stats.h" -#ifdef OMP -#include -#endif - -#include "ortools/lp_data/lp_utils.h" - namespace operations_research { namespace glop { @@ -275,8 +270,8 @@ void ReducedCosts::PerturbCosts() { case VariableType::UPPER_AND_LOWER_BOUNDED: // Here we don't necessarily maintain the dual-feasibility of a dual // feasible solution, however we can always shift the variable to its - // other bound (because it is boxed) to restore dual-feasiblity. This is - // done by MakeBoxedVariableDualFeasible() at the end of the dual + // other bound (because it is boxed) to restore dual-feasibility. This + // is done by MakeBoxedVariableDualFeasible() at the end of the dual // phase-I algorithm. 
if (objective > 0.0) { cost_perturbations_[col] = magnitude; @@ -370,52 +365,30 @@ void ReducedCosts::ComputeReducedCosts() { } Fractional dual_residual_error(0.0); const ColIndex num_cols = matrix_.num_cols(); + const ColIndex first_slack = num_cols - RowToColIndex(matrix_.num_rows()); reduced_costs_.resize(num_cols, 0.0); const DenseBitRow& is_basic = variables_info_.GetIsBasicBitRow(); -#ifdef OMP - const int num_omp_threads = parameters_.num_omp_threads(); -#else - const int num_omp_threads = 1; -#endif - if (num_omp_threads == 1) { - for (ColIndex col(0); col < num_cols; ++col) { - reduced_costs_[col] = objective_[col] + cost_perturbations_[col] - - matrix_.ColumnScalarProduct( - col, basic_objective_left_inverse_.values); + for (ColIndex col(0); col < first_slack; ++col) { + reduced_costs_[col] = + objective_[col] + cost_perturbations_[col] - + matrix_.ColumnScalarProduct(col, basic_objective_left_inverse_.values); - // We also compute the dual residual error y.B - c_B. - if (is_basic.IsSet(col)) { - dual_residual_error = - std::max(dual_residual_error, std::abs(reduced_costs_[col])); - } - } - } else { -#ifdef OMP - // In the multi-threaded case, perform the same computation as in the - // single-threaded case above. - std::vector thread_local_dual_residual_error(num_omp_threads, - 0.0); - const int parallel_loop_size = num_cols.value(); -#pragma omp parallel for num_threads(num_omp_threads) - for (int i = 0; i < parallel_loop_size; i++) { - const ColIndex col(i); - reduced_costs_[col] = objective_[col] + objective_perturbation_[col] - - matrix_.ColumnScalarProduct( - col, basic_objective_left_inverse_.values); - - if (is_basic.IsSet(col)) { - thread_local_dual_residual_error[omp_get_thread_num()] = - std::max(thread_local_dual_residual_error[omp_get_thread_num()], - std::abs(reduced_costs_[col])); - } - } - // end of omp parallel for - for (int i = 0; i < num_omp_threads; i++) { + // We also compute the dual residual error y.B - c_B. 
+ if (is_basic.IsSet(col)) { dual_residual_error = - std::max(dual_residual_error, thread_local_dual_residual_error[i]); + std::max(dual_residual_error, std::abs(reduced_costs_[col])); + } + } + for (ColIndex col(first_slack); col < num_cols; ++col) { + reduced_costs_[col] = objective_[col] + cost_perturbations_[col] - + basic_objective_left_inverse_[col - first_slack]; + + // We also compute the dual residual error y.B - c_B. + if (is_basic.IsSet(col)) { + dual_residual_error = + std::max(dual_residual_error, std::abs(reduced_costs_[col])); } -#endif // OMP } deterministic_time_ += diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index 92b9f9c0a0..e9b26c8029 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -478,7 +478,7 @@ ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal( double min_distance = kInfinity; const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds(); const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds(); - double cost_delta = 0.0; + Fractional cost_delta = 0.0; for (ColIndex col(0); col < num_cols_; ++col) { cost_delta += solution_primal_ray_[col] * objective_[col]; if (solution_primal_ray_[col] > 0 && upper_bounds[col] != kInfinity) { @@ -586,10 +586,12 @@ ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal( // infeasibility lower than its corresponding residual error. Note that // we already adapt the tolerance like this during the simplex // execution. 
- const Fractional primal_tolerance = std::max( - primal_residual, parameters_.primal_feasibility_tolerance()); + const Fractional primal_tolerance = + std::max(primal_residual, + Fractional(parameters_.primal_feasibility_tolerance())); const Fractional dual_tolerance = - std::max(dual_residual, parameters_.dual_feasibility_tolerance()); + std::max(dual_residual, + Fractional(parameters_.dual_feasibility_tolerance())); const Fractional primal_infeasibility = variable_values_.ComputeMaximumPrimalInfeasibility(); const Fractional dual_infeasibility = @@ -2747,7 +2749,7 @@ Status RevisedSimplex::Polish(TimeLimit* time_limit) { const auto get_diff = [this](ColIndex col, Fractional old_value, Fractional new_value) { if (col >= integrality_scale_.size() || integrality_scale_[col] == 0.0) { - return 0.0; + return Fractional(0.0); } const Fractional s = integrality_scale_[col]; return (std::abs(new_value * s - std::round(new_value * s)) - diff --git a/ortools/glop/update_row.cc b/ortools/glop/update_row.cc index cc10283d6e..84b5349911 100644 --- a/ortools/glop/update_row.cc +++ b/ortools/glop/update_row.cc @@ -304,6 +304,9 @@ void UpdateRow::ComputeUpdatesColumnWise() { non_zero_position_list_.resize(matrix_.num_cols().value()); auto* non_zeros = non_zero_position_list_.data(); + const ColIndex first_slack = + matrix_.num_cols() - RowToColIndex(matrix_.num_rows()); + const Fractional drop_tolerance = parameters_.drop_tolerance(); const auto output_coeffs = coefficient_.view(); const auto view = matrix_.view(); @@ -311,7 +314,9 @@ void UpdateRow::ComputeUpdatesColumnWise() { for (const ColIndex col : variables_info_.GetIsRelevantBitRow()) { // Coefficient of the column right inverse on the 'leaving_row'. const Fractional coeff = - view.ColumnScalarProduct(col, unit_row_left_inverse); + col >= first_slack + ? 
unit_row_left_inverse_[col - first_slack] + : view.ColumnScalarProduct(col, unit_row_left_inverse); // Nothing to do if 'coeff' is (almost) zero which does happen due to // sparsity. Note that it shouldn't be too bad to use a non-zero drop diff --git a/ortools/glop/variable_values.cc b/ortools/glop/variable_values.cc index 734ee5fd4f..c9ee205e19 100644 --- a/ortools/glop/variable_values.cc +++ b/ortools/glop/variable_values.cc @@ -170,7 +170,7 @@ Fractional VariableValues::ComputeSumOfPrimalInfeasibilities() const { for (ColIndex col(0); col < num_cols; ++col) { const Fractional infeasibility = GetColInfeasibility(col, values, lower_bounds, upper_bounds); - sum += std::max(0.0, infeasibility); + sum += std::max(Fractional{0.0}, infeasibility); } return sum; } diff --git a/ortools/lp_data/lp_parser.cc b/ortools/lp_data/lp_parser.cc index 2aa6879050..f41e28626a 100644 --- a/ortools/lp_data/lp_parser.cc +++ b/ortools/lp_data/lp_parser.cc @@ -235,6 +235,21 @@ bool LPParser::ParseConstraint(StringPiece constraint) { return true; } +namespace { + +template +bool SimpleAtoFractional(absl::string_view str, T* value) { + if constexpr (std::is_same_v) { + return absl::SimpleAtod(str, value); + } else if constexpr (std::is_same_v) { + return absl::SimpleAtof(str, value); + } else { + static_assert(false, "Unsupported fractional type"); + return false; + } +} +} // namespace + bool LPParser::SetVariableBounds(ColIndex col, Fractional lb, Fractional ub) { if (bounded_variables_.find(col) == bounded_variables_.end()) { // The variable was not bounded yet, thus reset its bounds. 
@@ -250,7 +265,7 @@ bool LPParser::SetVariableBounds(ColIndex col, Fractional lb, Fractional ub) { } TokenType ConsumeToken(StringPiece* sp, std::string* consumed_name, - double* consumed_coeff) { + Fractional* consumed_coeff) { DCHECK(consumed_name != nullptr); DCHECK(consumed_coeff != nullptr); // We use LazyRE2 everywhere so that all the patterns are just compiled once @@ -305,7 +320,7 @@ TokenType ConsumeToken(StringPiece* sp, std::string* consumed_name, static const LazyRE2 kValuePattern = { R"(\s*([0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?))"}; if (RE2::Consume(sp, *kValuePattern, &coeff)) { - if (!absl::SimpleAtod(coeff, consumed_coeff)) { + if (!SimpleAtoFractional(coeff, consumed_coeff)) { // Note: If absl::SimpleAtod(), Consume(), and kValuePattern are correct, // this should never happen. LOG(ERROR) << "Text: " << coeff << " was matched by RE2 to be " diff --git a/ortools/lp_data/lp_types.h b/ortools/lp_data/lp_types.h index 1e45d59301..837a0a8ec9 100644 --- a/ortools/lp_data/lp_types.h +++ b/ortools/lp_data/lp_types.h @@ -81,13 +81,13 @@ static inline double ToDouble(double f) { return f; } typedef double Fractional; // Range max for type Fractional. DBL_MAX for double for example. -constexpr double kRangeMax = std::numeric_limits::max(); +constexpr Fractional kRangeMax = std::numeric_limits::max(); // Infinity for type Fractional. -constexpr double kInfinity = std::numeric_limits::infinity(); +constexpr Fractional kInfinity = std::numeric_limits::infinity(); // Epsilon for type Fractional, i.e. the smallest e such that 1.0 + e != 1.0 . -constexpr double kEpsilon = std::numeric_limits::epsilon(); +constexpr Fractional kEpsilon = std::numeric_limits::epsilon(); // Returns true if the given value is finite, that means for a double: // not a NaN and not +/- infinity. 
From d055baeca4d0a2c8f11b676e0d2b46d3f094670d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 27 May 2025 13:49:29 +0200 Subject: [PATCH 036/509] [CP-SAT] work on precedences; improve scheduling cuts --- ortools/sat/2d_distances_propagator.cc | 223 +++++++++++++ ortools/sat/2d_distances_propagator.h | 67 ++++ ortools/sat/BUILD.bazel | 28 +- ortools/sat/cp_model.cc | 4 +- ortools/sat/cp_model_loader.cc | 36 +- ortools/sat/cp_model_postsolve.cc | 8 +- ortools/sat/cp_model_presolve.cc | 27 +- ortools/sat/cp_model_solver.cc | 2 +- ortools/sat/cp_model_solver.h | 4 +- ortools/sat/diffn.cc | 8 + ortools/sat/diffn_cuts.cc | 4 +- ortools/sat/disjunctive.cc | 14 +- ortools/sat/disjunctive_test.cc | 3 +- ortools/sat/integer.h | 28 ++ ortools/sat/integer_base.cc | 53 ++- ortools/sat/integer_base.h | 52 ++- ortools/sat/integer_base_test.cc | 35 ++ ortools/sat/intervals.cc | 6 +- ortools/sat/linear_programming_constraint.cc | 3 +- ortools/sat/linear_propagation.cc | 62 +++- ortools/sat/linear_propagation.h | 7 + ortools/sat/lp_utils.cc | 5 +- ortools/sat/precedences.cc | 327 ++++++++++++++----- ortools/sat/precedences.h | 184 +++++++---- ortools/sat/precedences_test.cc | 224 ++++++++++--- ortools/sat/sat_inprocessing.cc | 3 + ortools/sat/sat_parameters.proto | 20 +- ortools/sat/scheduling_cuts.cc | 202 +++++++----- ortools/sat/scheduling_cuts.h | 31 +- ortools/sat/scheduling_cuts_test.cc | 89 ++++- ortools/sat/scheduling_helpers.cc | 32 +- ortools/sat/simplification.cc | 59 +++- ortools/sat/simplification.h | 10 +- ortools/sat/synchronization.cc | 2 +- ortools/sat/synchronization.h | 4 +- ortools/sat/util.h | 193 ++++++++++- ortools/sat/util_test.cc | 103 +++++- 37 files changed, 1760 insertions(+), 402 deletions(-) create mode 100644 ortools/sat/2d_distances_propagator.cc create mode 100644 ortools/sat/2d_distances_propagator.h diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc new file mode 100644 index 0000000000..62eb603ee8 
--- /dev/null +++ b/ortools/sat/2d_distances_propagator.cc @@ -0,0 +1,223 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/2d_distances_propagator.h" + +#include +#include +#include +#include + +#include "absl/container/flat_hash_map.h" +#include "absl/log/check.h" +#include "absl/log/log.h" +#include "absl/types/span.h" +#include "ortools/base/stl_util.h" +#include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/integer.h" +#include "ortools/sat/integer_base.h" +#include "ortools/sat/linear_propagation.h" +#include "ortools/sat/model.h" +#include "ortools/sat/no_overlap_2d_helper.h" +#include "ortools/sat/precedences.h" +#include "ortools/sat/sat_base.h" +#include "ortools/sat/scheduling_helpers.h" +#include "ortools/sat/synchronization.h" + +namespace operations_research { +namespace sat { + +Precedences2DPropagator::Precedences2DPropagator( + NoOverlap2DConstraintHelper* helper, Model* model) + : helper_(*helper), + binary_relations_maps_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()) { + model->GetOrCreate()->SetPushAffineUbForBinaryRelation(); +} + +void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { + helper_.SynchronizeAndSetDirection(); + non_trivial_pairs_.clear(); + + struct VarUsage { + // boxes[0=x, 1=y][0=start, 1=end] + std::vector boxes[2][2]; + }; + absl::flat_hash_map var_to_box_and_coeffs; + + for (int dim = 0; dim < 2; ++dim) { + const 
SchedulingConstraintHelper& dim_helper = + dim == 0 ? helper_.x_helper() : helper_.y_helper(); + for (int i = 0; i < helper_.NumBoxes(); ++i) { + const absl::Span interval_points = + i == 0 ? dim_helper.Starts() : dim_helper.Ends(); + for (int j = 0; j < 2; ++j) { + if (interval_points[i].var != kNoIntegerVariable) { + var_to_box_and_coeffs[PositiveVariable(interval_points[i].var)] + .boxes[dim][j] + .push_back(i); + } + } + } + } + + VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " + << binary_relations_maps_->GetAllExpressionsWithAffineBounds().size(); + for (const LinearExpression2& expr : + binary_relations_maps_->GetAllExpressionsWithAffineBounds()) { + auto it1 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[0])); + auto it2 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[1])); + if (it1 == var_to_box_and_coeffs.end() || + it2 == var_to_box_and_coeffs.end()) { + continue; + } + + const VarUsage& usage1 = it1->second; + const VarUsage& usage2 = it2->second; + for (int dim = 0; dim < 2; ++dim) { + const SchedulingConstraintHelper& dim_helper = + dim == 0 ? 
helper_.x_helper() : helper_.y_helper(); + for (const int box1 : usage1.boxes[dim][0 /* start */]) { + for (const int box2 : usage2.boxes[dim][1 /* end */]) { + const AffineExpression& start = dim_helper.Starts()[box1]; + const AffineExpression& end = dim_helper.Ends()[box2]; + LinearExpression2 expr2; + expr2.vars[0] = start.var; + expr2.vars[1] = NegationOf(end.var); + expr2.coeffs[0] = start.coeff; + expr2.coeffs[1] = end.coeff; + expr2.SimpleCanonicalization(); + expr2.DivideByGcd(); + if (expr == expr2) { + if (box1 < box2) { + non_trivial_pairs_.push_back({box1, box2}); + } else { + non_trivial_pairs_.push_back({box2, box1}); + } + } + } + } + } + } + + gtl::STLSortAndRemoveDuplicates(&non_trivial_pairs_); +} + +bool Precedences2DPropagator::Propagate() { + if (!helper_.SynchronizeAndSetDirection()) return false; + if (last_helper_inprocessing_count_ != helper_.InProcessingCount() || + helper_.x_helper().CurrentDecisionLevel() == 0 || + last_num_expressions_ != + binary_relations_maps_->NumExpressionsWithAffineBounds()) { + last_helper_inprocessing_count_ = helper_.InProcessingCount(); + last_num_expressions_ = + binary_relations_maps_->NumExpressionsWithAffineBounds(); + CollectPairsOfBoxesWithNonTrivialDistance(); + } + + num_calls_++; + + SchedulingConstraintHelper* helpers[2] = {&helper_.x_helper(), + &helper_.y_helper()}; + + for (const auto& [box1, box2] : non_trivial_pairs_) { + DCHECK(box1 < helper_.NumBoxes()); + DCHECK(box2 < helper_.NumBoxes()); + if (!helper_.IsPresent(box1) && !helper_.IsPresent(box2)) { + continue; + } + + bool is_unfeasible = true; + for (int dim = 0; dim < 2; dim++) { + const SchedulingConstraintHelper* helper = helpers[dim]; + for (int j = 0; j < 2; j++) { + int b1 = box1; + int b2 = box2; + if (j == 1) { + std::swap(b1, b2); + } + LinearExpression2 expr; + expr.vars[0] = helper->Starts()[b1].var; + expr.vars[1] = NegationOf(helper->Ends()[b2].var); + expr.coeffs[0] = helper->Starts()[b1].coeff; + expr.coeffs[1] = 
helper->Ends()[b2].coeff; + const IntegerValue ub_of_start_minus_end_value = + binary_relations_maps_->UpperBound(expr) + + helper->Starts()[b1].constant - helper->Ends()[b2].constant; + if (ub_of_start_minus_end_value >= 0) { + is_unfeasible = false; + break; + } + if (!is_unfeasible) break; + } + } + if (!is_unfeasible) continue; + + // We have a mandatory overlap on both x and y! Explain and propagate. + + helper_.ClearReason(); + num_conflicts_++; + std::vector reason; + std::vector lit_reason; + + for (int dim = 0; dim < 2; dim++) { + SchedulingConstraintHelper* helper = helpers[dim]; + for (int j = 0; j < 2; j++) { + int b1 = box1; + int b2 = box2; + if (j == 1) { + std::swap(b1, b2); + } + LinearExpression2 expr; + expr.vars[0] = helper->Starts()[b1].var; + expr.vars[1] = NegationOf(helper->Ends()[b2].var); + expr.coeffs[0] = helper->Starts()[b1].coeff; + expr.coeffs[1] = helper->Ends()[b2].coeff; + binary_relations_maps_->AddReasonForUpperBoundLowerThan( + expr, + -(helper->Starts()[b1].constant - helper->Ends()[b2].constant), + &lit_reason, &reason); + } + } + helper_.AddPresenceReason(box1); + helper_.AddPresenceReason(box2); + helper_.x_helper().MutableIntegerReason()->insert( + helper_.x_helper().MutableIntegerReason()->end(), reason.begin(), + reason.end()); + helper_.x_helper().MutableLiteralReason()->insert( + helper_.x_helper().MutableLiteralReason()->end(), lit_reason.begin(), + lit_reason.end()); + return helper_.ReportConflict(); + } + return true; +} + +int Precedences2DPropagator::RegisterWith(GenericLiteralWatcher* watcher) { + const int id = watcher->Register(this); + helper_.WatchAllBoxes(id); + // TODO(user): add an API to BinaryRelationsMaps to watch linear2 + return id; +} + +Precedences2DPropagator::~Precedences2DPropagator() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"Precedences2DPropagator/called", num_calls_}); + stats.push_back({"Precedences2DPropagator/conflicts", num_conflicts_}); + 
stats.push_back({"Precedences2DPropagator/pairs", non_trivial_pairs_.size()}); + + shared_stats_->AddStats(stats); +} + +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/2d_distances_propagator.h b/ortools/sat/2d_distances_propagator.h new file mode 100644 index 0000000000..b05e6b1a3d --- /dev/null +++ b/ortools/sat/2d_distances_propagator.h @@ -0,0 +1,67 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_SAT_2D_DISTANCES_PROPAGATOR_H_ +#define OR_TOOLS_SAT_2D_DISTANCES_PROPAGATOR_H_ + +#include +#include +#include + +#include "ortools/sat/integer.h" +#include "ortools/sat/model.h" +#include "ortools/sat/no_overlap_2d_helper.h" +#include "ortools/sat/precedences.h" +#include "ortools/sat/synchronization.h" + +namespace operations_research { +namespace sat { + +// This class implements a propagator for non_overlap_2d constraints that uses +// the BinaryRelationsMaps to detect precedences between pairs of boxes and +// detect a conflict if the precedences implies an overlap between the two +// boxes. For doing this efficiently, it keep track of pairs of boxes that have +// non-fixed precedences in the BinaryRelationsMaps and only check those in the +// propagation. 
+class Precedences2DPropagator : public PropagatorInterface { + public: + Precedences2DPropagator(NoOverlap2DConstraintHelper* helper, Model* model); + + ~Precedences2DPropagator() override; + + bool Propagate() final; + int RegisterWith(GenericLiteralWatcher* watcher); + + private: + void CollectPairsOfBoxesWithNonTrivialDistance(); + + std::vector> non_trivial_pairs_; + + NoOverlap2DConstraintHelper& helper_; + BinaryRelationsMaps* binary_relations_maps_; + SharedStatistics* shared_stats_; + + int last_helper_inprocessing_count_ = -1; + int last_num_expressions_ = -1; + + int64_t num_conflicts_ = 0; + int64_t num_calls_ = 0; + + Precedences2DPropagator(const Precedences2DPropagator&) = delete; + Precedences2DPropagator& operator=(const Precedences2DPropagator&) = delete; +}; + +} // namespace sat +} // namespace operations_research + +#endif // OR_TOOLS_SAT_2D_DISTANCES_PROPAGATOR_H_ diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 06106c2ad1..56eb67a470 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -114,6 +114,29 @@ proto_library( srcs = ["cp_model.proto"], ) +cc_library( + name = "2d_distances_propagator", + srcs = ["2d_distances_propagator.cc"], + hdrs = ["2d_distances_propagator.h"], + deps = [ + ":cp_model_cc_proto", + ":integer", + ":integer_base", + ":linear_propagation", + ":model", + ":no_overlap_2d_helper", + ":precedences", + ":sat_base", + ":scheduling_helpers", + ":synchronization", + "//ortools/base:stl_util", + "@abseil-cpp//absl/container:flat_hash_map", + "@abseil-cpp//absl/log", + "@abseil-cpp//absl/log:check", + "@abseil-cpp//absl/types:span", + ], +) + cc_library( name = "2d_mandatory_overlap_propagator", srcs = ["2d_mandatory_overlap_propagator.cc"], @@ -809,6 +832,7 @@ cc_library( srcs = ["cp_model_loader.cc"], hdrs = ["cp_model_loader.h"], deps = [ + ":2d_distances_propagator", ":all_different", ":circuit", ":clause", @@ -3175,6 +3199,7 @@ cc_test( "//ortools/util:random_engine", 
"//ortools/util:sorted_interval_list", "@abseil-cpp//absl/container:btree", + "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/numeric:int128", "@abseil-cpp//absl/random", @@ -3514,8 +3539,8 @@ cc_test( "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", - "@google_benchmark//:benchmark", "@fuzztest//fuzztest:fuzztest_gtest_main", + "@google_benchmark//:benchmark", ], ) @@ -3524,6 +3549,7 @@ cc_library( srcs = ["diffn.cc"], hdrs = ["diffn.h"], deps = [ + ":2d_distances_propagator", ":2d_mandatory_overlap_propagator", ":2d_orthogonal_packing", ":2d_try_edge_propagator", diff --git a/ortools/sat/cp_model.cc b/ortools/sat/cp_model.cc index 098bd85db8..30a888d3d8 100644 --- a/ortools/sat/cp_model.cc +++ b/ortools/sat/cp_model.cc @@ -47,10 +47,10 @@ BoolVar BoolVar::WithName(absl::string_view name) { std::string BoolVar::Name() const { if (builder_ == nullptr) return "null"; - const std::string& name = + absl::string_view name = builder_->Proto().variables(PositiveRef(index_)).name(); if (RefIsPositive(index_)) { - return name; + return std::string(name); } else { return absl::StrCat("Not(", name, ")"); } diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index 6f5af4cad2..f053299657 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -1271,49 +1271,29 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { rhs_max = std::min(rhs_max, max_sum.value()); if (vars.size() == 2) { - if (std::abs(coeffs[0]) == std::abs(coeffs[1])) { - const int64_t magnitude = std::abs(coeffs[0]); - IntegerVariable v1 = vars[0]; - IntegerVariable v2 = vars[1]; - if (coeffs[0] < 0) v1 = NegationOf(v1); - if (coeffs[1] > 0) v2 = NegationOf(v2); - - // magnitude * v1 <= magnitude * v2 + rhs_max. - precedences->Add(v1, v2, MathUtil::CeilOfRatio(-rhs_max, magnitude)); - - // magnitude * v1 >= magnitude * v2 + rhs_min. 
- precedences->Add(v2, v1, MathUtil::CeilOfRatio(rhs_min, magnitude)); - } + LinearExpression2 expr(vars[0], vars[1], coeffs[0], coeffs[1]); + precedences->AddBounds(expr, rhs_min, rhs_max); } else if (vars.size() == 3) { + // TODO(user): This is a weaker duplication of the logic of + // BinaryRelationsMaps, but is is useful for the transitive closure in + // PrecedenceRelations::Build(). Replace this by getting the + // BinaryRelationsMaps affine bounds at level zero. for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { if (i == j) continue; - if (std::abs(coeffs[i]) != std::abs(coeffs[j])) continue; const int other = 3 - i - j; // i + j + other = 0 + 1 + 2. - // Make the terms magnitude * v1 - magnitude * v2 ... - const int64_t magnitude = std::abs(coeffs[i]); - IntegerVariable v1 = vars[i]; - IntegerVariable v2 = vars[j]; - if (coeffs[i] < 0) v1 = NegationOf(v1); - if (coeffs[j] > 0) v2 = NegationOf(v2); - - // magnitude * v1 + other_lb <= magnitude * v2 + rhs_max const int64_t coeff = coeffs[other]; const int64_t other_lb = coeff > 0 ? coeff * integer_trail->LowerBound(vars[other]).value() : coeff * integer_trail->UpperBound(vars[other]).value(); - precedences->Add( - v1, v2, MathUtil::CeilOfRatio(other_lb - rhs_max, magnitude)); - - // magnitude * v1 + other_ub >= magnitude * v2 + rhs_min const int64_t other_ub = coeff > 0 ? 
coeff * integer_trail->UpperBound(vars[other]).value() : coeff * integer_trail->LowerBound(vars[other]).value(); - precedences->Add( - v2, v1, MathUtil::CeilOfRatio(rhs_min - other_ub, magnitude)); + LinearExpression2 expr(vars[i], vars[j], coeffs[i], coeffs[j]); + precedences->AddBounds(expr, rhs_min - other_ub, rhs_max - other_lb); } } } diff --git a/ortools/sat/cp_model_postsolve.cc b/ortools/sat/cp_model_postsolve.cc index 786e43f660..8231dc2ef1 100644 --- a/ortools/sat/cp_model_postsolve.cc +++ b/ortools/sat/cp_model_postsolve.cc @@ -49,8 +49,12 @@ void PostsolveClause(const ConstraintProto& ct, std::vector* domains) { satisfied = true; } } else { - // We still need to assign free variable. Any value should work. - (*domains)[PositiveRef(ref)] = Domain(0); + // We still need to assign free variable. + // + // It is important to set its value so that the literal in the clause is + // false, so that we support the "filter_sat_postsolve_clauses" option and + // we use a bit less memory for postsolve clauses. + (*domains)[PositiveRef(ref)] = Domain(RefIsPositive(ref) ? 0 : 1); } } if (satisfied) return; diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 76db43e093..d835856bf1 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -2564,7 +2564,7 @@ bool CpModelPresolver::RemoveSingletonInLinear(ConstraintProto* ct) { // also force the postsolve to take search decisions... 
if (absl::GetFlag(FLAGS_cp_model_debug_postsolve)) { auto* new_ct = context_->NewMappingConstraint(*ct, __FILE__, __LINE__); - const std::string name = new_ct->name(); + const std::string name(new_ct->name()); *new_ct = *ct; new_ct->set_name(absl::StrCat(ct->name(), " copy ", name)); } else { @@ -8221,10 +8221,15 @@ bool CpModelPresolver::PresolvePureSatPart() { // We also disable this if the user asked for tightened domain as this might // fix variable to a potentially infeasible value, and just correct them later // during postsolve of a particular solution. - SatParameters params = context_->params(); - if (params.debug_postsolve_with_full_solver() || - params.fill_tightened_domains_in_response()) { - params.set_presolve_blocked_clause(false); + SatParameters sat_params = context_->params(); + if (sat_params.debug_postsolve_with_full_solver() || + sat_params.fill_tightened_domains_in_response()) { + sat_params.set_presolve_blocked_clause(false); + } + + // This option is only supported by the custom postsolve code. + if (!sat_params.debug_postsolve_with_full_solver()) { + sat_params.set_filter_sat_postsolve_clauses(true); } SatPostsolver sat_postsolver(num_variables); @@ -8280,7 +8285,7 @@ bool CpModelPresolver::PresolvePureSatPart() { // TODO(user): BVA takes time and does not seems to help on the minizinc // benchmarks. So we currently disable it, except if we are on a pure-SAT // problem, where we follow the default (true) or the user specified value. - params.set_presolve_use_bva(false); + sat_params.set_presolve_use_bva(false); } // Disable BVA if we want to keep the symmetries. @@ -8289,7 +8294,7 @@ bool CpModelPresolver::PresolvePureSatPart() { // and also update the generators to take into account the new variables. This // do not seems that easy. if (context_->params().keep_symmetry_in_presolve()) { - params.set_presolve_use_bva(false); + sat_params.set_presolve_use_bva(false); } // Update the time limit of the initial propagation. 
@@ -8304,7 +8309,7 @@ bool CpModelPresolver::PresolvePureSatPart() { sat_presolver.SetEquivalentLiteralMapping(equiv_map); } sat_presolver.SetTimeLimit(time_limit_); - sat_presolver.SetParameters(params); + sat_presolver.SetParameters(sat_params); // Load in the presolver. // Register the fixed variables with the postsolver. @@ -8354,12 +8359,6 @@ bool CpModelPresolver::PresolvePureSatPart() { for (int i = 0; i < num_variables; ++i) { const int var = new_to_old_index[i]; if (context_->VarToConstraints(var).empty()) { - // Such variable needs to be fixed to some value for the SAT postsolve to - // work. - if (!context_->IsFixed(var)) { - CHECK(context_->IntersectDomainWith( - var, Domain(context_->DomainOf(var).SmallestValue()))); - } context_->MarkVariableAsRemoved(var); } } diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 2928764a79..57a9fc7bcd 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -2282,7 +2282,7 @@ void ParseFromStringOrDie(absl::string_view str, T* proto) { // TODO(user): Support it on android. std::function NewSatParameters( - const std::string& params) { + absl::string_view params) { sat::SatParameters parameters; if (!params.empty()) { ParseFromStringOrDie(params, ¶meters); diff --git a/ortools/sat/cp_model_solver.h b/ortools/sat/cp_model_solver.h index 2afcac1ec6..79d3f7195b 100644 --- a/ortools/sat/cp_model_solver.h +++ b/ortools/sat/cp_model_solver.h @@ -18,6 +18,7 @@ #include #include "absl/flags/declare.h" +#include "absl/strings/string_view.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" @@ -123,8 +124,7 @@ std::function NewBestBoundCallback( \endcode * before calling \c SolveCpModel(). 
*/ -std::function NewSatParameters( - const std::string& params); +std::function NewSatParameters(absl::string_view params); std::function NewSatParameters( const SatParameters& parameters); diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 23e85a04ff..ab5b398d19 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -33,6 +33,7 @@ #include "absl/numeric/bits.h" #include "absl/types/span.h" // #include "ortools/base/stl_util.h" +#include "ortools/sat/2d_distances_propagator.h" #include "ortools/sat/2d_mandatory_overlap_propagator.h" #include "ortools/sat/2d_orthogonal_packing.h" #include "ortools/sat/2d_try_edge_propagator.h" @@ -187,6 +188,13 @@ void AddNonOverlappingRectangles(const std::vector& x, model->GetOrCreate(); CreateAndRegisterMandatoryOverlapPropagator(helper_2d, model, watcher, 3); + if (model->GetOrCreate() + ->use_linear3_for_no_overlap_2d_precedences()) { + Precedences2DPropagator* propagator = + new Precedences2DPropagator(helper_2d, model); + watcher->SetPropagatorPriority(propagator->RegisterWith(watcher), 4); + model->TakeOwnership(propagator); + } NonOverlappingRectanglesDisjunctivePropagator* constraint = new NonOverlappingRectanglesDisjunctivePropagator(helper_2d, model); diff --git a/ortools/sat/diffn_cuts.cc b/ortools/sat/diffn_cuts.cc index 3c514bd78b..91485e84d9 100644 --- a/ortools/sat/diffn_cuts.cc +++ b/ortools/sat/diffn_cuts.cc @@ -668,11 +668,11 @@ CutGenerator CreateNoOverlap2dCompletionTimeCutGenerator( if (!helper->SynchronizeAndSetDirection(false, false, false)) { return false; } - generate_cuts("NoOverlap2dXCompletionTime_mirror"); + generate_cuts("NoOverlap2dXCompletionTime"); if (!helper->SynchronizeAndSetDirection(false, false, true)) { return false; } - generate_cuts("NoOverlap2dYCompletionTime_mirror"); + generate_cuts("NoOverlap2dYCompletionTime"); } return true; }; diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index 256673d15c..0faa84e760 100644 --- 
a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -1259,8 +1259,8 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // the offset as much as possible. Note that the alternative of storing it // in PrecedenceData is not necessarily better and harder to update as we // dive/backtrack. - const IntegerValue inner_offset = - precedence_relations_->GetConditionalOffset(end_exp.var, var); + const IntegerValue inner_offset = -precedence_relations_->UpperBound( + LinearExpression2::Difference(end_exp.var, var)); DCHECK_NE(inner_offset, kMinIntegerValue); // We have var >= end_exp.var + inner_offset, so @@ -1312,11 +1312,11 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // Fetch the explanation. // This is okay if a bit slow since we only do that when we push. const AffineExpression& end_exp = helper_->Ends()[ct]; - for (const Literal l : - precedence_relations_->GetConditionalEnforcements(end_exp.var, - var)) { - helper_->MutableLiteralReason()->push_back(l.Negated()); - } + const LinearExpression2 expr = + LinearExpression2::Difference(end_exp.var, var); + precedence_relations_->AddReasonForUpperBoundLowerThan( + expr, precedence_relations_->UpperBound(expr), + helper_->MutableLiteralReason(), helper_->MutableIntegerReason()); } ++stats_.num_propagations; if (!helper_->PushIntegerLiteral( diff --git a/ortools/sat/disjunctive_test.cc b/ortools/sat/disjunctive_test.cc index cf8b2f30cb..85c1db3204 100644 --- a/ortools/sat/disjunctive_test.cc +++ b/ortools/sat/disjunctive_test.cc @@ -249,7 +249,8 @@ TEST(DisjunctiveConstraintTest, Precedences) { CHECK_EQ(e2.coeff, 1); precedences->AddPrecedenceWithOffset(e1.var, e2.var, e1.constant - e2.constant); - relations->Add(e1.var, e2.var, e1.constant - e2.constant); + relations->AddUpperBound(LinearExpression2::Difference(e1.var, e2.var), + e2.constant - e1.constant); }; const int kStart(0); diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index ecfafd04c6..1c8cbf1438 100644 --- 
a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -531,6 +531,10 @@ class IntegerTrail final : public SatPropagator { IntegerValue LevelZeroLowerBound(AffineExpression exp) const; IntegerValue LevelZeroUpperBound(AffineExpression exp) const; + // Returns globally valid lower/upper bound on the given linear expression. + IntegerValue LevelZeroLowerBound(LinearExpression2 expr) const; + IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const; + // Returns true if the variable is fixed at level 0. bool IsFixedAtLevelZero(IntegerVariable var) const; @@ -1432,6 +1436,30 @@ inline IntegerValue IntegerTrail::LevelZeroUpperBound( return expr.ValueAt(LevelZeroUpperBound(expr.var)); } +inline IntegerValue IntegerTrail::LevelZeroLowerBound( + LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + IntegerValue result = 0; + for (int i = 0; i < 2; ++i) { + if (expr.coeffs[i] != 0) { + result += expr.coeffs[i] * LevelZeroLowerBound(expr.vars[i]); + } + } + return result; +} + +inline IntegerValue IntegerTrail::LevelZeroUpperBound( + LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + IntegerValue result = 0; + for (int i = 0; i < 2; ++i) { + if (expr.coeffs[i] != 0) { + result += expr.coeffs[i] * LevelZeroUpperBound(expr.vars[i]); + } + } + return result; +} + inline bool IntegerTrail::IsFixedAtLevelZero(AffineExpression expr) const { if (expr.var == kNoIntegerVariable) return true; return IsFixedAtLevelZero(expr.var); diff --git a/ortools/sat/integer_base.cc b/ortools/sat/integer_base.cc index 4f3f7e70e7..8af3a695ca 100644 --- a/ortools/sat/integer_base.cc +++ b/ortools/sat/integer_base.cc @@ -56,6 +56,29 @@ void LinearExpression2::SimpleCanonicalization() { } } +IntegerValue LinearExpression2::DivideByGcd() { + const uint64_t gcd = std::gcd(coeffs[0].value(), coeffs[1].value()); + if (gcd > 1) { + coeffs[0] /= gcd; + coeffs[1] /= gcd; + return IntegerValue(gcd); + } + return IntegerValue(1); +} + +bool 
LinearExpression2::NegateForCanonicalization() { + bool negate = false; + if (coeffs[0] == 0) { + if (coeffs[1] != 0) { + negate = !VariableIsPositive(vars[1]); + } + } else { + negate = !VariableIsPositive(vars[0]); + } + if (negate) Negate(); + return negate; +} + void LinearExpression2::CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub, bool allow_negation) { @@ -63,17 +86,8 @@ void LinearExpression2::CanonicalizeAndUpdateBounds(IntegerValue& lb, if (coeffs[0] == 0 || coeffs[1] == 0) return; // abort. if (allow_negation) { - bool negate = false; - if (coeffs[0] == 0) { - if (coeffs[1] != 0) { - negate = !VariableIsPositive(vars[1]); - } - } else { - negate = !VariableIsPositive(vars[0]); - } - if (negate) { - Negate(); - + const bool negated = NegateForCanonicalization(); + if (negated) { // We need to be able to negate without overflow. CHECK_GE(lb, kMinIntegerValue); CHECK_LE(ub, kMaxIntegerValue); @@ -134,4 +148,21 @@ RelationStatus BestBinaryRelationBounds::GetStatus(LinearExpression2 expr, return RelationStatus::IS_UNKNOWN; } +IntegerValue BestBinaryRelationBounds::GetUpperBound( + LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + const IntegerValue gcd = expr.DivideByGcd(); + const bool negated = expr.NegateForCanonicalization(); + const auto it = best_bounds_.find(expr); + if (it != best_bounds_.end()) { + const auto [known_lb, known_ub] = it->second; + if (negated) { + return CapProdI(gcd, -known_lb); + } else { + return CapProdI(gcd, known_ub); + } + } + return kMaxIntegerValue; +} + } // namespace operations_research::sat diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index 3ae647f744..a86d15eb07 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -346,6 +346,20 @@ H AbslHashValue(H h, const AffineExpression& e) { // A linear expression with at most two variables (coeffs can be zero). // And some utility to canonicalize them. 
struct LinearExpression2 { + LinearExpression2() = default; + LinearExpression2(IntegerVariable v1, IntegerVariable v2, IntegerValue c1, + IntegerValue c2) { + vars[0] = v1; + vars[1] = v2; + coeffs[0] = c1; + coeffs[1] = c2; + } + + // Build (v1 - v2) + static LinearExpression2 Difference(IntegerVariable v1, IntegerVariable v2) { + return LinearExpression2(v1, v2, 1, -1); + } + // Take the negation of this expression. void Negate() { vars[0] = NegationOf(vars[0]); @@ -355,17 +369,48 @@ struct LinearExpression2 { // This will not change any bounds on the LinearExpression2. // That is we will not potentially Negate() the expression like // CanonicalizeAndUpdateBounds() might do. + // Note that since kNoIntegerVariable=-1 and we sort the variables, if we any + // one zero and one non-zero we will always have the zero first. void SimpleCanonicalization(); - // This fully canonicalize this, and update the given bounds accordingly. + // Fully canonicalizes the expression and updates the given bounds + // accordingly. This is the same as SimpleCanonicalization(), DivideByGcd() + // and the NegateForCanonicalization() with a proper updates of the bounds. void CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub, bool allow_negation = false); + // Divides the expression by the gcd of both coefficients, and returns it. + // Note that we always return something >= 1 even if both coefficients are + // zero. + IntegerValue DivideByGcd(); + + // Makes sure expr and -expr have the same canonical representation by + // negating the expression of it is in the non-canonical form. Returns true if + // the expression was negated. + bool NegateForCanonicalization(); + + absl::Span non_zero_vars() const { + const int first = coeffs[0] == 0 ? 1 : 0; + const int last = coeffs[1] == 0 ? 0 : 1; + return absl::MakeSpan(&vars[first], last - first + 1); + } + + absl::Span non_zero_coeffs() const { + const int first = coeffs[0] == 0 ? 1 : 0; + const int last = coeffs[1] == 0 ? 
0 : 1; + return absl::MakeSpan(&coeffs[first], last - first + 1); + } + bool operator==(const LinearExpression2& o) const { return vars[0] == o.vars[0] && vars[1] == o.vars[1] && coeffs[0] == o.coeffs[0] && coeffs[1] == o.coeffs[1]; } + bool operator<(const LinearExpression2& o) const { + return std::tie(vars[0], vars[1], coeffs[0], coeffs[1]) < + std::tie(o.vars[0], o.vars[1], o.coeffs[0], o.coeffs[1]); + } + IntegerValue coeffs[2]; IntegerVariable vars[2]; }; @@ -400,6 +445,11 @@ class BestBinaryRelationBounds { RelationStatus GetStatus(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) const; + // Return a valid upper-bound on the given LinearExpression2. Note that we + // assume kMaxIntegerValue is always valid and returns it if we don't have an + // entry in the hash-map. + IntegerValue GetUpperBound(LinearExpression2 expr) const; + private: // The best bound on the given "canonicalized" expression. absl::flat_hash_map> diff --git a/ortools/sat/integer_base_test.cc b/ortools/sat/integer_base_test.cc index 15f4b987fb..e3b069cd02 100644 --- a/ortools/sat/integer_base_test.cc +++ b/ortools/sat/integer_base_test.cc @@ -37,6 +37,21 @@ TEST(CanonicalizeAffinePrecedenceTest, Basic) { EXPECT_EQ(ub, 5); } +TEST(CanonicalizeAffinePrecedenceTest, OneSingleVariable) { + LinearExpression2 expr; + expr.vars[0] = IntegerVariable(0); + expr.vars[1] = IntegerVariable(0); + expr.coeffs[0] = IntegerValue(2); + expr.coeffs[1] = IntegerValue(2); + + expr.SimpleCanonicalization(); + + EXPECT_EQ(expr.vars[0], kNoIntegerVariable); + EXPECT_EQ(expr.vars[1], IntegerVariable(0)); + EXPECT_EQ(expr.coeffs[0], IntegerValue(0)); + EXPECT_EQ(expr.coeffs[1], IntegerValue(4)); +} + TEST(BestBinaryRelationBoundsTest, Basic) { LinearExpression2 expr; expr.vars[0] = IntegerVariable(0); @@ -63,5 +78,25 @@ TEST(BestBinaryRelationBoundsTest, Basic) { best_bounds.GetStatus(expr, IntegerValue(-5), IntegerValue(3))); } +TEST(BestBinaryRelationBoundsTest, UpperBound) { + LinearExpression2 expr; + 
expr.vars[0] = IntegerVariable(0); + expr.vars[1] = IntegerVariable(2); + expr.coeffs[0] = IntegerValue(1); + expr.coeffs[1] = IntegerValue(-1); + + BestBinaryRelationBounds best_bounds; + EXPECT_TRUE(best_bounds.Add(expr, IntegerValue(0), IntegerValue(5))); + + EXPECT_EQ(best_bounds.GetUpperBound(expr), IntegerValue(5)); + + expr.coeffs[0] *= 3; + expr.coeffs[1] *= 3; + EXPECT_EQ(best_bounds.GetUpperBound(expr), IntegerValue(15)); + + expr.Negate(); + EXPECT_EQ(best_bounds.GetUpperBound(expr), IntegerValue(0)); +} + } // namespace } // namespace operations_research::sat diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index bc5ad0bde2..c50429e71f 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -155,9 +155,9 @@ IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial( } // Abort if the relation is already known. - if (relations_maps_->GetPrecedenceStatus(a.end, b.start) == + if (relations_maps_->GetLevelZeroPrecedenceStatus(a.end, b.start) == RelationStatus::IS_TRUE || - relations_maps_->GetPrecedenceStatus(b.end, a.start) == + relations_maps_->GetLevelZeroPrecedenceStatus(b.end, a.start) == RelationStatus::IS_TRUE) { return kNoLiteralIndex; } @@ -217,7 +217,7 @@ bool IntervalsRepository::CreatePrecedenceLiteralIfNonTrivial( // We want l => x <= y and not(l) => x > y <=> y + 1 <= x // Do not create l if the relation is always true or false. 
- if (relations_maps_->GetPrecedenceStatus(x, y) != + if (relations_maps_->GetLevelZeroPrecedenceStatus(x, y) != RelationStatus::IS_UNKNOWN) { return false; } diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index fde18d9794..fa2b681949 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -1851,7 +1851,8 @@ void LinearProgrammingConstraint::AddMirCuts() { // But there is some degenerate problem where these rows have a really low // weight (or even zero), and having only weight of exactly zero in // std::discrete_distribution will result in a crash. - row_weights[row] = std::max(1e-8, std::abs(simplex_.GetDualValue(row))); + row_weights[row] = + std::max(Fractional(1e-8), std::abs(simplex_.GetDualValue(row))); } // The code here can be really slow, so we put a limit on the number of diff --git a/ortools/sat/linear_propagation.cc b/ortools/sat/linear_propagation.cc index 7632da7836..31fb792911 100644 --- a/ortools/sat/linear_propagation.cc +++ b/ortools/sat/linear_propagation.cc @@ -385,6 +385,7 @@ LinearPropagator::LinearPropagator(Model* model) rev_integer_value_repository_( model->GetOrCreate()), precedences_(model->GetOrCreate()), + binary_relations_(model->GetOrCreate()), random_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), watcher_id_(watcher_->Register(this)), @@ -534,6 +535,7 @@ bool LinearPropagator::Propagate() { // - Z + Y >= 6 ==> Z >= 1 // - (1) again to push T <= 10 and reach the propagation fixed point. Bitset64::View in_queue = in_queue_.view(); + const bool push_affine_ub = push_affine_ub_for_binary_relations_; while (true) { // We always process the whole queue in FIFO order. // Note that the order really only matter for infeasible constraint so it @@ -571,6 +573,58 @@ bool LinearPropagator::Propagate() { order_.Register(id, NegationOf(var), -new_ub); } } + + // Look at linear3 and update our "linear2 affine upper bound". 
If we are + // here it means the constraint was in the queue, and its slack changed, + // so it might lead to stronger affine ub. + // + // TODO(user): This can be costly for no reason if we keep updating the + // bound for variable appearing in a single linear3. On another hand it is + // O(1) compared to what this class already do. Profile will tell if it is + // worth it. Maybe we can only share LinearExpression2 that we might look + // up. + // + // TODO(user): This only look at non-enforced linear3. We could look at + // constraint whose enforcement or other variables are fixed at level + // zero, but it is trickier. It could be done if we add a "batch clean up" + // to this class that runs at level zero, and reduce constraints + // accordingly. + const ConstraintInfo& info = infos_[id]; + if (push_affine_ub && info.initial_size == 3 && info.enf_id == -1) { + // A constraint A + B + C <= rhs can lead to up to 3 relations... + const auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); + + // We don't "push" relation A + B <= ub if A or B is fixed, because + // the variable bound of the non-fixed A or B should just be as-strong + // as what can be inferred from the binary relation. + if (info.rev_size == 2) { + LinearExpression2 expr; + expr.vars[0] = vars[0]; + expr.vars[1] = vars[1]; + expr.coeffs[0] = coeffs[0]; + expr.coeffs[1] = coeffs[1]; + + // The fixed variable is always at index 2. + // The rev_rhs was updated to: initial_rhs - lb(vars[2]) * coeffs[2]. 
+ const IntegerValue initial_rhs = + info.rev_rhs + coeffs[2] * integer_trail_->LowerBound(vars[2]); + binary_relations_->AddAffineUpperBound( + expr, AffineExpression(vars[2], -coeffs[2], initial_rhs)); + } else if (info.rev_size == 3) { + for (int i = 0; i < 3; ++i) { + LinearExpression2 expr; + const int a = (i + 1) % 3; + const int b = (i + 2) % 3; + expr.vars[0] = vars[a]; + expr.vars[1] = vars[b]; + expr.coeffs[0] = coeffs[a]; + expr.coeffs[1] = coeffs[b]; + binary_relations_->AddAffineUpperBound( + expr, AffineExpression(vars[i], -coeffs[i], info.rev_rhs)); + } + } + } } const int next_id = order_.NextId(); @@ -645,7 +699,7 @@ bool LinearPropagator::AddConstraint( } // Initialize watchers. - // Initialy we want everything to be propagated at least once. + // Initially we want everything to be propagated at least once. in_queue_.resize(in_queue_.size() + 1); if (!enforcement_literals.empty()) { @@ -670,11 +724,13 @@ bool LinearPropagator::AddConstraint( // variables. if (status == EnforcementStatus::IS_ENFORCED) { const auto info = infos_[id]; - if (info.initial_size == 2 && info.all_coeffs_are_one) { + if (info.initial_size == 2) { const auto vars = GetVariables(info); + const auto coeffs = GetCoeffs(info); precedences_->PushConditionalRelation( enforcement_propagator_->GetEnforcementLiterals(enf_id), - vars[0], vars[1], initial_rhs_[id]); + LinearExpression2(vars[0], vars[1], coeffs[0], coeffs[1]), + initial_rhs_[id]); } } }); diff --git a/ortools/sat/linear_propagation.h b/ortools/sat/linear_propagation.h index fd7d375e2d..561ecdf268 100644 --- a/ortools/sat/linear_propagation.h +++ b/ortools/sat/linear_propagation.h @@ -324,6 +324,10 @@ class LinearPropagator : public PropagatorInterface, std::vector* literals_reason, std::vector* trail_indices_reason) final; + void SetPushAffineUbForBinaryRelation() { + push_affine_ub_for_binary_relations_ = true; + } + private: // We try to pack the struct as much as possible. 
Using a maximum size of // 1 << 29 should be okay since we split long constraint anyway. Technically @@ -389,10 +393,13 @@ class LinearPropagator : public PropagatorInterface, RevIntRepository* rev_int_repository_; RevIntegerValueRepository* rev_integer_value_repository_; PrecedenceRelations* precedences_; + BinaryRelationsMaps* binary_relations_; ModelRandomGenerator* random_; SharedStatistics* shared_stats_ = nullptr; const int watcher_id_; + bool push_affine_ub_for_binary_relations_ = false; + // To know when we backtracked. See SetLevel(). int previous_level_ = 0; diff --git a/ortools/sat/lp_utils.cc b/ortools/sat/lp_utils.cc index a4c84c023d..caf9478faa 100644 --- a/ortools/sat/lp_utils.cc +++ b/ortools/sat/lp_utils.cc @@ -23,6 +23,7 @@ #include "absl/log/check.h" #include "absl/strings/str_cat.h" +#include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" @@ -1052,7 +1053,7 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters& params, } case MPGeneralConstraintProto::kAndConstraint: { const auto& and_constraint = general_constraint.and_constraint(); - const std::string& name = general_constraint.name(); + absl::string_view name = general_constraint.name(); ConstraintProto* ct_pos = cp_model->add_constraints(); ct_pos->set_name(name.empty() ? "" : absl::StrCat(name, "_pos")); @@ -1071,7 +1072,7 @@ bool ConvertMPModelProtoToCpModelProto(const SatParameters& params, } case MPGeneralConstraintProto::kOrConstraint: { const auto& or_constraint = general_constraint.or_constraint(); - const std::string& name = general_constraint.name(); + absl::string_view name = general_constraint.name(); ConstraintProto* ct_pos = cp_model->add_constraints(); ct_pos->set_name(name.empty() ? 
"" : absl::StrCat(name, "_pos")); diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 0b6c0e0ed6..562795dd3c 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -53,38 +53,60 @@ namespace operations_research { namespace sat { -bool PrecedenceRelations::Add(IntegerVariable tail, IntegerVariable head, - IntegerValue offset) { - // Ignore trivial relation: tail + offset <= head. - if (integer_trail_->LevelZeroUpperBound(tail) + offset <= - integer_trail_->LevelZeroLowerBound(head)) { +bool PrecedenceRelations::AddBounds(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub) { + expr.CanonicalizeAndUpdateBounds(lb, ub); + + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { + // This class handles only binary relationships, let something else handle + // the case where there is actually a single variable. return false; } - // TODO(user): Return infeasible if tail == head and offset > 0. - // TODO(user): if tail = Negation(head) also update Domain. - if (tail == head) return false; - // Add to root_relations_. // // TODO(user): AddInternal() only returns true if this is the first relation // between head and tail. But we can still avoid an extra lookup. - if (offset <= GetOffset(tail, head)) return false; - AddInternal(tail, head, offset); + const bool add_ub = ub < LevelZeroUpperBound(expr); + LinearExpression2 expr_for_lb = expr; + expr_for_lb.Negate(); + const bool add_lb = lb > -LevelZeroUpperBound(expr_for_lb); + if (!add_ub && !add_lb) { + return false; + } + + if (add_ub) { + AddInternal(expr, ub); + } + if (add_lb) { + AddInternal(expr_for_lb, -lb); + } // If we are not built, make sure there is enough room in the graph. // TODO(user): Alternatively, force caller to do a Resize(). 
const int max_node = - std::max(PositiveVariable(tail), PositiveVariable(head)).value() + 1; + std::max(PositiveVariable(expr.vars[0]), PositiveVariable(expr.vars[1])) + .value() + + 1; if (!is_built_ && max_node >= graph_.num_nodes()) { graph_.AddNode(max_node); } return true; } +bool PrecedenceRelations::AddUpperBound(LinearExpression2 expr, + IntegerValue ub) { + return AddBounds(expr, kMinIntegerValue, ub); +} + void PrecedenceRelations::PushConditionalRelation( - absl::Span enforcements, IntegerVariable a, - IntegerVariable b, IntegerValue rhs) { + absl::Span enforcements, LinearExpression2 expr, + IntegerValue rhs) { + expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { + return; + } + // This must be currently true. if (DEBUG_MODE) { for (const Literal l : enforcements) { @@ -93,14 +115,16 @@ void PrecedenceRelations::PushConditionalRelation( } if (enforcements.empty() || trail_->CurrentDecisionLevel() == 0) { - Add(a, NegationOf(b), -rhs); + AddUpperBound(expr, rhs); return; } + const IntegerValue gcd = expr.DivideByGcd(); + rhs = FloorRatio(rhs, gcd); + // Ignore if no better than best_relations, otherwise increase it. - const auto key = GetKey(a, b); { - const auto [it, inserted] = best_relations_.insert({key, rhs}); + const auto [it, inserted] = best_relations_.insert({expr, rhs}); if (!inserted) { if (rhs >= it->second) return; // Ignore. 
it->second = rhs; @@ -108,17 +132,20 @@ void PrecedenceRelations::PushConditionalRelation( } const int new_index = conditional_stack_.size(); - const auto [it, inserted] = conditional_relations_.insert({key, new_index}); + const auto [it, inserted] = conditional_relations_.insert({expr, new_index}); if (inserted) { CreateLevelEntryIfNeeded(); - conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, key, enforcements); + conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, expr, enforcements); - const int new_size = std::max(a.value(), b.value()) + 1; - if (new_size > conditional_after_.size()) { - conditional_after_.resize(new_size); + if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { + const int new_size = + std::max(expr.vars[0].value(), expr.vars[1].value()) + 1; + if (new_size > conditional_after_.size()) { + conditional_after_.resize(new_size); + } + conditional_after_[expr.vars[0]].push_back(NegationOf(expr.vars[1])); + conditional_after_[expr.vars[1]].push_back(NegationOf(expr.vars[0])); } - conditional_after_[a].push_back(NegationOf(b)); - conditional_after_[b].push_back(NegationOf(a)); } else { // We should only decrease because we ignored entry worse than the one in // best_relations_. @@ -128,7 +155,7 @@ void PrecedenceRelations::PushConditionalRelation( // Update. 
it->second = new_index; CreateLevelEntryIfNeeded(); - conditional_stack_.emplace_back(prev_entry, rhs, key, enforcements); + conditional_stack_.emplace_back(prev_entry, rhs, expr, enforcements); } } @@ -155,12 +182,14 @@ void PrecedenceRelations::SetLevel(int level) { UpdateBestRelation(back.key, kMaxIntegerValue); conditional_relations_.erase(back.key); - DCHECK_EQ(conditional_after_[back.key.first].back(), - NegationOf(back.key.second)); - DCHECK_EQ(conditional_after_[back.key.second].back(), - NegationOf(back.key.first)); - conditional_after_[back.key.first].pop_back(); - conditional_after_[back.key.second].pop_back(); + if (back.key.coeffs[0] == 1 && back.key.coeffs[1] == 1) { + DCHECK_EQ(conditional_after_[back.key.vars[0]].back(), + NegationOf(back.key.vars[1])); + DCHECK_EQ(conditional_after_[back.key.vars[1]].back(), + NegationOf(back.key.vars[0])); + conditional_after_[back.key.vars[0]].pop_back(); + conditional_after_[back.key.vars[1]].pop_back(); + } } conditional_stack_.pop_back(); } @@ -168,19 +197,26 @@ void PrecedenceRelations::SetLevel(int level) { } } -IntegerValue PrecedenceRelations::GetOffset(IntegerVariable a, - IntegerVariable b) const { - const auto it = root_relations_.find(GetKey(a, NegationOf(b))); +IntegerValue PrecedenceRelations::LevelZeroUpperBound( + LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + const IntegerValue gcd = expr.DivideByGcd(); + const auto it = root_relations_.find(expr); if (it != root_relations_.end()) { - return -it->second; + return CapProdI(it->second, gcd); } - return kMinIntegerValue; + return kMaxIntegerValue; } -absl::Span PrecedenceRelations::GetConditionalEnforcements( - IntegerVariable a, IntegerVariable b) const { - const auto it = conditional_relations_.find(GetKey(a, NegationOf(b))); - if (it == conditional_relations_.end()) return {}; +void PrecedenceRelations::AddReasonForUpperBoundLowerThan( + LinearExpression2 expr, IntegerValue ub, + std::vector* literal_reason, + std::vector* 
/*unused*/) const { + expr.SimpleCanonicalization(); + if (ub >= LevelZeroUpperBound(expr)) return; + const IntegerValue gcd = expr.DivideByGcd(); + const auto it = conditional_relations_.find(expr); + DCHECK(it != conditional_relations_.end()); const ConditionalEntry& entry = conditional_stack_[it->second]; if (DEBUG_MODE) { @@ -188,23 +224,23 @@ absl::Span PrecedenceRelations::GetConditionalEnforcements( CHECK(trail_->Assignment().LiteralIsTrue(l)); } } - const IntegerValue root_level_offset = GetOffset(a, b); - const IntegerValue conditional_offset = -entry.rhs; - if (conditional_offset <= root_level_offset) return {}; - - DCHECK_EQ(entry.rhs, -GetConditionalOffset(a, b)); - return entry.enforcements; + DCHECK_EQ(CapProdI(gcd, entry.rhs), UpperBound(expr)); + DCHECK_LE(CapProdI(gcd, entry.rhs), ub); + for (const Literal l : entry.enforcements) { + literal_reason->push_back(l.Negated()); + } } -IntegerValue PrecedenceRelations::GetConditionalOffset( - IntegerVariable a, IntegerVariable b) const { - const auto it = best_relations_.find(GetKey(a, NegationOf(b))); +IntegerValue PrecedenceRelations::UpperBound(LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + const IntegerValue gcd = expr.DivideByGcd(); + const auto it = best_relations_.find(expr); if (it != best_relations_.end()) { - return -it->second; + return CapProdI(gcd, it->second); } - DCHECK(!root_relations_.contains(GetKey(a, NegationOf(b)))); - DCHECK(!conditional_relations_.contains(GetKey(a, NegationOf(b)))); - return kMinIntegerValue; + DCHECK(!root_relations_.contains(expr)); + DCHECK(!conditional_relations_.contains(expr)); + return kMaxIntegerValue; } void PrecedenceRelations::Build() { @@ -219,9 +255,8 @@ void PrecedenceRelations::Build() { // And use this to compute the "closure". 
CHECK(arc_offsets_.empty()); graph_.ReserveArcs(2 * root_relations_.size()); - std::vector< - std::pair, IntegerValue>> - root_relations_sorted(root_relations_.begin(), root_relations_.end()); + std::vector> root_relations_sorted( + root_relations_.begin(), root_relations_.end()); std::sort(root_relations_sorted.begin(), root_relations_sorted.end()); for (const auto [var_pair, negated_offset] : root_relations_sorted) { // TODO(user): Support negative offset? @@ -232,21 +267,26 @@ void PrecedenceRelations::Build() { const IntegerValue offset = -negated_offset; if (offset < 0) continue; + if (var_pair.coeffs[0] != 1 || var_pair.coeffs[1] != 1) { + // TODO(user): Support non-1 coefficients. + continue; + } + // We have two arcs. { - const IntegerVariable tail = var_pair.first; - const IntegerVariable head = NegationOf(var_pair.second); + const IntegerVariable tail = var_pair.vars[0]; + const IntegerVariable head = NegationOf(var_pair.vars[1]); graph_.AddArc(tail.value(), head.value()); arc_offsets_.push_back(offset); - CHECK_LT(var_pair.second, before.size()); + CHECK_LT(var_pair.vars[1], before.size()); before[head].push_back(tail); } { - const IntegerVariable tail = var_pair.second; - const IntegerVariable head = NegationOf(var_pair.first); + const IntegerVariable tail = var_pair.vars[1]; + const IntegerVariable head = NegationOf(var_pair.vars[0]); graph_.AddArc(tail.value(), head.value()); arc_offsets_.push_back(offset); - CHECK_LT(var_pair.second, before.size()); + CHECK_LT(var_pair.vars[1], before.size()); before[head].push_back(tail); } } @@ -294,16 +334,19 @@ void PrecedenceRelations::Build() { const IntegerValue arc_offset = arc_offsets_[arc]; if (++work > kWorkLimit) break; - if (AddInternal(tail_var, head_var, arc_offset)) { + if (AddInternal(LinearExpression2::Difference(tail_var, head_var), + -arc_offset)) { before[head_var].push_back(tail_var); } for (const IntegerVariable before_var : before[tail_var]) { if (++work > kWorkLimit) break; + 
LinearExpression2 expr_for_key(before_var, tail_var, 1, -1); + expr_for_key.SimpleCanonicalization(); const IntegerValue offset = - -root_relations_.at(GetKey(before_var, NegationOf(tail_var))) + - arc_offset; - if (AddInternal(before_var, head_var, offset)) { + -root_relations_.at(expr_for_key) + arc_offset; + if (AddInternal(LinearExpression2::Difference(before_var, head_var), + -offset)) { before[head_var].push_back(before_var); } } @@ -582,8 +625,9 @@ void PrecedencesPropagator::PushConditionalRelations(const ArcInfo& arc) { // add this to the reason though. if (arc.offset_var != kNoIntegerVariable) return; const IntegerValue offset = ArcOffset(arc); - relations_->PushConditionalRelation(arc.presence_literals, arc.tail_var, - NegationOf(arc.head_var), -offset); + relations_->PushConditionalRelation( + arc.presence_literals, + LinearExpression2::Difference(arc.tail_var, arc.head_var), -offset); } void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) { @@ -1490,6 +1534,7 @@ int GreaterThanAtLeastOneOfDetector::AddGreaterThanAtLeastOneOfConstraints( BinaryRelationsMaps::BinaryRelationsMaps(Model* model) : integer_trail_(model->GetOrCreate()), integer_encoder_(model->GetOrCreate()), + watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()) { int index = 0; model->GetOrCreate()->callbacks.push_back( @@ -1524,9 +1569,24 @@ BinaryRelationsMaps::~BinaryRelationsMaps() { if (!VLOG_IS_ON(1)) return; std::vector> stats; stats.push_back({"BinaryRelationsMaps/num_relations", num_updates_}); + stats.push_back( + {"BinaryRelationsMaps/num_affine_updates", num_affine_updates_}); shared_stats_->AddStats(stats); } +IntegerValue BinaryRelationsMaps::GetImpliedUpperBound( + const LinearExpression2& expr) const { + DCHECK_GE(expr.coeffs[0], 0); + DCHECK_GE(expr.coeffs[1], 0); + IntegerValue implied_ub = 0; + for (const int i : {0, 1}) { + if (expr.coeffs[i] > 0) { + implied_ub += expr.coeffs[i] * integer_trail_->UpperBound(expr.vars[i]); + } + } + 
return implied_ub; +} + std::pair BinaryRelationsMaps::GetImpliedLevelZeroBounds( const LinearExpression2& expr) const { @@ -1561,16 +1621,16 @@ void BinaryRelationsMaps::AddRelationBounds(LinearExpression2 expr, if (lb > ub) return; // unsat ?? if (lb == implied_lb && ub == implied_ub) return; // trivially true. - if (best_upper_bounds_.Add(expr, lb, ub)) { + if (best_root_level_bounds_.Add(expr, lb, ub)) { // TODO(user): Also push them to a global shared repository after // remapping IntegerVariable to proto indices. ++num_updates_; } } -RelationStatus BinaryRelationsMaps::GetStatus(LinearExpression2 expr, - IntegerValue lb, - IntegerValue ub) const { +RelationStatus BinaryRelationsMaps::GetLevelZeroStatus(LinearExpression2 expr, + IntegerValue lb, + IntegerValue ub) const { expr.CanonicalizeAndUpdateBounds(lb, ub); const auto [implied_lb, implied_ub] = GetImpliedLevelZeroBounds(expr); lb = std::max(lb, implied_lb); @@ -1580,11 +1640,11 @@ RelationStatus BinaryRelationsMaps::GetStatus(LinearExpression2 expr, if (lb > ub) return RelationStatus::IS_FALSE; if (lb == implied_lb && ub == implied_ub) return RelationStatus::IS_TRUE; - // Relax as best_upper_bounds_.GetStatus() might have older bounds. + // Relax as best_root_level_bounds_.GetStatus() might have older bounds. 
if (lb == implied_lb) lb = kMinIntegerValue; if (ub == implied_ub) ub = kMaxIntegerValue; - return best_upper_bounds_.GetStatus(expr, lb, ub); + return best_root_level_bounds_.GetStatus(expr, lb, ub); } std::pair BinaryRelationsMaps::FromDifference( @@ -1600,17 +1660,17 @@ std::pair BinaryRelationsMaps::FromDifference( return {std::move(expr), ub}; } -RelationStatus BinaryRelationsMaps::GetPrecedenceStatus( +RelationStatus BinaryRelationsMaps::GetLevelZeroPrecedenceStatus( AffineExpression a, AffineExpression b) const { const auto [expr, ub] = FromDifference(a, b); - return GetStatus(expr, kMinIntegerValue, ub); + return GetLevelZeroStatus(expr, kMinIntegerValue, ub); } void BinaryRelationsMaps::AddReifiedPrecedenceIfNonTrivial(Literal l, AffineExpression a, AffineExpression b) { const auto [expr, ub] = FromDifference(a, b); - const RelationStatus status = GetStatus(expr, kMinIntegerValue, ub); + const RelationStatus status = GetLevelZeroStatus(expr, kMinIntegerValue, ub); if (status != RelationStatus::IS_UNKNOWN) return; relation_to_lit_.insert({{expr, ub}, l}); @@ -1622,7 +1682,7 @@ void BinaryRelationsMaps::AddReifiedPrecedenceIfNonTrivial(Literal l, LiteralIndex BinaryRelationsMaps::GetReifiedPrecedence(AffineExpression a, AffineExpression b) { const auto [expr, ub] = FromDifference(a, b); - const RelationStatus status = GetStatus(expr, kMinIntegerValue, ub); + const RelationStatus status = GetLevelZeroStatus(expr, kMinIntegerValue, ub); if (status == RelationStatus::IS_TRUE) { return integer_encoder_->GetTrueLiteral().Index(); } @@ -1635,5 +1695,116 @@ LiteralIndex BinaryRelationsMaps::GetReifiedPrecedence(AffineExpression a, return it->second; } +bool BinaryRelationsMaps::AddAffineUpperBound(LinearExpression2 expr, + AffineExpression affine_ub) { + const IntegerValue new_ub = integer_trail_->UpperBound(affine_ub); + expr.SimpleCanonicalization(); + + // Not better than trivial upper bound. 
+ if (GetImpliedUpperBound(expr) <= new_ub) return false; + + // Not better than the root level upper bound. + if (best_root_level_bounds_.GetUpperBound(expr) <= new_ub) return false; + + const IntegerValue gcd = expr.DivideByGcd(); + + const auto it = best_affine_ub_.find(expr); + if (it != best_affine_ub_.end()) { + const auto [old_affine_ub, old_gcd] = it->second; + // We have an affine bound for this expr in the map. Can be exactly the + // same, a better one or a worse one. + if (old_affine_ub == affine_ub && old_gcd == gcd) { + // The affine bound is already in the map. + NotifyWatchingPropagators(); // The affine bound was updated. + return false; + } + const IntegerValue old_ub = + FloorRatio(integer_trail_->UpperBound(old_affine_ub), old_gcd); + if (old_ub <= new_ub) return false; // old bound is better. + } + + // We have gcd * canonical_expr <= affine_ub, so we do need to store a + // "divisor". + ++num_affine_updates_; + best_affine_ub_[expr] = {affine_ub, gcd}; + NotifyWatchingPropagators(); + return true; +} + +void BinaryRelationsMaps::NotifyWatchingPropagators() const { + for (const int id : propagator_ids_) { + watcher_->CallOnNextPropagate(id); + } +} + +IntegerValue BinaryRelationsMaps::UpperBound(LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + + const IntegerValue trivial_ub = GetImpliedUpperBound(expr); + const IntegerValue root_level_ub = + best_root_level_bounds_.GetUpperBound(expr); + const IntegerValue best_ub = std::min(root_level_ub, trivial_ub); + + const IntegerValue gcd = expr.DivideByGcd(); + const auto it = best_affine_ub_.find(expr); + if (it == best_affine_ub_.end()) { + return best_ub; + } else { + const auto [affine, divisor] = it->second; + const IntegerValue canonical_ub = + FloorRatio(integer_trail_->UpperBound(affine), divisor); + return std::min(best_ub, CapProdI(gcd, canonical_ub)); + } +} + +// TODO(user): If the trivial bound is better, its explanation is different... 
+void BinaryRelationsMaps::AddReasonForUpperBoundLowerThan( + LinearExpression2 expr, IntegerValue ub, + std::vector* /*literal_reason*/, + std::vector* integer_reason) const { + expr.SimpleCanonicalization(); + + if (expr.coeffs[0] == 0 && expr.coeffs[1] == 0) return; // trivially zero + + // Starts by simple bounds. + if (best_root_level_bounds_.GetUpperBound(expr) <= ub) return; + + // Add explanation if it is a trivial bound. + const IntegerValue implied_ub = GetImpliedUpperBound(expr); + if (implied_ub <= ub) { + const IntegerValue slack = ub - implied_ub; + expr.Negate(); // AppendRelaxedLinearReason() explains a lower bound. + absl::Span vars = expr.non_zero_vars(); + absl::Span coeffs = expr.non_zero_coeffs(); + integer_trail_->AppendRelaxedLinearReason(slack, coeffs, vars, + integer_reason); + return; + } + + // None of the bound above are enough, try the affine one. Note that gcd * + // expr <= ub, is the same as asking why expr <= FloorRatio(ub, gcd). + const IntegerValue gcd = expr.DivideByGcd(); + const auto it = best_affine_ub_.find(expr); + if (it == best_affine_ub_.end()) return; + + // We want the reason for "expr <= ub", that is the reason for + // - "gcd * canonical_expr <= ub" + // - "canonical_expr <= FloorRatio(ub, gcd); + // + // knowing that canonical_expr <= affine_ub / divisor. 
+ const auto [affine, divisor] = it->second; + integer_reason->push_back( + affine.LowerOrEqual(CapProdI(FloorRatio(ub, gcd) + 1, divisor) - 1)); +} + +std::vector +BinaryRelationsMaps::GetAllExpressionsWithAffineBounds() const { + std::vector result; + for (const auto [expr, info] : best_affine_ub_) { + result.push_back(expr); + } + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 8056303e86..4ded3dbc4d 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -47,7 +47,7 @@ struct FullIntegerPrecedence { std::vector offsets; }; -// Stores all the precedences relation of the form "tail_X + offset <= head_X" +// Stores all the precedences relation of the form "a*x + b*y <= ub" // that we could extract from the linear constraint of the model. These are // stored in a directed graph. // @@ -68,11 +68,15 @@ class PrecedenceRelations : public ReversibleInterface { graph_.AddNode(num_variables - 1); } - // Add a relation tail + offset <= head. - // Returns true if it was added and is considered "new". - bool Add(IntegerVariable tail, IntegerVariable head, IntegerValue offset); + // Add a relation lb <= expr <= ub. If expr is not a proper linear2 expression + // (e.g. 0*x + y, y + y, y - y) it will be ignored. Returns true if it was + // added and is considered "new". + bool AddBounds(LinearExpression2 expr, IntegerValue lb, IntegerValue ub); - // Adds add relation (enf => a + b <= rhs) that is assumed to be true at + // Same as above, but only for the upper bound. + bool AddUpperBound(LinearExpression2 expr, IntegerValue ub); + + // Adds add relation (enf => expr <= rhs) that is assumed to be true at // the current level. 
// // It will be automatically reverted via the SetLevel() functions that is @@ -80,9 +84,11 @@ class PrecedenceRelations : public ReversibleInterface { // // This is assumed to be called when a relation becomes true (enforcement are // assigned) and when it becomes false in reverse order (CHECKed). + // + // If expr is not a proper linear2 expression (e.g. 0*x + y, y + y, y - y) it + // will be ignored. void PushConditionalRelation(absl::Span enforcements, - IntegerVariable a, IntegerVariable b, - IntegerValue rhs); + LinearExpression2 expr, IntegerValue rhs); // Called each time we change decision level. void SetLevel(int level) final; @@ -92,6 +98,9 @@ class PrecedenceRelations : public ReversibleInterface { // This currently only works if the precedence relation form a DAG. // If not we will just abort. TODO(user): generalize. // + // For more efficiency, this method ignores all linear2 expressions with any + // coefficient different from 1. + // // TODO(user): Put some work limit in place, as this can be slow. Complexity // is in O(vars.size()) * num_arcs. // @@ -106,7 +115,10 @@ class PrecedenceRelations : public ReversibleInterface { // Returns a set of precedences (var, index) such that var is after // vars[index]. All entries for the same variable will be contiguous and // sorted by index. We only list variable with at least two entries. The - // offset can be retrieved via GetConditionalOffset(vars[index], var). + // offset can be retrieved via UpperBound(vars[index], var). + // + // For more efficiency, this method ignores all linear2 expressions with any + // coefficient different from 1. struct PrecedenceData { IntegerVariable var; int index; @@ -120,23 +132,26 @@ class PrecedenceRelations : public ReversibleInterface { // // Warning: If there are too many, this will NOT contain all relations. // - // Returns kMinIntegerValue if there are none. - // Otherwise a + offset <= b. 
- IntegerValue GetOffset(IntegerVariable a, IntegerVariable b) const; + // Returns kMaxIntegerValue if there are none, otherwise return an upper bound + // such that expr <= ub. + IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const; - // Returns the minimum distance between a and b, and the reason for it (all - // true). Note that we always check GetOffset() so if it is better, the - // returned literal reason will be empty. + // Returns the maximum value for expr, and the reason for it (all + // true). Note that we always check LevelZeroUpperBound() so if it is better, + // the returned literal reason will be empty. // // We separate the two because usually the reason is only needed when we push, // which happen less often, so we don't mind doing two hash lookups, and we - // really want to optimize the GetConditionalOffset() instead. + // really want to optimize the UpperBound() instead. // // Important: This doesn't contains the transitive closure. // Important: The span is only valid in a narrow scope. - IntegerValue GetConditionalOffset(IntegerVariable a, IntegerVariable b) const; - absl::Span GetConditionalEnforcements(IntegerVariable a, - IntegerVariable b) const; + IntegerValue UpperBound(LinearExpression2 expr) const; + + void AddReasonForUpperBoundLowerThan( + LinearExpression2 expr, IntegerValue ub, + std::vector* literal_reason, + std::vector* integer_reason) const; // The current code requires the internal data to be processed once all // relations are loaded. @@ -147,47 +162,45 @@ class PrecedenceRelations : public ReversibleInterface { private: void CreateLevelEntryIfNeeded(); - std::pair GetKey(IntegerVariable a, - IntegerVariable b) const { - return a <= b ? std::make_pair(a, b) : std::make_pair(b, a); - } - - // tail + offset <= head. - // Which is the same as tail - head <= -offset. 
- bool AddInternal(IntegerVariable tail, IntegerVariable head, - IntegerValue offset) { - const auto key = GetKey(tail, NegationOf(head)); - const auto [it, inserted] = root_relations_.insert({key, -offset}); - UpdateBestRelationIfBetter(key, -offset); + // expr <= ub. + bool AddInternal(LinearExpression2 expr, IntegerValue ub) { + expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { + return false; + } + const auto [it, inserted] = root_relations_.insert({expr, ub}); + UpdateBestRelationIfBetter(expr, ub); if (inserted) { - const int new_size = std::max(tail.value(), NegationOf(head).value()) + 1; + if (expr.coeffs[0] != 1 || expr.coeffs[1] != 1) { + return true; + } + const int new_size = + std::max(expr.vars[0].value(), expr.vars[1].value()) + 1; if (new_size > after_.size()) after_.resize(new_size); - after_[tail].push_back(head); - after_[NegationOf(head)].push_back(NegationOf(tail)); + after_[expr.vars[0]].push_back(NegationOf(expr.vars[1])); + after_[expr.vars[1]].push_back(NegationOf(expr.vars[0])); return true; } - it->second = std::min(it->second, -offset); + it->second = std::min(it->second, ub); return false; } - void UpdateBestRelationIfBetter( - std::pair key, IntegerValue rhs) { - const auto [it, inserted] = best_relations_.insert({key, rhs}); + void UpdateBestRelationIfBetter(LinearExpression2 expr, IntegerValue rhs) { + const auto [it, inserted] = best_relations_.insert({expr, rhs}); if (!inserted) { it->second = std::min(it->second, rhs); } } - void UpdateBestRelation(std::pair key, - IntegerValue rhs) { - const auto it = root_relations_.find(key); + void UpdateBestRelation(LinearExpression2 expr, IntegerValue rhs) { + const auto it = root_relations_.find(expr); if (it != root_relations_.end()) { rhs = std::min(rhs, it->second); } if (rhs == kMaxIntegerValue) { - best_relations_.erase(key); + best_relations_.erase(expr); } else { - best_relations_[key] = rhs; + best_relations_[expr] = rhs; } } @@ -207,34 +220,31 @@ 
class PrecedenceRelations : public ReversibleInterface { // TODO(user): this kind of reversible hash_map is already implemented in // other part of the code. Consolidate. struct ConditionalEntry { - ConditionalEntry(int p, IntegerValue r, - std::pair k, + ConditionalEntry(int p, IntegerValue r, LinearExpression2 k, absl::Span e) : prev_entry(p), rhs(r), key(k), enforcements(e.begin(), e.end()) {} int prev_entry; IntegerValue rhs; - std::pair key; + LinearExpression2 key; absl::InlinedVector enforcements; }; std::vector conditional_stack_; std::vector> level_to_stack_size_; - // This is always stored in the form (a + b <= rhs). + // This is always stored in the form (expr <= rhs). // The conditional relations contains indices in the conditional_stack_. - absl::flat_hash_map, IntegerValue> - root_relations_; - absl::flat_hash_map, int> - conditional_relations_; + absl::flat_hash_map root_relations_; + absl::flat_hash_map conditional_relations_; // Contains std::min() of the offset from root_relations_ and // conditional_relations_. - absl::flat_hash_map, IntegerValue> - best_relations_; + absl::flat_hash_map best_relations_; - // Store for each variable x, the variables y that appears in GetOffset(x, y) - // or GetConditionalOffset(x, y). That is the variable that are after x with - // an offset. Note that conditional_after_ is updated on dive/backtrack. + // Store for each variable x, the variables y that appears alongside it in + // LevelZeroUpperBound(expr) or UpperBound(expr). That is the variable + // that are after x with an offset. Note that conditional_after_ is updated on + // dive/backtrack. util_intops::StrongVector> after_; util_intops::StrongVector> @@ -588,12 +598,12 @@ class BinaryRelationsMaps { // relation. 
void AddRelationBounds(LinearExpression2 expr, IntegerValue lb, IntegerValue ub); - RelationStatus GetStatus(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub) const; + RelationStatus GetLevelZeroStatus(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub) const; // Return the status of a <= b; - RelationStatus GetPrecedenceStatus(AffineExpression a, - AffineExpression b) const; + RelationStatus GetLevelZeroPrecedenceStatus(AffineExpression a, + AffineExpression b) const; // Register the fact that l <=> ( a <= b ). // These are considered equivalence relation. @@ -605,20 +615,48 @@ class BinaryRelationsMaps { // true/false literal if the status is known at level zero. LiteralIndex GetReifiedPrecedence(AffineExpression a, AffineExpression b); + // If the given upper bound evaluate better than the current one we have, this + // will replace it and returns true, otherwise it returns false. + // + // Note that we never store trivial upper bound (using the current variable + // domain). + bool AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub); + + // Returns the best known upper-bound of the given LinearExpression2 at the + // current decision level. If its explanation is needed, it can be queried + // with the second function. + IntegerValue UpperBound(LinearExpression2 expr) const; + void AddReasonForUpperBoundLowerThan( + LinearExpression2 expr, IntegerValue ub, + std::vector* literal_reason, + std::vector* integer_reason) const; + + // Warning, the order will not be deterministic. + std::vector GetAllExpressionsWithAffineBounds() const; + + int NumExpressionsWithAffineBounds() const { return best_affine_ub_.size(); } + + void WatchAllLinearExpressions2(int id) { propagator_ids_.insert(id); } + private: + void NotifyWatchingPropagators() const; + // Return the pair (a - b) <= rhs. 
std::pair FromDifference( const AffineExpression& a, const AffineExpression& b) const; + IntegerValue GetImpliedUpperBound(const LinearExpression2& expr) const; std::pair GetImpliedLevelZeroBounds( const LinearExpression2& expr) const; IntegerTrail* integer_trail_; IntegerEncoder* integer_encoder_; + GenericLiteralWatcher* watcher_; SharedStatistics* shared_stats_; - BestBinaryRelationBounds best_upper_bounds_; + BestBinaryRelationBounds best_root_level_bounds_; int64_t num_updates_ = 0; + int64_t num_affine_updates_ = 0; // This stores relations l <=> (linear2 <= rhs). absl::flat_hash_map, Literal> @@ -631,6 +669,18 @@ class BinaryRelationsMaps { absl::flat_hash_set variable_appearing_in_reified_relations_; std::vector> all_reified_relations_; + + // This stores linear2 <= AffineExpression / divisor. + // + // Note(user): This is a "cheap way" to not have to deal with backtracking, If + // we have many possible AffineExpression that bounds a LinearExpression2, we + // keep the best one during "search dive" but on backtrack we might have a + // sub-optimal relation. 
+ absl::flat_hash_map> + best_affine_ub_; + + absl::btree_set propagator_ids_; }; // Detects if at least one of a subset of linear of size 2 or 1, touching the @@ -731,7 +781,9 @@ inline std::function LowerOrEqualWithOffset(IntegerVariable a, IntegerVariable b, int64_t offset) { return [=](Model* model) { - model->GetOrCreate()->Add(a, b, IntegerValue(offset)); + LinearExpression2 expr(a, b, 1, -1); + model->GetOrCreate()->AddUpperBound( + expr, IntegerValue(-offset)); model->GetOrCreate()->AddPrecedenceWithOffset( a, b, IntegerValue(offset)); }; @@ -745,8 +797,9 @@ inline std::function AffineCoeffOneLowerOrEqualWithOffset( CHECK_NE(b.var, kNoIntegerVariable); CHECK_EQ(b.coeff, 1); return [=](Model* model) { - model->GetOrCreate()->Add( - a.var, b.var, a.constant - b.constant + offset); + LinearExpression2 expr(a.var, b.var, 1, -1); + model->GetOrCreate()->AddUpperBound( + expr, -a.constant + b.constant - offset); model->GetOrCreate()->AddPrecedenceWithOffset( a.var, b.var, a.constant - b.constant + offset); }; @@ -758,8 +811,9 @@ inline void AddConditionalSum2LowerOrEqual( IntegerVariable b, int64_t ub, Model* model) { // TODO(user): Refactor to be sure we do not miss any level zero relations. if (enforcement_literals.empty()) { - model->GetOrCreate()->Add(a, NegationOf(b), - IntegerValue(-ub)); + LinearExpression2 expr(a, b, 1, 1); + model->GetOrCreate()->AddUpperBound(expr, + IntegerValue(ub)); } PrecedencesPropagator* p = model->GetOrCreate(); diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index 994abc248c..781781d5f2 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -45,7 +45,7 @@ using ::testing::UnorderedElementsAre; // TODO(user): move that in a common place. test_utils? 
#define EXPECT_BOUNDS_EQ(var, lb, ub) \ EXPECT_EQ(integer_trail->LowerBound(var), lb); \ - EXPECT_EQ(integer_trail->UpperBound(var), ub) + EXPECT_EQ(integer_trail->LevelZeroUpperBound(var), ub) // All the tests here uses 10 integer variables initially in [0, 100]. std::vector AddVariables(IntegerTrail* integer_trail) { @@ -68,24 +68,42 @@ TEST(PrecedenceRelationsTest, BasicAPI) { IntegerVariable a(0), b(2), c(4), d(6); PrecedenceRelations precedences(&model); - precedences.Add(a, b, 10); - precedences.Add(d, c, 7); - precedences.Add(b, d, 5); + precedences.AddUpperBound(LinearExpression2::Difference(a, b), -10); + precedences.AddUpperBound(LinearExpression2::Difference(d, c), -7); + precedences.AddUpperBound(LinearExpression2::Difference(b, d), -5); precedences.Build(); - EXPECT_EQ(precedences.GetOffset(a, b), 10); - EXPECT_EQ(precedences.GetOffset(NegationOf(b), NegationOf(a)), 10); - EXPECT_EQ(precedences.GetOffset(a, c), 22); - EXPECT_EQ(precedences.GetOffset(NegationOf(c), NegationOf(a)), 22); - EXPECT_EQ(precedences.GetOffset(a, d), 15); - EXPECT_EQ(precedences.GetOffset(NegationOf(d), NegationOf(a)), 15); - EXPECT_EQ(precedences.GetOffset(d, a), kMinIntegerValue); + EXPECT_EQ( + precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, b)), + -10); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(b), NegationOf(a))), + -10); + EXPECT_EQ( + precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, c)), + -22); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(c), NegationOf(a))), + -22); + EXPECT_EQ( + precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, d)), + -15); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(d), NegationOf(a))), + -15); + EXPECT_EQ( + precedences.LevelZeroUpperBound(LinearExpression2::Difference(d, a)), + kMaxIntegerValue); // Once built, we can update the offsets. 
// Note however that this would not propagate through the precedence graphs. - precedences.Add(a, b, 15); - EXPECT_EQ(precedences.GetOffset(a, b), 15); - EXPECT_EQ(precedences.GetOffset(NegationOf(b), NegationOf(a)), 15); + precedences.AddUpperBound(LinearExpression2::Difference(a, b), -15); + EXPECT_EQ( + precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, b)), + -15); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(b), NegationOf(a))), + -15); } TEST(PrecedenceRelationsTest, CornerCase1) { @@ -97,16 +115,22 @@ TEST(PrecedenceRelationsTest, CornerCase1) { IntegerVariable a(0), b(2), c(4), d(6); PrecedenceRelations precedences(&model); - precedences.Add(a, b, 10); - precedences.Add(b, c, 7); - precedences.Add(b, d, 5); - precedences.Add(NegationOf(b), a, 5); + precedences.AddUpperBound(LinearExpression2::Difference(a, b), -10); + precedences.AddUpperBound(LinearExpression2::Difference(b, c), -7); + precedences.AddUpperBound(LinearExpression2::Difference(b, d), -5); + precedences.AddUpperBound(LinearExpression2::Difference(NegationOf(b), a), + -5); precedences.Build(); - EXPECT_EQ(precedences.GetOffset(NegationOf(b), a), 5); - EXPECT_EQ(precedences.GetOffset(NegationOf(b), b), 15); - EXPECT_EQ(precedences.GetOffset(NegationOf(b), c), 22); - EXPECT_EQ(precedences.GetOffset(NegationOf(b), d), 20); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(b), a)), + -5); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(b), c)), + -22); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(b), d)), + -20); } TEST(PrecedenceRelationsTest, CornerCase2) { @@ -118,16 +142,34 @@ TEST(PrecedenceRelationsTest, CornerCase2) { IntegerVariable a(0), b(2), c(4), d(6); PrecedenceRelations precedences(&model); - precedences.Add(NegationOf(a), a, 10); - precedences.Add(a, b, 7); - precedences.Add(a, c, 5); - precedences.Add(a, d, 
2); + precedences.AddUpperBound(LinearExpression2::Difference(NegationOf(a), a), + -10); + precedences.AddUpperBound(LinearExpression2::Difference(a, b), -7); + precedences.AddUpperBound(LinearExpression2::Difference(a, c), -5); + precedences.AddUpperBound(LinearExpression2::Difference(a, d), -2); + EXPECT_EQ(precedences.LevelZeroUpperBound( + LinearExpression2::Difference(NegationOf(b), NegationOf(a))), + -7); + + precedences.Build(); +} + +TEST(PrecedenceRelationsTest, CoefficientGreaterThanOne) { + Model model; + IntegerTrail* integer_trail = model.GetOrCreate(); + const std::vector vars = AddVariables(integer_trail); + + // Note that odd indices are for the negation. + IntegerVariable a(0), b(2), c(4); + + PrecedenceRelations precedences(&model); + precedences.AddUpperBound(LinearExpression2(a, b, 3, -4), 7); + precedences.AddUpperBound(LinearExpression2(a, c, 2, -3), -5); + precedences.AddUpperBound(LinearExpression2(a, b, 6, -8), 5); + EXPECT_EQ(precedences.LevelZeroUpperBound(LinearExpression2(a, b, 9, -12)), + 6); precedences.Build(); - EXPECT_EQ(precedences.GetOffset(NegationOf(a), a), 10); - EXPECT_EQ(precedences.GetOffset(NegationOf(a), b), 17); - EXPECT_EQ(precedences.GetOffset(NegationOf(a), c), 15); - EXPECT_EQ(precedences.GetOffset(NegationOf(a), d), 12); } TEST(PrecedenceRelationsTest, ConditionalRelations) { @@ -142,20 +184,31 @@ TEST(PrecedenceRelationsTest, ConditionalRelations) { // Note that odd indices are for the negation. IntegerVariable a(0), b(2); PrecedenceRelations precedences(&model); - precedences.PushConditionalRelation({l}, a, b, 15); - precedences.PushConditionalRelation({l}, a, b, 20); + precedences.PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 15); + precedences.PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 20); // We only keep the best one. 
- EXPECT_EQ(precedences.GetConditionalOffset(a, NegationOf(b)), -15); - EXPECT_THAT(precedences.GetConditionalEnforcements(a, NegationOf(b)), - ElementsAre(l)); + EXPECT_EQ( + precedences.UpperBound(LinearExpression2::Difference(a, NegationOf(b))), + 15); + std::vector literal_reason; + std::vector integer_reason; + precedences.AddReasonForUpperBoundLowerThan( + LinearExpression2::Difference(a, NegationOf(b)), 15, &literal_reason, + &integer_reason); + EXPECT_THAT(literal_reason, ElementsAre(l.Negated())); // Backtrack works. EXPECT_TRUE(sat_solver->ResetToLevelZero()); - EXPECT_EQ(precedences.GetConditionalOffset(a, NegationOf(b)), - kMinIntegerValue); - EXPECT_THAT(precedences.GetConditionalEnforcements(a, NegationOf(b)), - ElementsAre()); + EXPECT_EQ( + precedences.UpperBound(LinearExpression2::Difference(a, NegationOf(b))), + kMaxIntegerValue); + literal_reason.clear(); + integer_reason.clear(); + precedences.AddReasonForUpperBoundLowerThan( + LinearExpression2::Difference(a, NegationOf(b)), kMaxIntegerValue, + &literal_reason, &integer_reason); + EXPECT_THAT(literal_reason, IsEmpty()); } TEST(PrecedencesPropagatorTest, Empty) { @@ -425,12 +478,18 @@ TEST(PrecedenceRelationsTest, CollectPrecedences) { auto* relations = model.GetOrCreate(); std::vector vars = AddVariables(integer_trail); - relations->Add(vars[0], vars[2], IntegerValue(1)); - relations->Add(vars[0], vars[5], IntegerValue(1)); - relations->Add(vars[1], vars[2], IntegerValue(1)); - relations->Add(vars[2], vars[4], IntegerValue(1)); - relations->Add(vars[3], vars[4], IntegerValue(1)); - relations->Add(vars[4], vars[5], IntegerValue(1)); + relations->AddUpperBound(LinearExpression2::Difference(vars[0], vars[2]), + IntegerValue(-1)); + relations->AddUpperBound(LinearExpression2::Difference(vars[0], vars[5]), + IntegerValue(-1)); + relations->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), + IntegerValue(-1)); + relations->AddUpperBound(LinearExpression2::Difference(vars[2], vars[4]), + 
IntegerValue(-1)); + relations->AddUpperBound(LinearExpression2::Difference(vars[3], vars[4]), + IntegerValue(-1)); + relations->AddUpperBound(LinearExpression2::Difference(vars[4], vars[5]), + IntegerValue(-1)); std::vector p; relations->CollectPrecedences({vars[0], vars[2], vars[3]}, &p); @@ -979,6 +1038,81 @@ TEST(PrecedencesPropagatorTest, BasicFiltering2) { EXPECT_THAT(precedences[1].indices, ElementsAre(0, 1, 2, 3)); } +TEST(BinaryRelationMapsTest, AffineUpperBound) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 2)); + + // x - y; + LinearExpression2 expr; + expr.vars[0] = x; + expr.vars[1] = y; + expr.coeffs[0] = IntegerValue(1); + expr.coeffs[1] = IntegerValue(-1); + + // Starts with trivial level zero bound. + auto* tested = model.GetOrCreate(); + EXPECT_EQ(tested->UpperBound(expr), IntegerValue(10)); + + // Lets add a relation. + tested->AddRelationBounds(expr, IntegerValue(-5), IntegerValue(5)); + EXPECT_EQ(tested->UpperBound(expr), IntegerValue(5)); + + // Note that we canonicalize with gcd. + expr.coeffs[0] *= 3; + expr.coeffs[1] *= 3; + EXPECT_EQ(tested->UpperBound(expr), IntegerValue(15)); + + // Lets add an affine upper bound to that expression <= 4 * z + 1. + EXPECT_TRUE(tested->AddAffineUpperBound( + expr, AffineExpression(z, IntegerValue(4), IntegerValue(1)))); + EXPECT_EQ(tested->UpperBound(expr), IntegerValue(9)); + + // Lets test the reason, first push a new bound. 
+ auto* search = model.GetOrCreate(); + search->TakeDecision( + Literal(search->GetDecisionLiteral(BooleanOrIntegerLiteral( + IntegerLiteral::LowerOrEqual(z, IntegerValue(1)))))); + + // Because of gcd, even though ub(affine) is now 5, we get 3, + EXPECT_EQ(tested->UpperBound(expr), IntegerValue(3)); + { + std::vector literal_reason; + std::vector integer_reason; + tested->AddReasonForUpperBoundLowerThan(expr, IntegerValue(4), + &literal_reason, &integer_reason); + EXPECT_THAT(literal_reason, ElementsAre()); + EXPECT_THAT(integer_reason, + ElementsAre(IntegerLiteral::LowerOrEqual(z, IntegerValue(1)))); + } + + // If we use a bound not as strong, we get a different reason though. + { + std::vector literal_reason; + std::vector integer_reason; + tested->AddReasonForUpperBoundLowerThan(expr, IntegerValue(9), + &literal_reason, &integer_reason); + EXPECT_THAT(literal_reason, ElementsAre()); + EXPECT_THAT(integer_reason, + ElementsAre(IntegerLiteral::LowerOrEqual(z, IntegerValue(2)))); + } + { + // This is implied by the level zero relation x <= 5 + std::vector literal_reason; + std::vector integer_reason; + tested->AddReasonForUpperBoundLowerThan(expr, IntegerValue(15), + &literal_reason, &integer_reason); + EXPECT_THAT(literal_reason, ElementsAre()); + EXPECT_THAT(integer_reason, ElementsAre()); + } + + // Note that the bound works on the canonicalized expr. + expr.coeffs[0] /= 3; + expr.coeffs[1] /= 3; + EXPECT_EQ(tested->UpperBound(expr), IntegerValue(1)); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/sat_inprocessing.cc b/ortools/sat/sat_inprocessing.cc index 4d17f1400c..d25c7897b3 100644 --- a/ortools/sat/sat_inprocessing.cc +++ b/ortools/sat/sat_inprocessing.cc @@ -1636,6 +1636,9 @@ bool BoundedVariableElimination::CrossProduct(BooleanVariable var) { if (new_score_ > score_threshold_) return true; // Perform BVE. 
+  //
+  // TODO(user): If filter_sat_postsolve_clauses is true, only one of the two
+  // sets needs to be kept for postsolve.

   if (new_score_ > 0) {
     if (!ResolveAllClauseContaining(lit)) {
diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto
index c42f5a405e..fb7d23f541 100644
--- a/ortools/sat/sat_parameters.proto
+++ b/ortools/sat/sat_parameters.proto
@@ -24,7 +24,7 @@ option java_multiple_files = true;
 // Contains the definitions for all the sat algorithm parameters and their
 // default values.
 //
-// NEXT TAG: 323
+// NEXT TAG: 325
 message SatParameters {
   // In some context, like in a portfolio of search, it makes sense to name a
   // given parameters set for logging purpose.
@@ -411,6 +411,15 @@ message SatParameters {
   // occurrences of not(x) is not greater than this parameter.
   optional int32 presolve_bve_threshold = 54 [default = 500];

+  // Internal parameter. During BVE, if we eliminate a variable x, by default
+  // we will push all clauses containing x and all clauses containing not(x)
+  // to the postsolve. However, it is possible to write the postsolve code so
+  // that only one such set is needed. The idea is that, if we push the set
+  // containing a literal l, we set l to false unless it is needed to satisfy
+  // one of the clauses in the set. This is always beneficial, but for
+  // historical reasons, not all our postsolve algorithms support this.
+  optional bool filter_sat_postsolve_clauses = 324 [default = false];
+
   // During presolve, we apply BVE only if this weight times the number of
   // clauses plus the number of clause literals is not increased.
optional int32 presolve_bve_clause_weight = 55 [default = 3]; @@ -937,6 +946,15 @@ message SatParameters { optional int32 maximum_regions_to_split_in_disconnected_no_overlap_2d = 315 [default = 0]; + // When set, this activates a propagator for the no_overlap_2d constraint that + // uses any eventual linear constraints of the model in the form + // `{start interval 1} - {end interval 2} + c*w <= ub` to detect that two + // intervals must overlap in one dimension for some values of `w`. This is + // particularly useful for problems where the distance between two boxes is + // part of the model. + optional bool use_linear3_for_no_overlap_2d_precedences = 323 + [default = true]; + // When set, it activates a few scheduling parameters to improve the lower // bound of scheduling problems. This is only effective with multiple workers // as it modifies the reduced_cost, lb_tree_search, and probing workers. diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 5fd29afe20..f99d3f0889 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -1121,19 +1120,21 @@ void CtExhaustiveHelper::Init( for (const auto& e2 : events) { if (e2.task_index == e1.task_index) continue; - if (binary_relations->GetPrecedenceStatus(e2.end, e1.start) == + if (binary_relations->GetLevelZeroPrecedenceStatus(e2.end, e1.start) == RelationStatus::IS_TRUE) { predecessors_.AppendToLastVector(e2.task_index); } } } VLOG(2) << "num_tasks:" << max_task_index_ + 1 - << " num_precedences:" << predecessors_.num_entries(); + << " num_precedences:" << predecessors_.num_entries() + << " predecessors size:" << predecessors_.size(); } bool CtExhaustiveHelper::PermutationIsCompatibleWithPrecedences( absl::Span events, absl::Span permutation) { + if (predecessors_.num_entries() == 0) return true; visited_.assign(max_task_index_ + 1, false); for (int i = permutation.size() - 1; i >= 
0; --i) { const CompletionTimeEvent& event = events[permutation[i]]; @@ -1159,11 +1160,11 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutationForNoOverlap( IntegerValue end_min_of_previous_task = kMinIntegerValue; for (const int index : permutation) { const CompletionTimeEvent& event = events[index]; - const IntegerValue threshold = + const IntegerValue task_start_min = std::max(event.start_min, end_min_of_previous_task); - if (event.start_max < threshold) return false; // Infeasible. + if (event.start_max < task_start_min) return false; // Infeasible. - end_min_of_previous_task = threshold + event.size_min; + end_min_of_previous_task = task_start_min + event.size_min; sum_of_ends += end_min_of_previous_task; sum_of_weighted_ends += event.energy_min * end_min_of_previous_task; } @@ -1232,14 +1233,13 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( // Iterate on the profile to find the step that contains start_min. // Then push until we find a step with enough capacity. - int current = 0; - while (helper.profile_[current + 1].first <= start_min || - helper.profile_[current].second < event.demand_min) { - ++current; + auto profile_it = helper.profile_.begin(); + while ((profile_it + 1)->first <= start_min || + profile_it->second < event.demand_min) { + ++profile_it; } - IntegerValue actual_start = - std::max(start_min, helper.profile_[current].first); + IntegerValue actual_start = std::max(start_min, profile_it->first); const IntegerValue initial_start_min = actual_start; // Propagate precedences. @@ -1255,6 +1255,8 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( if (actual_start > initial_start_min) { cut_use_precedences = true; + // Catch up the position on the profile w.r.t. the actual start. + while ((profile_it + 1)->first <= actual_start) ++profile_it; VLOG(3) << "push from " << initial_start_min << " to " << actual_start; } @@ -1276,79 +1278,105 @@ bool ComputeWeightedSumOfEndMinsOfOnePermutation( // Update the profile. 
helper.new_profile_.clear(); + const IntegerValue demand_min = event.demand_min; + + // Insert the start of the shifted profile. helper.new_profile_.push_back( - {actual_start, helper.profile_[current].second - event.demand_min}); - ++current; + {actual_start, profile_it->second - demand_min}); + ++profile_it; - while (helper.profile_[current].first < actual_end) { + // Copy and modify the part of the profile impacted by the current event. + while (profile_it->first < actual_end) { helper.new_profile_.push_back( - {helper.profile_[current].first, - helper.profile_[current].second - event.demand_min}); - ++current; + {profile_it->first, profile_it->second - demand_min}); + ++profile_it; } - if (helper.profile_[current].first > actual_end) { - helper.new_profile_.push_back( - {actual_end, helper.new_profile_.back().second + event.demand_min}); - } - while (current < helper.profile_.size()) { - helper.new_profile_.push_back(helper.profile_[current]); - ++current; + // Insert a new event in the profile at the end of the task if needed. + if (profile_it->first > actual_end) { + helper.new_profile_.push_back({actual_end, (profile_it - 1)->second}); } + + // Insert the tail of the current profile. + helper.new_profile_.insert(helper.new_profile_.end(), profile_it, + helper.profile_.end()); + helper.profile_.swap(helper.new_profile_); } return true; } +const int kCtExhaustiveTargetSize = 6; +// This correspond to the number of permutations the system will explore when +// fully exploring all possible sizes and all possible permutations for up to 6 +// tasks, without any precedence. +const int kExplorationLimit = 873; // 1! + 2! + 3! + 4! + 5! + 6! 
+ } // namespace -bool ComputeMinSumOfWeightedEndMins( +CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( absl::Span events, IntegerValue capacity_max, double unweighted_threshold, double weighted_threshold, CtExhaustiveHelper& helper, double& min_sum_of_ends, - double& min_sum_of_weighted_ends, bool& cut_use_precedences) { + double& min_sum_of_weighted_ends, bool& cut_use_precedences, + int& exploration_credit) { // Reset the events based sums. min_sum_of_ends = std::numeric_limits::max(); min_sum_of_weighted_ends = std::numeric_limits::max(); + helper.task_to_index_.assign(helper.max_task_index() + 1, -1); + for (int i = 0; i < events.size(); ++i) { + helper.task_to_index_[events[i].task_index] = i; + } + helper.valid_permutation_iterator_.Reset(events.size()); + for (int i = 0; i < events.size(); ++i) { + const int task_i = events[i].task_index; + for (const int task_j : helper.predecessors()[task_i]) { + const int j = helper.task_to_index_[task_j]; + if (j != -1) { + helper.valid_permutation_iterator_.AddArc(j, i); + } + } + } + if (!helper.valid_permutation_iterator_.Init()) { + return CompletionTimeExplorationStatus::NO_VALID_PERMUTATION; + } - // Local stats. 
- int num_explored = 0; - int num_pruned = 0; - bool aborted = false; - - std::vector permutation(events.size()); - std::iota(permutation.begin(), permutation.end(), 0); + int num_valid_permutations = 0; do { + if (--exploration_credit < 0) break; + IntegerValue sum_of_ends = 0; IntegerValue sum_of_weighted_ends = 0; - if (!helper.PermutationIsCompatibleWithPrecedences(events, permutation)) { - cut_use_precedences = true; - continue; - } if (ComputeWeightedSumOfEndMinsOfOnePermutation( - events, permutation, capacity_max, helper, sum_of_ends, - sum_of_weighted_ends, cut_use_precedences)) { + events, helper.valid_permutation_iterator_.permutation(), + capacity_max, helper, sum_of_ends, sum_of_weighted_ends, + cut_use_precedences)) { min_sum_of_ends = std::min(ToDouble(sum_of_ends), min_sum_of_ends); min_sum_of_weighted_ends = std::min(ToDouble(sum_of_weighted_ends), min_sum_of_weighted_ends); - num_explored++; + ++num_valid_permutations; + if (min_sum_of_ends <= unweighted_threshold && min_sum_of_weighted_ends <= weighted_threshold) { - aborted = true; break; } - } else { - num_pruned++; } - } while (std::next_permutation(permutation.begin(), permutation.end())); - VLOG(3) << "DP: size=" << events.size() << ", explored = " << num_explored - << ", pruned = " << num_pruned << ", aborted = " << aborted - << ", min_sum_of_end_mins = " << min_sum_of_ends - << ", min_sum_of_weighted_end_mins = " << min_sum_of_weighted_ends - << ", unweighted_threshold = " << unweighted_threshold - << ", weighted_threshold = " << weighted_threshold; - return num_explored > 0; + } while (helper.valid_permutation_iterator_.Increase()); + const CompletionTimeExplorationStatus status = + exploration_credit < 0 ? CompletionTimeExplorationStatus::ABORTED + : num_valid_permutations > 0 + ? 
CompletionTimeExplorationStatus::FINISHED + : CompletionTimeExplorationStatus::NO_VALID_PERMUTATION; + VLOG(2) << "DP: size:" << events.size() + << ", num_valid_permutations:" << num_valid_permutations + << ", min_sum_of_end_mins:" << min_sum_of_ends + << ", min_sum_of_weighted_end_mins:" << min_sum_of_weighted_ends + << ", unweighted_threshold:" << unweighted_threshold + << ", weighted_threshold:" << weighted_threshold + << ", exploration_credit:" << exploration_credit + << ", status:" << status; + return status; } // TODO(user): Improve performance @@ -1359,6 +1387,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( IntegerValue capacity_max, CtExhaustiveHelper& helper, Model* model, LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); + // Sort by start min to bucketize by start_min. std::sort( events.begin(), events.end(), @@ -1374,20 +1403,19 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( bool cut_use_precedences = false; // Used for naming the cut. const IntegerValue sequence_start_min = events[start].start_min; - std::vector residual_tasks(events.begin() + start, - events.end()); + helper.residual_events_.assign(events.begin() + start, events.end()); // We look at events that start before sequence_start_min, but are forced // to cross this time point. for (int before = 0; before < start; ++before) { if (events[before].start_min + events[before].size_min > sequence_start_min) { - residual_tasks.push_back(events[before]); // Copy. - residual_tasks.back().lifted = true; + helper.residual_events_.push_back(events[before]); // Copy. 
+ helper.residual_events_.back().lifted = true; } } - std::sort(residual_tasks.begin(), residual_tasks.end(), + std::sort(helper.residual_events_.begin(), helper.residual_events_.end(), [](const CompletionTimeEvent& e1, const CompletionTimeEvent& e2) { return std::tie(e1.lp_end, e1.task_index) < std::tie(e2.lp_end, e2.task_index); @@ -1399,9 +1427,11 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( double sum_of_square_energies = 0; double min_sum_of_ends = std::numeric_limits::max(); double min_sum_of_weighted_ends = std::numeric_limits::max(); + int exploration_limit = kExplorationLimit; + const int kMaxSize = std::min(helper.residual_events_.size(), 12); - for (int i = 0; i < std::min(residual_tasks.size(), 7); ++i) { - const CompletionTimeEvent& event = residual_tasks[i]; + for (int i = 0; i < kMaxSize; ++i) { + const CompletionTimeEvent& event = helper.residual_events_[i]; const double energy = ToDouble(event.energy_min); sum_of_ends_lp += event.lp_end; sum_of_weighted_ends_lp += event.lp_end * energy; @@ -1409,18 +1439,25 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( sum_of_square_energies += energy * energy; // Both cases with 1 or 2 tasks are trivial and independent of the order. - // Also, if capacity is not exceeded, pushing all ends left is a valid LP - // assignment. + // Also, if the sum of demands is less than or equal to the capacity, + // pushing all ends left is a valid LP assignment. And this assignment + // should be propagated by the lp model. 
if (i <= 1 || sum_of_demands <= capacity_max) continue; - if (!ComputeMinSumOfWeightedEndMins( - absl::MakeSpan(residual_tasks).first(i + 1), capacity_max, + absl::Span tasks_to_explore = + absl::MakeSpan(helper.residual_events_).first(i + 1); + const CompletionTimeExplorationStatus status = + ComputeMinSumOfWeightedEndMins( + tasks_to_explore, capacity_max, /* unweighted_threshold= */ sum_of_ends_lp + kMinCutViolation, /* weighted_threshold= */ sum_of_weighted_ends_lp + kMinCutViolation, helper, min_sum_of_ends, min_sum_of_weighted_ends, - cut_use_precedences)) { + cut_use_precedences, exploration_limit); + if (status == CompletionTimeExplorationStatus::NO_VALID_PERMUTATION) { return false; + } else if (status == CompletionTimeExplorationStatus::ABORTED) { + break; } const double unweigthed_violation = @@ -1435,7 +1472,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( LinearConstraintBuilder cut(model, min_sum_of_ends, kMaxIntegerValue); bool is_lifted = false; for (int j = 0; j <= i; ++j) { - const CompletionTimeEvent& event = residual_tasks[j]; + const CompletionTimeEvent& event = helper.residual_events_[j]; is_lifted |= event.lifted; cut.AddTerm(event.end, IntegerValue(1)); } @@ -1452,7 +1489,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( kMaxIntegerValue); bool is_lifted = false; for (int j = 0; j <= i; ++j) { - const CompletionTimeEvent& event = residual_tasks[j]; + const CompletionTimeEvent& event = helper.residual_events_[j]; is_lifted |= event.lifted; cut.AddTerm(event.end, event.energy_min); } @@ -1582,8 +1619,7 @@ void AddEventDemandsToCapacitySubsetSum( // ordered by increasing end time in the LP relaxation. 
void GenerateCompletionTimeCutsWithEnergy( absl::string_view cut_name, std::vector events, - IntegerValue capacity_max, bool skip_low_sizes, Model* model, - LinearConstraintManager* manager) { + IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); const VariablesAssignment& assignment = model->GetOrCreate()->Assignment(); @@ -1623,6 +1659,10 @@ void GenerateCompletionTimeCutsWithEnergy( } } + // If we have less than kCtExhaustiveTargetSize tasks, we are already + // covered by the exhaustive cut generator. + if (residual_tasks.size() <= kCtExhaustiveTargetSize) continue; + // Best cut so far for this loop. int best_end = -1; double best_efficacy = 0.01; @@ -1676,9 +1716,8 @@ void GenerateCompletionTimeCutsWithEnergy( AddEventDemandsToCapacitySubsetSum(event, assignment, capacity_max, tmp_possible_demands, dp); - // This is competing with the brute force approach. Skip cases covered - // by the other code. - if (skip_low_sizes && i < 7) continue; + // Ignore cuts covered by the exhaustive cut generator. + if (i < kCtExhaustiveTargetSize) continue; const IntegerValue reachable_capacity = dp.CurrentMax(); @@ -1769,18 +1808,15 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( CtExhaustiveHelper helper; helper.Init(events, model); - const std::string mirror_str = time_is_forward ? 
"" : "_mirror"; if (!GenerateShortCompletionTimeCutsWithExactBound( - absl::StrCat("NoOverlapCompletionTimeExhaustive", mirror_str), - events, + "NoOverlapCompletionTimeExhaustive", events, /*capacity_max=*/IntegerValue(1), helper, model, manager)) { return false; } GenerateCompletionTimeCutsWithEnergy( - absl::StrCat("NoOverlapCompletionTimeQueyrane", mirror_str), - std::move(events), /*capacity_max=*/IntegerValue(1), - /*skip_low_sizes=*/true, model, manager); + "NoOverlapCompletionTimeQueyrane", std::move(events), + /*capacity_max=*/IntegerValue(1), model, manager); return true; }; if (!generate_cuts(/*time_is_forward=*/true)) return false; @@ -1835,17 +1871,15 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( helper.Init(events, model); const IntegerValue capacity_max = integer_trail->UpperBound(capacity); - const std::string mirror_str = time_is_forward ? "" : "_mirror"; if (!GenerateShortCompletionTimeCutsWithExactBound( - absl::StrCat("CumulativeCompletionTimeExhaustive", mirror_str), - events, capacity_max, helper, model, manager)) { + "CumulativeCompletionTimeExhaustive", events, capacity_max, + helper, model, manager)) { return false; } - GenerateCompletionTimeCutsWithEnergy( - absl::StrCat("CumulativeCompletionTimeQueyrane", mirror_str), - std::move(events), capacity_max, - /*skip_low_sizes=*/true, model, manager); + GenerateCompletionTimeCutsWithEnergy("CumulativeCompletionTimeQueyrane", + std::move(events), capacity_max, + model, manager); return true; }; diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index a1fbd5a61c..920f5a23e6 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -152,6 +152,8 @@ struct CompletionTimeEvent { class CtExhaustiveHelper { public: + CtExhaustiveHelper() = default; + int max_task_index() const { return max_task_index_; } const CompactVectorVector& predecessors() const { return predecessors_; } @@ -159,6 +161,9 @@ class CtExhaustiveHelper { std::vector> 
profile_; std::vector> new_profile_; std::vector assigned_ends_; + std::vector task_to_index_; + DagTopologicalSortIterator valid_permutation_iterator_; + std::vector residual_events_; // Collect precedences, set max_task_index. // TODO(user): Do some transitive closure. @@ -174,6 +179,27 @@ class CtExhaustiveHelper { std::vector visited_; }; +enum class CompletionTimeExplorationStatus { + FINISHED, + ABORTED, + NO_VALID_PERMUTATION, +}; + +template +void AbslStringify(Sink& sink, const CompletionTimeExplorationStatus& status) { + switch (status) { + case CompletionTimeExplorationStatus::FINISHED: + sink.Append("FINISHED"); + break; + case CompletionTimeExplorationStatus::ABORTED: + sink.Append("ABORTED"); + break; + case CompletionTimeExplorationStatus::NO_VALID_PERMUTATION: + sink.Append("NO_VALID_PERMUTATION"); + break; + } +} + // Computes the minimum sum of the end min and the minimum sum of the end min // weighted by weight of all events. It returns false if no permutation is // valid w.r.t. the range of starts. @@ -182,11 +208,12 @@ class CtExhaustiveHelper { // small, like <= 10. They should also starts in index order. // // Optim: If both sums are proven <= to the corresponding threshold, we abort. 
-bool ComputeMinSumOfWeightedEndMins( +CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( absl::Span events, IntegerValue capacity_max, double unweighted_threshold, double weighted_threshold, CtExhaustiveHelper& helper, double& min_sum_of_ends, - double& min_sum_of_weighted_ends, bool& cut_use_precedences); + double& min_sum_of_weighted_ends, bool& cut_use_precedences, + int& exploration_credit); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc index f434e296de..543bafd019 100644 --- a/ortools/sat/scheduling_cuts_test.cc +++ b/ortools/sat/scheduling_cuts_test.cc @@ -411,9 +411,12 @@ TEST(ComputeMinSumOfEndMinsTest, CombinationOf3) { CtExhaustiveHelper ct_helper; ct_helper.Init(events, &model); bool cut_use_precedences = false; - ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( - events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, - min_sum_of_weighted_end_mins, cut_use_precedences)); + int exploration_credit = 1000; + ASSERT_EQ(ComputeMinSumOfWeightedEndMins( + events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences, + exploration_credit), + CompletionTimeExplorationStatus::FINISHED); EXPECT_EQ(min_sum_of_end_mins, 17); EXPECT_EQ(min_sum_of_weighted_end_mins, 86); EXPECT_FALSE(cut_use_precedences); @@ -460,13 +463,67 @@ TEST(ComputeMinSumOfEndMinsTest, CombinationOf3ConstraintStart) { CtExhaustiveHelper ct_helper; ct_helper.Init(events, &model); bool cut_use_precedences = false; - ASSERT_TRUE(ComputeMinSumOfWeightedEndMins( - events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, - min_sum_of_weighted_end_mins, cut_use_precedences)); + int exploration_credit = 1000; + + ASSERT_EQ(ComputeMinSumOfWeightedEndMins( + events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences, + exploration_credit), + CompletionTimeExplorationStatus::FINISHED); 
EXPECT_EQ(min_sum_of_end_mins, 18); EXPECT_EQ(min_sum_of_weighted_end_mins, 86); } +TEST(ComputeMinSumOfEndMinsTest, Abort) { + Model model; + auto* intervals_repository = model.GetOrCreate(); + + IntegerValue one(1); + IntegerValue two(2); + + const IntegerVariable start1 = model.Add(NewIntegerVariable(0, 3)); + const IntegerValue size1(3); + const IntervalVariable i1 = intervals_repository->CreateInterval( + start1, AffineExpression(start1, one, size1), size1, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start2 = model.Add(NewIntegerVariable(0, 10)); + const IntegerValue size2(4); + const IntervalVariable i2 = intervals_repository->CreateInterval( + start2, AffineExpression(start2, one, size2), size2, kNoLiteralIndex, + /*add_linear_relation=*/false); + + const IntegerVariable start3 = model.Add(NewIntegerVariable(0, 10)); + const IntegerValue size3(5); + const IntervalVariable i3 = intervals_repository->CreateInterval( + start3, AffineExpression(start3, one, size3), size3, kNoLiteralIndex, + /*add_linear_relation=*/false); + + SchedulingConstraintHelper* helper = + model.GetOrCreate()->GetOrCreateHelper({i1, i2, i3}); + SchedulingDemandHelper* demands_helper = + new SchedulingDemandHelper({two, one, one}, helper, &model); + model.TakeOwnership(demands_helper); + + CompletionTimeEvent e1(0, helper, demands_helper); + CompletionTimeEvent e2(1, helper, demands_helper); + CompletionTimeEvent e3(2, helper, demands_helper); + const std::vector events = {e1, e2, e3}; + + double min_sum_of_end_mins = 0; + double min_sum_of_weighted_end_mins = 0; + CtExhaustiveHelper ct_helper; + ct_helper.Init(events, &model); + bool cut_use_precedences = false; + int exploration_credit = 2; + + ASSERT_EQ(ComputeMinSumOfWeightedEndMins( + events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences, + exploration_credit), + CompletionTimeExplorationStatus::ABORTED); +} + TEST(ComputeMinSumOfEndMinsTest, 
Infeasible) { Model model; auto* intervals_repository = model.GetOrCreate(); @@ -508,9 +565,12 @@ TEST(ComputeMinSumOfEndMinsTest, Infeasible) { CtExhaustiveHelper ct_helper; ct_helper.Init(events, &model); bool cut_use_precedences = false; - ASSERT_FALSE(ComputeMinSumOfWeightedEndMins( - events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, - min_sum_of_weighted_end_mins, cut_use_precedences)); + int exploration_credit = 1000; + ASSERT_EQ(ComputeMinSumOfWeightedEndMins( + events, two, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences, + exploration_credit), + CompletionTimeExplorationStatus::NO_VALID_PERMUTATION); } double ExactMakespan(absl::Span sizes, std::vector& demands, @@ -570,15 +630,18 @@ double ExactMakespanBruteForce(absl::Span sizes, CtExhaustiveHelper ct_helper; ct_helper.Init(events, &model); bool cut_use_precedences = false; - EXPECT_TRUE(ComputeMinSumOfWeightedEndMins( - events, capacity, 0.01, 0.01, ct_helper, min_sum_of_end_mins, - min_sum_of_weighted_end_mins, cut_use_precedences)); + int exploration_credit = 10000; + EXPECT_EQ(ComputeMinSumOfWeightedEndMins( + events, capacity, 0.01, 0.01, ct_helper, min_sum_of_end_mins, + min_sum_of_weighted_end_mins, cut_use_precedences, + exploration_credit), + CompletionTimeExplorationStatus::FINISHED); return min_sum_of_end_mins; } TEST(ComputeMinSumOfEndMinsTest, RandomCases) { absl::BitGen random; - const int kNumTests = DEBUG_MODE ? 100 : 1000; + const int kNumTests = DEBUG_MODE ? 
50 : 500; const int kNumTasks = 7; for (int loop = 0; loop < kNumTests; ++loop) { const int capacity = absl::Uniform(random, 10, 30); diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index bc596380ec..69d0bad256 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -346,26 +346,20 @@ IntegerValue SchedulingConstraintHelper::GetCurrentMinDistanceBetweenTasks( int a, int b, bool add_reason_if_after) { const AffineExpression before = ends_[a]; const AffineExpression after = starts_[b]; - if (before.var == kNoIntegerVariable || before.coeff != 1 || - after.var == kNoIntegerVariable || after.coeff != 1) { - return kMinIntegerValue; - } + LinearExpression2 expr(before.var, after.var, before.coeff, -after.coeff); - // We take the max of the level zero offset and the one coming from a - // conditional precedence at true. - const IntegerValue conditional_offset = - precedence_relations_->GetConditionalOffset(before.var, after.var); - const IntegerValue known = integer_trail_->LevelZeroLowerBound(after.var) - - integer_trail_->LevelZeroUpperBound(before.var); - const IntegerValue offset = std::max(conditional_offset, known); + // We take the min of the level zero (end_a - start_b) and the one coming from + // a conditional precedence at true. 
+ const IntegerValue conditional_ub = precedence_relations_->UpperBound(expr); + const IntegerValue level_zero_ub = integer_trail_->LevelZeroUpperBound(expr); + const IntegerValue expr_ub = std::min(conditional_ub, level_zero_ub); const IntegerValue needed_offset = before.constant - after.constant; - const IntegerValue distance = offset - needed_offset; - if (add_reason_if_after && distance >= 0 && known < conditional_offset) { - for (const Literal l : precedence_relations_->GetConditionalEnforcements( - before.var, after.var)) { - literal_reason_.push_back(l.Negated()); - } + const IntegerValue ub_of_end_minus_start = expr_ub + needed_offset; + const IntegerValue distance = -ub_of_end_minus_start; + if (add_reason_if_after && distance >= 0 && level_zero_ub > conditional_ub) { + precedence_relations_->AddReasonForUpperBoundLowerThan( + expr, conditional_ub, MutableLiteralReason(), MutableIntegerReason()); } return distance; } @@ -394,7 +388,9 @@ bool SchedulingConstraintHelper::PropagatePrecedence(int a, int b) { } } const IntegerValue offset = before.constant - after.constant; - if (precedence_relations_->Add(before.var, after.var, offset)) { + const LinearExpression2 expr = + LinearExpression2::Difference(before.var, after.var); + if (precedence_relations_->AddUpperBound(expr, -offset)) { VLOG(2) << "new relation " << TaskDebugString(a) << " <= " << TaskDebugString(b); if (before.var == NegationOf(after.var)) { diff --git a/ortools/sat/simplification.cc b/ortools/sat/simplification.cc index 12497a1e91..590b2f1be1 100644 --- a/ortools/sat/simplification.cc +++ b/ortools/sat/simplification.cc @@ -231,6 +231,18 @@ void SatPresolver::SetNumVariables(int num_variables) { } } +void SatPresolver::RebuildLiteralToClauses() { + const int size = literal_to_clauses_.size(); + literal_to_clauses_.clear(); + literal_to_clauses_.resize(size); + for (ClauseIndex ci(0); ci < clauses_.size(); ++ci) { + for (const Literal lit : clauses_[ci]) { + 
literal_to_clauses_[lit].push_back(ci); + } + } + num_deleted_literals_since_last_cleanup_ = 0; +} + void SatPresolver::AddClauseInternal(std::vector* clause) { if (drat_proof_handler_ != nullptr) drat_proof_handler_->AddClause(*clause); @@ -365,6 +377,10 @@ bool SatPresolver::Presolve(const std::vector& can_be_removed) { if (!can_be_removed[var.value()]) continue; if (CrossProduct(Literal(var, true))) { if (!ProcessAllClauses()) return false; + + if (num_deleted_literals_since_last_cleanup_ > 1e7) { + RebuildLiteralToClauses(); + } } if (time_limit_ != nullptr && time_limit_->LimitReached()) return true; if (num_inspected_signatures_ + num_inspected_literals_ > 1e9) return true; @@ -702,9 +718,18 @@ bool SatPresolver::ProcessClauseToSimplifyOthers(ClauseIndex clause_index) { return true; } -void SatPresolver::RemoveAndRegisterForPostsolveAllClauseContaining(Literal x) { - for (ClauseIndex i : literal_to_clauses_[x]) { - if (!clauses_[i].empty()) RemoveAndRegisterForPostsolve(i, x); +void SatPresolver::RemoveAllClauseContaining(Literal x, + bool register_for_postsolve) { + if (register_for_postsolve) { + for (ClauseIndex i : literal_to_clauses_[x]) { + if (!clauses_[i].empty()) { + RemoveAndRegisterForPostsolve(i, x); + } + } + } else { + for (ClauseIndex i : literal_to_clauses_[x]) { + if (!clauses_[i].empty()) Remove(i); + } } gtl::STLClearObject(&literal_to_clauses_[x]); literal_to_clause_sizes_[x] = 0; @@ -725,18 +750,24 @@ bool SatPresolver::CrossProduct(Literal x) { } // Compute the threshold under which we don't remove x.Variable(). 
- int threshold = 0; + int num_clauses = 0; + int64_t sum_for_x = 0; + int64_t sum_for_not_x = 0; const int clause_weight = parameters_.presolve_bve_clause_weight(); for (ClauseIndex i : literal_to_clauses_[x]) { if (!clauses_[i].empty()) { - threshold += clause_weight + clauses_[i].size(); + ++num_clauses; + sum_for_x += clauses_[i].size(); } } for (ClauseIndex i : literal_to_clauses_[x.NegatedIndex()]) { if (!clauses_[i].empty()) { - threshold += clause_weight + clauses_[i].size(); + ++num_clauses; + sum_for_not_x += clauses_[i].size(); } } + const int64_t threshold = + clause_weight * num_clauses + sum_for_x + sum_for_not_x; // For the BCE, we prefer s2 to be small. if (s1 < s2) x = x.Negated(); @@ -792,8 +823,17 @@ bool SatPresolver::CrossProduct(Literal x) { // // TODO(user): We could only update the priority queue once for each variable // instead of doing it many times. - RemoveAndRegisterForPostsolveAllClauseContaining(x); - RemoveAndRegisterForPostsolveAllClauseContaining(x.Negated()); + bool push_x_for_postsolve = true; + bool push_not_x_for_postsolve = true; + if (parameters_.filter_sat_postsolve_clauses()) { + if (sum_for_x <= sum_for_not_x) { + push_not_x_for_postsolve = false; + } else { + push_x_for_postsolve = false; + } + } + RemoveAllClauseContaining(x, push_x_for_postsolve); + RemoveAllClauseContaining(x.Negated(), push_not_x_for_postsolve); // TODO(user): At this point x.Variable() is added back to the priority queue. // Avoid doing that. 
@@ -801,8 +841,9 @@ bool SatPresolver::CrossProduct(Literal x) { } void SatPresolver::Remove(ClauseIndex ci) { + num_deleted_literals_since_last_cleanup_ += clauses_[ci].size(); signatures_[ci] = 0; - for (Literal e : clauses_[ci]) { + for (const Literal e : clauses_[ci]) { literal_to_clause_sizes_[e]--; UpdatePriorityQueue(e.Variable()); UpdateBvaPriorityQueue(Literal(e.Variable(), true).Index()); diff --git a/ortools/sat/simplification.h b/ortools/sat/simplification.h index 75627dd2f1..1f29de687d 100644 --- a/ortools/sat/simplification.h +++ b/ortools/sat/simplification.h @@ -244,10 +244,14 @@ class SatPresolver { // after this call. void AddClauseInternal(std::vector* clause); + // Since we only cleanup the list lazily, literal_to_clauses_ memory usage + // can get out of hand, we clean it up periodically. + void RebuildLiteralToClauses(); + // Clause removal function. void Remove(ClauseIndex ci); void RemoveAndRegisterForPostsolve(ClauseIndex ci, Literal x); - void RemoveAndRegisterForPostsolveAllClauseContaining(Literal x); + void RemoveAllClauseContaining(Literal x, bool register_for_postsolve); // Call ProcessClauseToSimplifyOthers() on all the clauses in // clause_to_process_ and empty the list afterwards. Note that while some @@ -355,6 +359,10 @@ class SatPresolver { // Occurrence list. For each literal, contains the ClauseIndex of the clause // that contains it (ordered by clause index). + // + // This is cleaned up lazily, or when num_deleted_literals_since_last_cleanup_ + // becomes big. 
+ int64_t num_deleted_literals_since_last_cleanup_ = 0; util_intops::StrongVector> literal_to_clauses_; diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index e3cd1cd66d..a4272ca360 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -164,7 +164,7 @@ void SharedResponseManager::LogMessage(absl::string_view prefix, } void SharedResponseManager::LogMessageWithThrottling( - const std::string& prefix, const std::string& message) { + absl::string_view prefix, absl::string_view message) { absl::MutexLock mutex_lock(&mutex_); int id; diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index fe6a22fdf8..c6eadff080 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -399,8 +399,8 @@ class SharedResponseManager { // Wrapper around our SolverLogger, but protected by mutex. void LogMessage(absl::string_view prefix, absl::string_view message); - void LogMessageWithThrottling(const std::string& prefix, - const std::string& message); + void LogMessageWithThrottling(absl::string_view prefix, + absl::string_view message); bool LoggingIsEnabled() const; void AppendResponseToBeMerged(const CpSolverResponse& response); diff --git a/ortools/sat/util.h b/ortools/sat/util.h index ea18680bc4..4c2a26b9f3 100644 --- a/ortools/sat/util.h +++ b/ortools/sat/util.h @@ -26,6 +26,7 @@ #include #include +#include "absl/base/attributes.h" #include "absl/container/btree_set.h" #include "absl/log/check.h" #include "absl/log/log_streamer.h" @@ -310,7 +311,7 @@ bool LinearInequalityCanBeReducedWithClosestMultiple( // The model "singleton" random engine used in the solver. // // In test, we usually set use_absl_random() so that the sequence is changed at -// each invocation. This way, clients do not relly on the wrong assumption that +// each invocation. 
This way, clients do not really on the wrong assumption that // a particular optimal solution will be returned if they are many equivalent // ones. class ModelRandomGenerator : public absl::BitGenRef { @@ -979,6 +980,196 @@ inline void CompactVectorVector::ResetFromTranspose( starts_[0] = 0; } +// A class to generate all possible topological sorting of a dag. +// +// If the graph has no edges, it will generate all possible permutations. +// +// If the graph has edges, it will generate all possible permutations of the +// dag that are a topological sorting of the graph. +// +// The class maintains 5 fields: +// - graph_: a vector of vectors, where graph_[i] contains the list of +// elements that are adjacent to element i. +// - size_: the size of the graph. +// +// - missing_parent_numbers_: a vector of integers, where +// missing_parent_numbers_[i] is the number of parents of element i that are +// not yet in permutation_. Before Init() is called, no element is yet in +// permutation_ so that it is the number of parents of i. After Init(), and +// before Increase() returns true, it is always 0 (except during the +// execution of Increase(), see below). +// +// - permutation_: a vector of integers, that after Init() is called, and +// before Increase() returns false, it is a topological sorting of the graph +// (except during the execution of Increase()). +// - element_original_position_: a vector of integers, where +// element_original_position_[i] is the original position of element i in the +// permutation_. See the algorithm below for more details. + +class DagTopologicalSortIterator { + public: + // Graph maps indices to their children. Any children must exist. 
+ DagTopologicalSortIterator() : size_(0) {} + explicit DagTopologicalSortIterator(int size) : size_(size) { Reset(size); } + + void Reset(int size) { + size_ = size; + graph_.assign(size, {}); + missing_parent_numbers_.assign(size, 0); + permutation_.clear(); + element_original_position_.assign(size, 0); + } + + // Must be called before Init(). + void AddArc(int from, int to) { + DCHECK_GE(from, 0); + DCHECK_LT(from, size_); + DCHECK_GE(to, 0); + DCHECK_LT(to, size_); + graph_[from].push_back(to); + missing_parent_numbers_[to]++; + } + + // To describe the algorithm in Increase() and Init(), we consider the + // following invariant, called Invariant(pos) for a position pos in [0, + // size_): + // 1. permutations_[0], ..., permutations_[pos] form a prefix of a + // topological ordering of the graph; + // 2. permutations_[pos + 1], ..., permutations_.back() are all other + // elements that have all their parents in permutations_[0], ..., + // permutations_[pos], ordered lexicographically by the index of their + // last parent in permutations_[0], ... permutations_[pos] and then by + // their index in the graph; + // 3. missing_parent_numbers_[i] is the number of parents of element i that + // are not in {permutations_[0], ..., permutations_[pos]}. + // 4. element_original_position_[i] is the original position of element i of + // the permutation following the order described in 2. In particular, + // element_original_position_[i] = i for i > pos. + // Set and Unset maintain these invariants. + + // Precondition: Invariant(size_ - 1) holds. + // Postcondition: Invariant(size_ - 1) holds if Increase() returns true. + // If Increase() returns false, all topological orderings of the graph have + // been generated and the state of permutation_ is not specified.. + bool Increase() { + Unset(size_ - 1); + for (int pos = size_ - 2; pos >= 0; --pos) { + // Invariant(pos) holds. 
+ // Increasing logic: once permutation_[pos] has been put back to its + // original position by Unset(pos), elements permutations_[pos], ..., + // permutations_.back() are in their original ordering, in particular in + // the same order as last time the iteration on permutation_[pos] + // occurred (according to Invariant(pos).2, these are exactly the elements + // that have to be tried at pos). + // All possibilities in permutations_[pos], ..., + // permutations_[element_original_position_[pos]] have been run through. + // The next to test is permutations_[element_original_position_[pos] + 1]. + const int k = element_original_position_[pos] + 1; + Unset(pos); + // Invariant(pos - 1) holds. + + // No more elements to iterate on at position pos. Go backwards one + // position to increase that one. + if (k == permutation_.size()) continue; + Set(pos, k); + // Invariant(pos) holds. + for (++pos; pos < size_; ++pos) { + // Invariant(pos - 1) holds. + // According to Invariant(pos - 1).2, if pos >= permutation_.size(), + // there are no more elements we can add to the permutation which means + // that we detected a cycle. It would be a bug as we would have detected + // it in Init(). + CHECK_LT(pos, permutation_.size()) << "Cycle detected"; + // According to Invariant(pos - 1).2, elements that can be used at pos + // are permutations_[pos], ..., permutations_.back(). Starts the + // iteration at permutations_[pos]. + Set(pos, pos); + // Invariant(pos) holds. + } + // Invariant(size_ - 1) holds. + return true; + } + return false; + } + + // Must be called before Increase(). + ABSL_MUST_USE_RESULT bool Init() { + for (int i = 0; i < size_; ++i) { + if (missing_parent_numbers_[i] == 0) { + permutation_.push_back(i); + } + } + for (int pos = 0; pos < size_; ++pos) { + // Invariant(pos - 1) holds. + // According to Invariant(pos - 1).2, if pos >= permutation_.size(), + // there are no more elements we can add to the permutation. 
+ if (pos >= permutation_.size()) return false; + // According to Invariant(pos - 1).2, elements that can be used at pos + // are permutations_[pos], ..., permutations_.back(). Starts the + // iteration at permutations_[pos]. + Set(pos, pos); + // Invariant(pos) holds. + } + // Invariant(pos - 1) hold. We have a permutation. + return true; + } + + const std::vector& permutation() const { return permutation_; } + + private: + // Graph maps indices to their children. Children must be in [0, size_). + std::vector> graph_; + // Number of elements in graph_. + int size_; + // For each element in graph_, the number of parents it has that are not yet + // in permutation_. In particular, it is always 0 when Init has been called + // and when Increase is not in progress (and has not yet returned false). + std::vector missing_parent_numbers_; + // The current permutation. It is ensured to be a topological sorting of the + // graph once Init has been called and Increase has not yet returned false. + std::vector permutation_; + // Keeps track of the original position of the element in permutation_[i]. See + // the comment above the class for the detailed algorithm. + std::vector element_original_position_; + + // Unset the element at pos. + // + // - Precondition: Invariant(pos) holds. + // - Postcondition: Invariant(pos - 1) holds. + void Unset(int pos) { + const int n = permutation_[pos]; + // Before the loop: Invariant(pos).2 and Invariant(pos).3 hold. + // After the swap below: Invariant(pos - 1).2 and Invariant(pos - 1).3 hold. + for (const int c : graph_[n]) { + if (missing_parent_numbers_[c] == 0) permutation_.pop_back(); + ++missing_parent_numbers_[c]; + } + std::swap(permutation_[element_original_position_[pos]], permutation_[pos]); + // Invariant(pos).4 -> Invariant(pos - 1).4. + element_original_position_[pos] = pos; + } + + // Set the element at pos to the element at k. + // + // - Precondition: Invariant(pos - 1) holds and k in [pos, + // permutation_.size()). 
+ // - Postcondition: Invariant(pos) holds and permutation_[pos] has been + // swapped with permutation_[k]. + void Set(int pos, int k) { + int n = permutation_[k]; + // Before the loop: Invariant(pos - 1).2 and Invariant(pos - 1).3 hold. + // After the loop: Invariant(pos).2 and Invariant(pos).3 hold. + for (int c : graph_[n]) { + --missing_parent_numbers_[c]; + if (missing_parent_numbers_[c] == 0) permutation_.push_back(c); + } + // Invariant(pos - 1).1 -> Invariant(pos).1. + std::swap(permutation_[k], permutation_[pos]); + // Invariant(pos - 1).4 -> Invariant(pos).4. + element_original_position_[pos] = k; + } +}; + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/util_test.cc b/ortools/sat/util_test.cc index 17a06e893c..1e06fbfb2d 100644 --- a/ortools/sat/util_test.cc +++ b/ortools/sat/util_test.cc @@ -23,9 +23,11 @@ #include #include #include +#include #include #include "absl/container/btree_set.h" +#include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/numeric/int128.h" #include "absl/random/random.h" @@ -150,7 +152,7 @@ TEST(CompactVectorVectorTest, ResetFromTranspose) { EXPECT_THAT(transpose[4], ElementsAre(2)); EXPECT_THAT(transpose[5], ElementsAre(0)); - // Note that retransposing sorts ! + // Note that re-transposing sorts ! 
CompactVectorVector second_transpose; second_transpose.ResetFromTranspose(transpose); @@ -1065,6 +1067,105 @@ TEST(MaxBoundedSubsetSumExactTest, CornerCase) { EXPECT_EQ(helper.MaxSubsetSum({0, 5, 6}, 4), 0); } +TEST(DagTopologicalSortIteratorTest, GenerateValidPermutations) { + DagTopologicalSortIterator dag_iterator(6); + dag_iterator.AddArc(5, 2); + dag_iterator.AddArc(5, 0); + dag_iterator.AddArc(4, 0); + dag_iterator.AddArc(4, 1); + dag_iterator.AddArc(2, 3); + dag_iterator.AddArc(3, 1); + EXPECT_TRUE(dag_iterator.Init()); + int count = 0; + do { + ++count; + } while (dag_iterator.Increase()); + EXPECT_EQ(count, 13); +} + +TEST(DagTopologicalSortIteratorTest, GenerateAllPermutations) { + DagTopologicalSortIterator dag_iterator(6); + EXPECT_TRUE(dag_iterator.Init()); + int count = 0; + do { + ++count; + } while (dag_iterator.Increase()); + EXPECT_EQ(count, 720); +} + +TEST(DagTopologicalSortIteratorTest, OnePrecedence) { + DagTopologicalSortIterator dag_iterator(6); + dag_iterator.AddArc(5, 2); + EXPECT_TRUE(dag_iterator.Init()); + int count = 0; + do { + ++count; + } while (dag_iterator.Increase()); + EXPECT_EQ(count, 360); +} + +TEST(DagTopologicalSortIteratorTest, ReversePrecedence) { + DagTopologicalSortIterator dag_iterator(6); + dag_iterator.AddArc(2, 5); + EXPECT_TRUE(dag_iterator.Init()); + int count = 0; + do { + ++count; + } while (dag_iterator.Increase()); + EXPECT_EQ(count, 360); +} + +TEST(DagTopologicalSortIteratorTest, RandomTest) { + absl::BitGen random; + for (int i = 0; i < 5000; ++i) { + DagTopologicalSortIterator dag_iterator(6); + + const int num_arcs = absl::Uniform(random, 1, 10); + absl::flat_hash_set> arcs; + while (arcs.size() < num_arcs) { + const int from = absl::Uniform(random, 0, 6); + int to = absl::Uniform(random, 0, 5); + if (from == to) ++to; + if (arcs.insert({from, to}).second) { + dag_iterator.AddArc(from, to); + } + } + + absl::flat_hash_set> iterator_solutions; + int count_iterator = 0; + if (dag_iterator.Init()) { + do { + 
++count_iterator; + iterator_solutions.insert(dag_iterator.permutation()); + } while (dag_iterator.Increase()); + } + + std::vector permutation = {0, 1, 2, 3, 4, 5}; + absl::flat_hash_set> permutation_solutions; + int count_permutation = 0; + do { + bool ok = true; + for (int i = 1; i < permutation.size(); ++i) { + if (!ok) break; + const int after = permutation[i]; + for (int j = 0; j < i; ++j) { + const int before = permutation[j]; + if (arcs.contains({after, before})) { + ok = false; + break; + } + } + } + if (ok) { + ++count_permutation; + permutation_solutions.insert(permutation); + } + } while (std::next_permutation(permutation.begin(), permutation.end())); + EXPECT_EQ(count_permutation, count_iterator); + EXPECT_EQ(iterator_solutions, permutation_solutions); + } +} + } // namespace } // namespace sat } // namespace operations_research From de3d0b452a96488d9246d9cfa63fd2c3282193e6 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 27 May 2025 13:49:48 +0200 Subject: [PATCH 037/509] inprove code --- ortools/util/fp_utils.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h index 4590ce1167..a5e6cec283 100644 --- a/ortools/util/fp_utils.h +++ b/ortools/util/fp_utils.h @@ -163,7 +163,8 @@ bool AreWithinAbsoluteTolerance(FloatType x, FloatType y, template bool IsSmallerWithinTolerance(FloatType x, FloatType y, FloatType tolerance) { if (IsPositiveOrNegativeInfinity(y)) return x <= y; - return x <= y + tolerance * std::max(1.0, std::min(std::abs(x), std::abs(y))); + return x <= y + tolerance * std::max(FloatType(1.0), + std::min(std::abs(x), std::abs(y))); } // Returns true if x is within tolerance of any integer. 
Always returns From 947a211284fda521c4325bc599e053319d885d7b Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 27 May 2025 14:26:36 +0200 Subject: [PATCH 038/509] bazel: cleanup cc_binary load --- ortools/sat/BUILD.bazel | 2 +- ortools/set_cover/BUILD.bazel | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 56eb67a470..44c42cfcd1 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -17,8 +17,8 @@ load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@protobuf//bazel:java_proto_library.bzl", "java_proto_library") load("@protobuf//bazel:proto_library.bzl", "proto_library") load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") load("@rules_go//proto:def.bzl", "go_proto_library") -load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") package(default_visibility = ["//visibility:public"]) diff --git a/ortools/set_cover/BUILD.bazel b/ortools/set_cover/BUILD.bazel index 00386cafe3..c38cf696ad 100644 --- a/ortools/set_cover/BUILD.bazel +++ b/ortools/set_cover/BUILD.bazel @@ -14,7 +14,7 @@ load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@protobuf//bazel:proto_library.bzl", "proto_library") load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") -load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") +load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") package(default_visibility = ["//visibility:public"]) From 77e8bb8e65fce5e910bf4c1d335347a4750928b3 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 27 May 2025 16:41:13 +0200 Subject: [PATCH 039/509] lp_data: Fix compilation (#4662) --- ortools/lp_data/lp_parser.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ortools/lp_data/lp_parser.cc b/ortools/lp_data/lp_parser.cc index f41e28626a..c8654c49e1 100644 --- a/ortools/lp_data/lp_parser.cc 
+++ b/ortools/lp_data/lp_parser.cc @@ -237,6 +237,9 @@ bool LPParser::ParseConstraint(StringPiece constraint) { namespace { +template +constexpr bool dependent_false = false; // workaround before CWG2518/P2593R1 + template bool SimpleAtoFractional(absl::string_view str, T* value) { if constexpr (std::is_same_v) { @@ -244,7 +247,7 @@ bool SimpleAtoFractional(absl::string_view str, T* value) { } else if constexpr (std::is_same_v) { return absl::SimpleAtof(str, value); } else { - static_assert(false, "Unsupported fractional type"); + static_assert(dependent_false, "Unsupported fractional type"); return false; } } From 5b1da719d7588ec758485fb3c4ccb496af3ac75d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 09:50:18 +0200 Subject: [PATCH 040/509] make: Fix archive_cpp target --- makefiles/Makefile.cpp.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefiles/Makefile.cpp.mk b/makefiles/Makefile.cpp.mk index 05fa5692fe..3d3e656ba0 100644 --- a/makefiles/Makefile.cpp.mk +++ b/makefiles/Makefile.cpp.mk @@ -249,7 +249,7 @@ else endif endef -CPP_SAMPLES := algorithms graph glop constraint_solver linear_solver math_opt model_builder pdlp routing sat +CPP_SAMPLES := algorithms graph glop constraint_solver linear_solver math_opt model_builder pdlp routing sat set_cover $(foreach sample,$(CPP_SAMPLES),$(eval $(call cpp-sample-target,$(sample)))) # Examples From 92b92f41b9d32ae29ff79c344a8c299c7a7c0300 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 09:51:09 +0200 Subject: [PATCH 041/509] cmake: log Python executable path at configure --- cmake/dependencies/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index d0f14a71a0..bdd1d4362c 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -194,6 +194,7 @@ endif() if(BUILD_PYTHON) # Find Python 3 find_package(Python3 REQUIRED COMPONENTS 
Interpreter Development.Module) + message(STATUS "Python: ${Python3_EXECUTABLE}") if(BUILD_pybind11) message(CHECK_START "Fetching pybind11") From 9b18eff9ec4fc4af7f189d5dd44360dcc4433e96 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 10:11:38 +0200 Subject: [PATCH 042/509] make: fix archive_python target --- makefiles/Makefile.python.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefiles/Makefile.python.mk b/makefiles/Makefile.python.mk index 4922f5b926..161abfa9ed 100644 --- a/makefiles/Makefile.python.mk +++ b/makefiles/Makefile.python.mk @@ -81,7 +81,7 @@ rpy_%: \ "$(PYTHON_EXECUTABLE)" ortools$S$1$Ssamples$S$$*.py $(ARGS) endef -PYTHON_SAMPLES := algorithms graph constraint_solver linear_solver math_opt pdlp routing sat +PYTHON_SAMPLES := algorithms graph constraint_solver linear_solver math_opt pdlp routing sat set_cover $(foreach sample,$(PYTHON_SAMPLES),$(eval $(call python-sample-target,$(sample)))) # Examples From ac26215f36fa90082661f76ea0e78b00a3117211 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 11:10:26 +0200 Subject: [PATCH 043/509] tools/docker: Add Fedora-42 --- tools/docker/Makefile | 3 +- tools/docker/images/fedora-42.Dockerfile | 102 ++++++++++++++++++ tools/docker/test/fedora-42/cpp.Dockerfile | 14 +++ tools/docker/test/fedora-42/dotnet.Dockerfile | 22 ++++ tools/docker/test/fedora-42/java.Dockerfile | 20 ++++ tools/docker/test/fedora-42/python.Dockerfile | 14 +++ 6 files changed, 174 insertions(+), 1 deletion(-) create mode 100644 tools/docker/images/fedora-42.Dockerfile create mode 100644 tools/docker/test/fedora-42/cpp.Dockerfile create mode 100644 tools/docker/test/fedora-42/dotnet.Dockerfile create mode 100644 tools/docker/test/fedora-42/java.Dockerfile create mode 100644 tools/docker/test/fedora-42/python.Dockerfile diff --git a/tools/docker/Makefile b/tools/docker/Makefile index dbc2101ee9..58a946d600 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile 
@@ -96,6 +96,7 @@ help: # @echo -e "\t\t${BOLD}debian-13${RESET} (Trixie)" @echo -e "\t\t${BOLD}debian-12${RESET} (Bookworm)" @echo -e "\t\t${BOLD}debian-11${RESET} (Bullseye)" + @echo -e "\t\t${BOLD}fedora-42${RESET}" @echo -e "\t\t${BOLD}fedora-41${RESET}" @echo -e "\t\t${BOLD}fedora-40${RESET}" @echo -e "\t\t${BOLD}opensuse-leap${RESET} (latest)" @@ -431,7 +432,7 @@ DISTROS := \ alpine-edge \ archlinux \ debian-11 debian-12 debian-sid \ - fedora-40 fedora-41 \ + fedora-40 fedora-41 fedora-42 \ opensuse-leap \ rockylinux-9 \ ubuntu-20.04 ubuntu-22.04 ubuntu-24.04 ubuntu-24.10 diff --git a/tools/docker/images/fedora-42.Dockerfile b/tools/docker/images/fedora-42.Dockerfile new file mode 100644 index 0000000000..33a8524552 --- /dev/null +++ b/tools/docker/images/fedora-42.Dockerfile @@ -0,0 +1,102 @@ +# ref: https://hub.docker.com/_/fedora +FROM fedora:42 AS env + +############# +## SETUP ## +############# +RUN dnf -y update \ +&& dnf -y install git \ + wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ +&& dnf -y install @development-tools \ +&& dnf -y install gcc-c++ cmake \ +&& dnf clean all +ENTRYPOINT ["/usr/bin/bash", "-c"] +CMD ["/usr/bin/bash"] + +# Install SWIG +RUN dnf -y update \ +&& dnf -y install swig \ +&& dnf clean all + +# Install .Net +# see: https://docs.microsoft.com/en-us/dotnet/core/install/linux-fedora +RUN dnf -y update \ +&& dnf -y install dotnet-sdk-8.0 crypto-policies-scripts \ +&& dnf clean all +# https://docs.redhat.com/en/documentation/net/6.0/html/release_notes_for_.net_6.0_rpm_packages/known-issues_release-notes-for-dotnet-rpms +RUN update-crypto-policies --set DEFAULT:SHA1 +# Trigger first run experience by running arbitrary cmd +RUN dotnet --info + +# Install Java +RUN dnf -y update \ +&& dnf -y install java-11-openjdk java-11-openjdk-devel maven \ +&& dnf clean all +ENV JAVA_HOME=/usr/lib/jvm/java-openjdk + +# Install Python +RUN dnf -y update \ +&& dnf -y install python3 python3-devel python3-pip \ +&& dnf clean all 
+RUN python3 -m pip install absl-py mypy mypy-protobuf + +################ +## OR-TOOLS ## +################ +FROM env AS devel +WORKDIR /root +# Copy the snk key +COPY or-tools.snk /root/or-tools.snk +ENV DOTNET_SNK=/root/or-tools.snk + +ARG SRC_GIT_BRANCH +ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ARG SRC_GIT_SHA1 +ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} + +ARG OR_TOOLS_PATCH +ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} + +# Download sources +# use SRC_GIT_SHA1 to modify the command +# i.e. avoid docker reusing the cache when new commit is pushed +RUN git clone -b "${SRC_GIT_BRANCH}" --single-branch --depth=1 https://github.com/google/or-tools \ +&& [[ $(cd or-tools && git rev-parse --verify HEAD) == ${SRC_GIT_SHA1} ]] +WORKDIR /root/or-tools + +# C++ +## build +FROM devel AS cpp_build +RUN make detect_cpp \ +&& make cpp JOBS=8 +## archive +FROM cpp_build AS cpp_archive +RUN make archive_cpp + +# .Net +## build +FROM cpp_build AS dotnet_build +ENV USE_DOTNET_CORE_31=ON +RUN make detect_dotnet \ +&& make dotnet JOBS=8 +## archive +FROM dotnet_build AS dotnet_archive +RUN make archive_dotnet + +# Java +## build +FROM cpp_build AS java_build +RUN make detect_java \ +&& make java JOBS=8 +## archive +FROM java_build AS java_archive +RUN make archive_java + +# Python +## build +FROM cpp_build AS python_build +RUN make detect_python \ +&& make python JOBS=8 +## archive +FROM python_build AS python_archive +RUN make archive_python diff --git a/tools/docker/test/fedora-42/cpp.Dockerfile b/tools/docker/test/fedora-42/cpp.Dockerfile new file mode 100644 index 0000000000..80356489bd --- /dev/null +++ b/tools/docker/test/fedora-42/cpp.Dockerfile @@ -0,0 +1,14 @@ +# ref: https://hub.docker.com/_/fedora +FROM fedora:42 + +RUN dnf -y update \ +&& dnf -y install git \ + wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ +&& dnf -y install @development-tools \ +&& dnf -y install gcc-c++ cmake \ +&& dnf clean all + +WORKDIR /root +ADD 
or-tools_amd64_fedora-42_cpp_v*.tar.gz . + +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/fedora-42/dotnet.Dockerfile b/tools/docker/test/fedora-42/dotnet.Dockerfile new file mode 100644 index 0000000000..118af294a8 --- /dev/null +++ b/tools/docker/test/fedora-42/dotnet.Dockerfile @@ -0,0 +1,22 @@ +# ref: https://hub.docker.com/_/fedora +FROM fedora:42 + +RUN dnf -y update \ +&& dnf -y install git \ + wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ +&& dnf -y install @development-tools \ +&& dnf -y install gcc-c++ cmake \ +&& dnf clean all + +# Install .Net +# see: https://docs.microsoft.com/en-us/dotnet/core/install/linux-fedora +RUN dnf -y update \ +&& dnf -y install dotnet-sdk-8.0 \ +&& dnf clean all +# Trigger first run experience by running arbitrary cmd +RUN dotnet --info + +WORKDIR /root +ADD or-tools_amd64_fedora-42_dotnet_v*.tar.gz . + +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/fedora-42/java.Dockerfile b/tools/docker/test/fedora-42/java.Dockerfile new file mode 100644 index 0000000000..95666161de --- /dev/null +++ b/tools/docker/test/fedora-42/java.Dockerfile @@ -0,0 +1,20 @@ +# ref: https://hub.docker.com/_/fedora +FROM fedora:42 + +RUN dnf -y update \ +&& dnf -y install git \ + wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ +&& dnf -y install @development-tools \ +&& dnf -y install gcc-c++ cmake \ +&& dnf clean all + +# Java Install +RUN dnf -y update \ +&& dnf -y install java-11-openjdk java-11-openjdk-devel maven \ +&& dnf clean all +ENV JAVA_HOME=/usr/lib/jvm/java-openjdk + +WORKDIR /root +ADD or-tools_amd64_fedora-42_java_v*.tar.gz . 
+ +RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/fedora-42/python.Dockerfile b/tools/docker/test/fedora-42/python.Dockerfile new file mode 100644 index 0000000000..a5d0bf0ba7 --- /dev/null +++ b/tools/docker/test/fedora-42/python.Dockerfile @@ -0,0 +1,14 @@ +# ref: https://hub.docker.com/_/fedora +FROM fedora:42 + +RUN dnf -y update \ +&& dnf -y install git \ + wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ +&& dnf -y install @development-tools \ +&& dnf -y install gcc-c++ cmake \ +&& dnf clean all + +WORKDIR /root +ADD or-tools_amd64_fedora-42_python_v*.tar.gz . + +RUN cd or-tools_*_v* && make test From b223ef813403a223ef888f6c38ec0b6e3fb9bdd0 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 11:10:58 +0200 Subject: [PATCH 044/509] tools/docker: target and fix dotnet 8.0 support --- tools/docker/Makefile | 2 +- tools/docker/images/almalinux-9.Dockerfile | 2 +- tools/docker/images/debian-11.Dockerfile | 2 +- tools/docker/images/debian-12.Dockerfile | 2 +- tools/docker/images/debian-13.Dockerfile | 2 +- tools/docker/images/debian-sid.Dockerfile | 2 +- tools/docker/images/opensuse-leap.Dockerfile | 2 +- tools/docker/images/rockylinux-9.Dockerfile | 2 +- tools/docker/images/ubuntu-20.04.Dockerfile | 2 +- tools/docker/images/ubuntu-22.04.Dockerfile | 2 +- .../docker/test/almalinux-9/dotnet.Dockerfile | 2 +- tools/docker/test/debian-11/dotnet.Dockerfile | 2 +- tools/docker/test/debian-12/dotnet.Dockerfile | 2 +- tools/docker/test/debian-13/dotnet.Dockerfile | 2 +- .../docker/test/debian-sid/dotnet.Dockerfile | 2 +- .../test/opensuse-leap/dotnet.Dockerfile | 31 +++++-------------- .../test/rockylinux-9/dotnet.Dockerfile | 2 +- .../test/ubuntu-20.04/dotnet.Dockerfile | 2 +- .../test/ubuntu-22.04/dotnet.Dockerfile | 2 +- 19 files changed, 25 insertions(+), 42 deletions(-) diff --git a/tools/docker/Makefile b/tools/docker/Makefile index 58a946d600..ccb9bf8703 100644 --- a/tools/docker/Makefile +++ 
b/tools/docker/Makefile @@ -116,7 +116,7 @@ help: @echo @echo -e "\t${BOLD}${RESET}: Language to build" @echo -e "\t\t${BOLD}cpp${RESET} C++" - @echo -e "\t\t${BOLD}dotnet${RESET} .Net Core 3.1 and/or .Net 6.0 wrappers" + @echo -e "\t\t${BOLD}dotnet${RESET} .Net Core 3.1 and/or .Net 8.0 wrappers" @echo -e "\t\t${BOLD}java${RESET} Java (JDK 8.0) wrappers" @echo -e "\t\t${BOLD}python${RESET} Python 3.9+ wrappers" @echo diff --git a/tools/docker/images/almalinux-9.Dockerfile b/tools/docker/images/almalinux-9.Dockerfile index c20d3b77ab..55bc67a1ca 100644 --- a/tools/docker/images/almalinux-9.Dockerfile +++ b/tools/docker/images/almalinux-9.Dockerfile @@ -33,7 +33,7 @@ RUN dnf -y update \ RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& ./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/images/debian-11.Dockerfile b/tools/docker/images/debian-11.Dockerfile index 6092ff9abc..6d06748800 100644 --- a/tools/docker/images/debian-11.Dockerfile +++ b/tools/docker/images/debian-11.Dockerfile @@ -25,7 +25,7 @@ RUN ARCH=$(uname -m) \ RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& ./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/images/debian-12.Dockerfile b/tools/docker/images/debian-12.Dockerfile index d0ce8c61fb..4ec3143266 100644 --- a/tools/docker/images/debian-12.Dockerfile +++ b/tools/docker/images/debian-12.Dockerfile @@ -18,7 +18,7 @@ CMD ["/bin/bash"] RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& 
./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/images/debian-13.Dockerfile b/tools/docker/images/debian-13.Dockerfile index 770f04d1f5..eea5a1992e 100644 --- a/tools/docker/images/debian-13.Dockerfile +++ b/tools/docker/images/debian-13.Dockerfile @@ -18,7 +18,7 @@ CMD ["/bin/bash"] RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& ./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/images/debian-sid.Dockerfile b/tools/docker/images/debian-sid.Dockerfile index 37ff35bf24..fc48b81fc5 100644 --- a/tools/docker/images/debian-sid.Dockerfile +++ b/tools/docker/images/debian-sid.Dockerfile @@ -25,7 +25,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-6.0 \ +&& apt-get install -qq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/images/opensuse-leap.Dockerfile b/tools/docker/images/opensuse-leap.Dockerfile index e6ed9c4be5..b3efec0857 100644 --- a/tools/docker/images/opensuse-leap.Dockerfile +++ b/tools/docker/images/opensuse-leap.Dockerfile @@ -26,7 +26,7 @@ RUN zypper refresh \ RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& ./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/images/rockylinux-9.Dockerfile 
b/tools/docker/images/rockylinux-9.Dockerfile index f84ab889de..c70ec35b84 100644 --- a/tools/docker/images/rockylinux-9.Dockerfile +++ b/tools/docker/images/rockylinux-9.Dockerfile @@ -33,7 +33,7 @@ RUN dnf -y update \ RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& ./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/images/ubuntu-20.04.Dockerfile b/tools/docker/images/ubuntu-20.04.Dockerfile index 50861698f9..6a0e6cf91d 100644 --- a/tools/docker/images/ubuntu-20.04.Dockerfile +++ b/tools/docker/images/ubuntu-20.04.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update -qq \ && wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb \ && dpkg -i packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -yq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get install -yq dotnet-sdk-3.1 dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/images/ubuntu-22.04.Dockerfile b/tools/docker/images/ubuntu-22.04.Dockerfile index 216cd1386c..92f0c60a29 100644 --- a/tools/docker/images/ubuntu-22.04.Dockerfile +++ b/tools/docker/images/ubuntu-22.04.Dockerfile @@ -24,7 +24,7 @@ RUN apt-get update -qq \ # see: https://docs.microsoft.com/en-us/dotnet/core/install/linux-ubuntu # see: https://github.com/dotnet/core/pull/7423/files RUN apt-get update -qq \ -&& apt-get install -yq dotnet-sdk-6.0 \ +&& apt-get install -yq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/almalinux-9/dotnet.Dockerfile b/tools/docker/test/almalinux-9/dotnet.Dockerfile index bd6403762b..a867a3bbc3 100644 --- 
a/tools/docker/test/almalinux-9/dotnet.Dockerfile +++ b/tools/docker/test/almalinux-9/dotnet.Dockerfile @@ -13,7 +13,7 @@ RUN dnf -y update \ # Install .Net RUN dnf -y update \ -&& dnf -y install dotnet-sdk-6.0 \ +&& dnf -y install dotnet-sdk-8.0 \ && dnf clean all \ && rm -rf /var/cache/dnf # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/debian-11/dotnet.Dockerfile b/tools/docker/test/debian-11/dotnet.Dockerfile index 7cf56aab50..0c1344ab6b 100644 --- a/tools/docker/test/debian-11/dotnet.Dockerfile +++ b/tools/docker/test/debian-11/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/debian-12/dotnet.Dockerfile b/tools/docker/test/debian-12/dotnet.Dockerfile index eb46d03985..6d3c5543d4 100644 --- a/tools/docker/test/debian-12/dotnet.Dockerfile +++ b/tools/docker/test/debian-12/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/debian-13/dotnet.Dockerfile b/tools/docker/test/debian-13/dotnet.Dockerfile index 35786df018..571d312853 100644 --- a/tools/docker/test/debian-13/dotnet.Dockerfile +++ b/tools/docker/test/debian-13/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& 
apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/debian-sid/dotnet.Dockerfile b/tools/docker/test/debian-sid/dotnet.Dockerfile index fb48518b33..506c4048ad 100644 --- a/tools/docker/test/debian-sid/dotnet.Dockerfile +++ b/tools/docker/test/debian-sid/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-6.0 \ +&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/opensuse-leap/dotnet.Dockerfile b/tools/docker/test/opensuse-leap/dotnet.Dockerfile index 6c05e32fb2..8153d9b9e6 100644 --- a/tools/docker/test/opensuse-leap/dotnet.Dockerfile +++ b/tools/docker/test/opensuse-leap/dotnet.Dockerfile @@ -13,30 +13,13 @@ ENTRYPOINT ["/usr/bin/bash", "-c"] CMD ["/usr/bin/bash"] # .Net Install -RUN zypper install -y wget tar gzip libicu-devel - -RUN mkdir -p /usr/share/dotnet \ -&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet - -# see: https://dotnet.microsoft.com/download/dotnet-core/3.1 -RUN dotnet_sdk_version=3.1.415 \ -&& wget -qO dotnet.tar.gz \ -"https://builds.dotnet.microsoft.com/dotnet/Sdk/${dotnet_sdk_version}/dotnet-sdk-${dotnet_sdk_version}-linux-x64.tar.gz" \ -&& dotnet_sha512='df7a6d1abed609c382799a8f69f129ec72ce68236b2faecf01aed4c957a40a9cfbbc9126381bf517dff3dbe0e488f1092188582701dd0fef09a68b8c5707c747' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz -# Trigger first run experience by running arbitrary cmd -RUN dotnet --info - -# see: 
https://dotnet.microsoft.com/download/dotnet-core/6.0 -RUN dotnet_sdk_version=6.0.100 \ -&& wget -qO dotnet.tar.gz \ -"https://builds.dotnet.microsoft.com/dotnet/Sdk/${dotnet_sdk_version}/dotnet-sdk-${dotnet_sdk_version}-linux-x64.tar.gz" \ -&& dotnet_sha512='cb0d174a79d6294c302261b645dba6a479da8f7cf6c1fe15ae6998bc09c5e0baec810822f9e0104e84b0efd51fdc0333306cb2a0a6fcdbaf515a8ad8cf1af25b' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz +RUN zypper refresh \ +&& zypper install -y wget tar gzip libicu-devel +# see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install +RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ +&& chmod a+x dotnet-install.sh \ +&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/test/rockylinux-9/dotnet.Dockerfile b/tools/docker/test/rockylinux-9/dotnet.Dockerfile index 62a7bd0f53..d3429117be 100644 --- a/tools/docker/test/rockylinux-9/dotnet.Dockerfile +++ b/tools/docker/test/rockylinux-9/dotnet.Dockerfile @@ -13,7 +13,7 @@ RUN dnf -y update \ # Install .Net RUN dnf -y update \ -&& dnf -y install dotnet-sdk-6.0 \ +&& dnf -y install dotnet-sdk-8.0 \ && dnf clean all \ && rm -rf /var/cache/dnf # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile b/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile index 788e0ce6a0..fa5faca501 100644 --- a/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile +++ b/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb \ && dpkg -i packages-microsoft-prod.deb \ && apt-get update -qq \ -&& DEBIAN_FRONTEND=noninteractive apt-get install -yq dotnet-sdk-3.1 
dotnet-sdk-6.0 \ +&& DEBIAN_FRONTEND=noninteractive apt-get install -yq dotnet-sdk-3.1 dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/ubuntu-22.04/dotnet.Dockerfile b/tools/docker/test/ubuntu-22.04/dotnet.Dockerfile index dabae8071e..4fd3e88570 100644 --- a/tools/docker/test/ubuntu-22.04/dotnet.Dockerfile +++ b/tools/docker/test/ubuntu-22.04/dotnet.Dockerfile @@ -10,7 +10,7 @@ RUN apt-get update -qq \ # Install .Net # see https://docs.microsoft.com/en-us/dotnet/core/install/linux-ubuntu#2110- RUN apt-get update -qq \ -&& apt-get install -yq dotnet-sdk-6.0 \ +&& apt-get install -yq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd From 2b792c01cfe68b0b8ff24a17a652a2886f1b71c4 Mon Sep 17 00:00:00 2001 From: Peter Mitri Date: Mon, 2 Jun 2025 13:44:31 +0200 Subject: [PATCH 045/509] MPSolver-XPRESS: Remove superfluous calls to XPRSloadlp and XPRScreateprob --- ortools/linear_solver/xpress_interface.cc | 28 ++++++++----------- .../linear_solver/xpress_interface_test.cc | 15 +++++++++- ortools/xpress/environment.cc | 8 +++--- ortools/xpress/environment.h | 5 ++-- 4 files changed, 32 insertions(+), 24 deletions(-) diff --git a/ortools/linear_solver/xpress_interface.cc b/ortools/linear_solver/xpress_interface.cc index 590ab4eaad..0fa2b23465 100644 --- a/ortools/linear_solver/xpress_interface.cc +++ b/ortools/linear_solver/xpress_interface.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include "absl/strings/str_format.h" #include "ortools/base/logging.h" @@ -844,7 +845,6 @@ XpressInterface::XpressInterface(MPSolver* const solver, bool mip) CHECK_STATUS(status); DCHECK(mLp != nullptr); // should not be NULL if status=0 int nReturn = XPRSaddcbmessage(mLp, optimizermsg, (void*)this, 0); - CHECK_STATUS(XPRSloadlp(mLp, "newProb", 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0)); CHECK_STATUS( XPRSchgobjsense(mLp, maximize_ ? XPRS_OBJ_MAXIMIZE : XPRS_OBJ_MINIMIZE)); } @@ -875,20 +875,15 @@ std::string XpressInterface::SolverVersion() const { // ------ Model modifications and extraction ----- void XpressInterface::Reset() { - // Instead of explicitly clearing all modeling objects we - // just delete the problem object and allocate a new one. - CHECK_STATUS(XPRSdestroyprob(mLp)); - - int status; - status = XPRScreateprob(&mLp); - CHECK_STATUS(status); - DCHECK(mLp != nullptr); // should not be NULL if status=0 - int nReturn = XPRSaddcbmessage(mLp, optimizermsg, (void*)this, 0); - CHECK_STATUS(XPRSloadlp(mLp, "newProb", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)); - - CHECK_STATUS( - XPRSchgobjsense(mLp, maximize_ ? XPRS_OBJ_MAXIMIZE : XPRS_OBJ_MINIMIZE)); - + int nRows = getnumrows(mLp); + std::vector rows(nRows); + std::iota(rows.begin(), rows.end(), 0); + int nCols = getnumcols(mLp); + std::vector cols(nCols); + std::iota(cols.begin(), cols.end(), 0); + XPRSdelrows(mLp, nRows, rows.data()); + XPRSdelcols(mLp, nCols, cols.data()); + XPRSdelobj(mLp, 0); ResetExtractionInformation(); mCstat.clear(); mRstat.clear(); @@ -985,8 +980,7 @@ void XpressInterface::MakeRhs(double lb, double ub, double& rhs, char& sense, << (ub - std::abs(ub - lb)) << "]"; } rhs = ub; - range = std::abs( - ub - lb); // This happens implicitly by XPRSaddrows() and XPRSloadlp() + range = std::abs(ub - lb); // This happens implicitly by XPRSaddrows() sense = 'R'; } else if (ub < XPRS_PLUSINFINITY || (std::abs(ub) == XPRS_PLUSINFINITY && std::abs(lb) > XPRS_PLUSINFINITY)) { diff --git a/ortools/linear_solver/xpress_interface_test.cc b/ortools/linear_solver/xpress_interface_test.cc index 36e020cb07..16925a1e36 100644 --- a/ortools/linear_solver/xpress_interface_test.cc +++ b/ortools/linear_solver/xpress_interface_test.cc @@ -155,6 +155,15 @@ class XPRSGetter { return value; } + std::string getStringAttribute(int attrib) { + std::string value(280, '\0'); + int valueSize; 
+ EXPECT_STATUS(XPRSgetstringattrib(prob(), attrib, &value[0], value.size(), + &valueSize)); + value.resize(valueSize - 1); + return value; + } + private: MPSolver* solver_; @@ -400,12 +409,16 @@ TEST_F(XpressFixtureMIP, Reset) { solver.MakeBoolVar("x1"); solver.MakeBoolVar("x2"); solver.MakeRowConstraint(12., 100.0); + solver.MutableObjective()->SetMaximization(); solver.Solve(); EXPECT_EQ(getter.getNumConstraints(), 1); EXPECT_EQ(getter.getNumVariables(), 2); + auto oldProbUuid = getter.getStringAttribute(XPRS_UUID); solver.Reset(); + EXPECT_EQ(getter.getStringAttribute(XPRS_UUID), oldProbUuid); EXPECT_EQ(getter.getNumConstraints(), 0); EXPECT_EQ(getter.getNumVariables(), 0); + EXPECT_EQ(getter.getObjectiveSense(), XPRS_OBJ_MAXIMIZE); } TEST_F(XpressFixtureMIP, MakeIntVar) { @@ -737,7 +750,7 @@ TEST_F(XpressFixtureMIP, Write) { // disable formatting to keep the expected MPS readable // clang-format off std::string expectedMps = std::string("") + - "NAME newProb" + "\n" + + "NAME " + "\n" + "OBJSENSE MAXIMIZE" + "\n" + "ROWS" + "\n" + " N __OBJ___ " + "\n" + diff --git a/ortools/xpress/environment.cc b/ortools/xpress/environment.cc index 51f1025a76..5e628099e2 100644 --- a/ortools/xpress/environment.cc +++ b/ortools/xpress/environment.cc @@ -65,10 +65,9 @@ std::function XPRSgetintcon std::function XPRSgetdblcontrol = nullptr; std::function XPRSgetstringcontrol = nullptr; std::function XPRSgetintattrib = nullptr; +std::function XPRSgetstringattrib = nullptr; std::function XPRSgetdblattrib = nullptr; std::function XPRSgetcontrolinfo = nullptr; -std::function XPRSloadlp = nullptr; -std::function XPRSloadlp64 = nullptr; std::function XPRSgetobj = nullptr; std::function XPRSgetrhs = nullptr; std::function XPRSgetrhsrange = nullptr; @@ -104,6 +103,7 @@ std::function XPRSchgrhs = nullptr; std::function XPRSchgrhsrange = nullptr; std::function XPRSchgrowtype = nullptr; +std::function XPRSdelobj = nullptr; std::function XPRSaddcbintsol = nullptr; std::function 
XPRSremovecbintsol = nullptr; std::function XPRSaddcbmessage = nullptr; @@ -141,9 +141,8 @@ void LoadXpressFunctions(DynamicLibrary* xpress_dynamic_library) { xpress_dynamic_library->GetFunction(&XPRSgetdblcontrol, "XPRSgetdblcontrol"); xpress_dynamic_library->GetFunction(&XPRSgetstringcontrol, "XPRSgetstringcontrol"); xpress_dynamic_library->GetFunction(&XPRSgetintattrib, "XPRSgetintattrib"); + xpress_dynamic_library->GetFunction(&XPRSgetstringattrib, "XPRSgetstringattrib"); xpress_dynamic_library->GetFunction(&XPRSgetdblattrib, "XPRSgetdblattrib"); - xpress_dynamic_library->GetFunction(&XPRSloadlp, "XPRSloadlp"); - xpress_dynamic_library->GetFunction(&XPRSloadlp64, "XPRSloadlp64"); xpress_dynamic_library->GetFunction(&XPRSgetobj, "XPRSgetobj"); xpress_dynamic_library->GetFunction(&XPRSgetrhs, "XPRSgetrhs"); xpress_dynamic_library->GetFunction(&XPRSgetrhsrange, "XPRSgetrhsrange"); @@ -179,6 +178,7 @@ void LoadXpressFunctions(DynamicLibrary* xpress_dynamic_library) { xpress_dynamic_library->GetFunction(&XPRSchgrhs, "XPRSchgrhs"); xpress_dynamic_library->GetFunction(&XPRSchgrhsrange, "XPRSchgrhsrange"); xpress_dynamic_library->GetFunction(&XPRSchgrowtype, "XPRSchgrowtype"); + xpress_dynamic_library->GetFunction(&XPRSdelobj, "XPRSdelobj"); xpress_dynamic_library->GetFunction(&XPRSaddcbintsol, "XPRSaddcbintsol"); xpress_dynamic_library->GetFunction(&XPRSremovecbintsol, "XPRSremovecbintsol"); xpress_dynamic_library->GetFunction(&XPRSaddcbmessage, "XPRSaddcbmessage"); diff --git a/ortools/xpress/environment.h b/ortools/xpress/environment.h index 1db953f629..9a1fe558be 100644 --- a/ortools/xpress/environment.h +++ b/ortools/xpress/environment.h @@ -453,6 +453,7 @@ absl::Status LoadXpressDynamicLibrary(std::string& xpresspath); #define XPRS_ALG_BARRIER 4 #define XPRS_OBJ_MINIMIZE 1 #define XPRS_OBJ_MAXIMIZE -1 +#define XPRS_UUID 3011 // *************************************************************************** // * variable types * // 
*************************************************************************** @@ -497,10 +498,9 @@ OR_DLL extern std::function OR_DLL extern std::function XPRSgetdblcontrol; OR_DLL extern std::function XPRSgetstringcontrol; OR_DLL extern std::function XPRSgetintattrib; +OR_DLL extern std::function XPRSgetstringattrib; OR_DLL extern std::function XPRSgetdblattrib; extern std::function XPRSgetcontrolinfo; -extern std::function XPRSloadlp; -extern std::function XPRSloadlp64; OR_DLL extern std::function XPRSgetobj; OR_DLL extern std::function XPRSgetrhs; OR_DLL extern std::function XPRSgetrhsrange; @@ -536,6 +536,7 @@ extern std::function XPRSchgrhs; extern std::function XPRSchgrhsrange; extern std::function XPRSchgrowtype; +extern std::function XPRSdelobj; extern std::function XPRSaddcbintsol; extern std::function XPRSremovecbintsol; extern std::function XPRSaddcbmessage; From 98a3e8e6805d4552399f403dab9a8ddd9cba34d4 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 2 Jun 2025 14:13:37 +0200 Subject: [PATCH 046/509] [CP-SAT] reindent samples; fix #4666 --- ortools/sat/2d_distances_propagator.cc | 40 +- ortools/sat/cp_model_expand.cc | 5 +- ortools/sat/cp_model_search.cc | 1 - ortools/sat/cp_model_solver.cc | 2 +- ortools/sat/cp_model_solver.h | 2 +- ortools/sat/diffn.cc | 25 +- ortools/sat/linear_propagation.cc | 7 +- ortools/sat/linear_propagation.h | 45 +- ortools/sat/opb_reader.h | 3 +- ortools/sat/samples/assignment_groups_sat.py | 1 + ortools/sat/samples/assignment_sat.py | 1 + .../sat/samples/assignment_task_sizes_sat.py | 1 + ortools/sat/samples/assignment_teams_sat.py | 1 + ortools/sat/samples/assumptions_sample_sat.py | 1 + ortools/sat/samples/bin_packing_sat.py | 1 + ortools/sat/samples/cp_is_fun_sat.py | 1 + ortools/sat/samples/cp_sat_example.py | 1 + ortools/sat/samples/minimal_jobshop_sat.py | 1 + ortools/sat/samples/multiple_knapsack_sat.py | 1 + ortools/sat/samples/nqueens_sat.py | 2 + ortools/sat/samples/nurses_sat.py | 1 + 
ortools/sat/samples/schedule_requests_sat.py | 1 + ortools/sat/samples/simple_sat_program.py | 1 + ortools/sat/scheduling_cuts.cc | 23 +- ortools/sat/util.h | 429 +++++++++++------- ortools/sat/util_test.cc | 28 +- 26 files changed, 371 insertions(+), 254 deletions(-) diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc index 62eb603ee8..71b44cabc3 100644 --- a/ortools/sat/2d_distances_propagator.cc +++ b/ortools/sat/2d_distances_propagator.cc @@ -30,7 +30,6 @@ #include "ortools/sat/model.h" #include "ortools/sat/no_overlap_2d_helper.h" #include "ortools/sat/precedences.h" -#include "ortools/sat/sat_base.h" #include "ortools/sat/scheduling_helpers.h" #include "ortools/sat/synchronization.h" @@ -58,10 +57,10 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { for (int dim = 0; dim < 2; ++dim) { const SchedulingConstraintHelper& dim_helper = dim == 0 ? helper_.x_helper() : helper_.y_helper(); - for (int i = 0; i < helper_.NumBoxes(); ++i) { + for (int j = 0; j < 2; ++j) { const absl::Span interval_points = - i == 0 ? dim_helper.Starts() : dim_helper.Ends(); - for (int j = 0; j < 2; ++j) { + j == 0 ? dim_helper.Starts() : dim_helper.Ends(); + for (int i = 0; i < helper_.NumBoxes(); ++i) { if (interval_points[i].var != kNoIntegerVariable) { var_to_box_and_coeffs[PositiveVariable(interval_points[i].var)] .boxes[dim][j] @@ -89,13 +88,14 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { dim == 0 ? 
helper_.x_helper() : helper_.y_helper(); for (const int box1 : usage1.boxes[dim][0 /* start */]) { for (const int box2 : usage2.boxes[dim][1 /* end */]) { + if (box1 == box2) continue; const AffineExpression& start = dim_helper.Starts()[box1]; const AffineExpression& end = dim_helper.Ends()[box2]; LinearExpression2 expr2; expr2.vars[0] = start.var; - expr2.vars[1] = NegationOf(end.var); + expr2.vars[1] = end.var; expr2.coeffs[0] = start.coeff; - expr2.coeffs[1] = end.coeff; + expr2.coeffs[1] = -end.coeff; expr2.SimpleCanonicalization(); expr2.DivideByGcd(); if (expr == expr2) { @@ -133,7 +133,8 @@ bool Precedences2DPropagator::Propagate() { for (const auto& [box1, box2] : non_trivial_pairs_) { DCHECK(box1 < helper_.NumBoxes()); DCHECK(box2 < helper_.NumBoxes()); - if (!helper_.IsPresent(box1) && !helper_.IsPresent(box2)) { + DCHECK_NE(box1, box2); + if (!helper_.IsPresent(box1) || !helper_.IsPresent(box2)) { continue; } @@ -148,9 +149,9 @@ bool Precedences2DPropagator::Propagate() { } LinearExpression2 expr; expr.vars[0] = helper->Starts()[b1].var; - expr.vars[1] = NegationOf(helper->Ends()[b2].var); + expr.vars[1] = helper->Ends()[b2].var; expr.coeffs[0] = helper->Starts()[b1].coeff; - expr.coeffs[1] = helper->Ends()[b2].coeff; + expr.coeffs[1] = -helper->Ends()[b2].coeff; const IntegerValue ub_of_start_minus_end_value = binary_relations_maps_->UpperBound(expr) + helper->Starts()[b1].constant - helper->Ends()[b2].constant; @@ -158,8 +159,8 @@ bool Precedences2DPropagator::Propagate() { is_unfeasible = false; break; } - if (!is_unfeasible) break; } + if (!is_unfeasible) break; } if (!is_unfeasible) continue; @@ -167,8 +168,6 @@ bool Precedences2DPropagator::Propagate() { helper_.ClearReason(); num_conflicts_++; - std::vector reason; - std::vector lit_reason; for (int dim = 0; dim < 2; dim++) { SchedulingConstraintHelper* helper = helpers[dim]; @@ -180,23 +179,18 @@ bool Precedences2DPropagator::Propagate() { } LinearExpression2 expr; expr.vars[0] = 
helper->Starts()[b1].var; - expr.vars[1] = NegationOf(helper->Ends()[b2].var); + expr.vars[1] = helper->Ends()[b2].var; expr.coeffs[0] = helper->Starts()[b1].coeff; - expr.coeffs[1] = helper->Ends()[b2].coeff; + expr.coeffs[1] = -helper->Ends()[b2].coeff; binary_relations_maps_->AddReasonForUpperBoundLowerThan( expr, - -(helper->Starts()[b1].constant - helper->Ends()[b2].constant), - &lit_reason, &reason); + -(helper->Starts()[b1].constant - helper->Ends()[b2].constant) - 1, + helper_.x_helper().MutableLiteralReason(), + helper_.x_helper().MutableIntegerReason()); } } helper_.AddPresenceReason(box1); helper_.AddPresenceReason(box2); - helper_.x_helper().MutableIntegerReason()->insert( - helper_.x_helper().MutableIntegerReason()->end(), reason.begin(), - reason.end()); - helper_.x_helper().MutableLiteralReason()->insert( - helper_.x_helper().MutableLiteralReason()->end(), lit_reason.begin(), - lit_reason.end()); return helper_.ReportConflict(); } return true; @@ -205,7 +199,7 @@ bool Precedences2DPropagator::Propagate() { int Precedences2DPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_.WatchAllBoxes(id); - // TODO(user): add an API to BinaryRelationsMaps to watch linear2 + binary_relations_maps_->WatchAllLinearExpressions2(id); return id; } diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index 08157b7341..b6d4c2404f 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -1407,9 +1407,8 @@ void ProcessOneCompressedColumn( // Simpler encoding for table constraints with 2 variables. 
void AddSizeTwoTable( - const std::vector& vars, - const std::vector>& tuples, - const std::vector>& values_per_var, + absl::Span vars, absl::Span> tuples, + absl::Span> values_per_var, PresolveContext* context) { CHECK_EQ(vars.size(), 2); const int left_var = vars[0]; diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index ff253b4041..84f28e1998 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -188,7 +188,6 @@ void AddExtraSchedulingPropagators(SatParameters& new_params) { new_params.set_use_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_area_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_try_edge_reasoning_in_no_overlap_2d(true); - new_params.set_no_overlap_2d_boolean_relations_limit(100); } // We want a random tie breaking among variables with equivalent values. diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 57a9fc7bcd..8aef798e1f 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -3009,7 +3009,7 @@ CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, } CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, - const std::string& params) { + absl::string_view params) { Model model; model.Add(NewSatParameters(params)); return SolveCpModel(model_proto, &model); diff --git a/ortools/sat/cp_model_solver.h b/ortools/sat/cp_model_solver.h index 79d3f7195b..008c7319f8 100644 --- a/ortools/sat/cp_model_solver.h +++ b/ortools/sat/cp_model_solver.h @@ -71,7 +71,7 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model); * format, and returns an instance of CpSolverResponse. 
*/ CpSolverResponse SolveWithParameters(const CpModelProto& model_proto, - const std::string& params); + absl::string_view params); #endif // !__PORTABLE_PLATFORM__ /** diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index ab5b398d19..078de5eb92 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -32,7 +32,6 @@ #include "absl/log/vlog_is_on.h" #include "absl/numeric/bits.h" #include "absl/types/span.h" -// #include "ortools/base/stl_util.h" #include "ortools/sat/2d_distances_propagator.h" #include "ortools/sat/2d_mandatory_overlap_propagator.h" #include "ortools/sat/2d_orthogonal_packing.h" @@ -277,11 +276,11 @@ void AddNonOverlappingRectangles(const std::vector& x, DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0); for (int i = 0; i < num_boxes; ++i) { - if (repository->IsAbsent(x[i])) continue; - if (repository->IsAbsent(y[i])) continue; + if (repository->IsOptional(x[i])) continue; + if (repository->IsOptional(y[i])) continue; for (int j = i + 1; j < num_boxes; ++j) { - if (repository->IsAbsent(x[j])) continue; - if (repository->IsAbsent(y[j])) continue; + if (repository->IsOptional(x[j])) continue; + if (repository->IsOptional(y[j])) continue; // At most one of these two x options is true. const Literal x_ij = repository->GetOrCreatePrecedenceLiteral( @@ -308,21 +307,7 @@ void AddNonOverlappingRectangles(const std::vector& x, } // At least one of the 4 options is true. 
- std::vector clause = {x_ij, x_ji, y_ij, y_ji}; - if (repository->IsOptional(x[i])) { - clause.push_back(repository->PresenceLiteral(x[i]).Negated()); - } - if (repository->IsOptional(y[i])) { - clause.push_back(repository->PresenceLiteral(y[i]).Negated()); - } - if (repository->IsOptional(x[j])) { - clause.push_back(repository->PresenceLiteral(x[j]).Negated()); - } - if (repository->IsOptional(y[j])) { - clause.push_back(repository->PresenceLiteral(y[j]).Negated()); - } - gtl::STLSortAndRemoveDuplicates(&clause); - if (!sat_solver->AddProblemClause(clause)) { + if (!sat_solver->AddProblemClause({x_ij, x_ji, y_ij, y_ji})) { return; } } diff --git a/ortools/sat/linear_propagation.cc b/ortools/sat/linear_propagation.cc index 31fb792911..c77ff22752 100644 --- a/ortools/sat/linear_propagation.cc +++ b/ortools/sat/linear_propagation.cc @@ -389,7 +389,8 @@ LinearPropagator::LinearPropagator(Model* model) random_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), watcher_id_(watcher_->Register(this)), - order_(random_, [this](int id) { return GetVariables(infos_[id]); }) { + order_(random_, time_limit_, + [this](int id) { return GetVariables(infos_[id]); }) { // Note that we need this class always in sync. integer_trail_->RegisterWatcher(&modified_vars_); integer_trail_->RegisterReversibleClass(this); @@ -475,6 +476,8 @@ void LinearPropagator::SetPropagatedBy(IntegerVariable var, int id) { void LinearPropagator::OnVariableChange(IntegerVariable var, IntegerValue lb, int id) { + DCHECK_EQ(lb, integer_trail_->LowerBound(var)); + // If no constraint use this var, we just ignore it. 
const int size = var_to_constraint_ids_[var].size(); if (size == 0) return; @@ -1331,7 +1334,7 @@ bool LinearPropagator::DisassembleSubtree(int root_id, int num_tight) { if (next_increase > 0) { disassemble_queue_.push_back({id, next_var, next_increase}); - // We know this will push later, so we register hit with a sentinel + // We know this will push later, so we register it with a sentinel // value so that it do not block any earlier propagation. Hopefully, // adding this "dependency" should help find a better propagation // order. diff --git a/ortools/sat/linear_propagation.h b/ortools/sat/linear_propagation.h index 561ecdf268..b98f46711e 100644 --- a/ortools/sat/linear_propagation.h +++ b/ortools/sat/linear_propagation.h @@ -149,16 +149,19 @@ class EnforcementPropagator : public SatPropagator { // Each constraint might push some variables which might in turn make other // constraint tighter. In general, it seems better to make sure we push first // constraints that are not affected by other variables and delay the -// propagation of constraint that we know will become tigher. +// propagation of constraint that we know will become tigher. This also likely +// simplifies the reasons. // // Note that we can have cycle in this graph, and that this is not necessarily a // conflict. 
class ConstraintPropagationOrder { public: ConstraintPropagationOrder( - ModelRandomGenerator* random, + ModelRandomGenerator* random, TimeLimit* time_limit, std::function(int)> id_to_vars) - : random_(random), id_to_vars_func_(std::move(id_to_vars)) {} + : random_(random), + time_limit_(time_limit), + id_to_vars_func_(std::move(id_to_vars)) {} void Resize(int num_vars, int num_ids) { var_has_entry_.Resize(IntegerVariable(num_vars)); @@ -166,7 +169,7 @@ class ConstraintPropagationOrder { var_to_lb_.resize(num_vars); var_to_pos_.resize(num_vars); - in_ids_.Resize(num_ids); + in_ids_.resize(num_ids); } void Register(int id, IntegerVariable var, IntegerValue lb) { @@ -203,15 +206,17 @@ class ConstraintPropagationOrder { // Return -1 if there is none. // This returns a constraint with min degree. // - // TODO(user): fix quadratic algo? We can use var_to_ids_func_() to maintain - // the degree. But note that with the start_ optim and because we expect - // mainly degree zero, this seems to be faster. + // TODO(user): fix quadratic or even linear algo? We can use + // var_to_ids_func_() to maintain the degree. But note that since we reorder + // constraints and because we expect mainly degree zero, this seems to be + // faster. int NextId() { if (ids_.empty()) return -1; int best_id = 0; int best_num_vars = 0; int best_degree = std::numeric_limits::max(); + int64_t work_done = 0; const int size = ids_.size(); const auto var_has_entry = var_has_entry_.const_view(); for (int i = 0; i < size; ++i) { @@ -219,10 +224,28 @@ class ConstraintPropagationOrder { ids_.pop_front(); DCHECK(in_ids_[id]); + // By degree, we mean the number of variables of the constraint that do + // not have yet their lower bounds up to date; they will be pushed by + // other constraints as we propagate them. If possible, we want to delay + // the propagation of a constraint with positive degree until all involved + // lower bounds are up to date (i.e. degree == 0). 
int degree = 0; absl::Span vars = id_to_vars_func_(id); + work_done += vars.size(); for (const IntegerVariable var : vars) { - if (var_has_entry[var]) ++degree; + if (var_has_entry[var]) { + if (var_has_entry[NegationOf(var)] && + var_to_id_[NegationOf(var)] == id) { + // We have two constraints, this one (id) push NegationOf(var), and + // var_to_id_[var] push var. So whichever order we choose, the first + // constraint will need to be scanned at least twice. Lets not count + // this situation in the degree. + continue; + } + + DCHECK_NE(var_to_id_[var], id); + ++degree; + } } // We select the min-degree and prefer lower constraint size. @@ -241,6 +264,11 @@ class ConstraintPropagationOrder { ids_.push_back(id); } + if (work_done > 100) { + time_limit_->AdvanceDeterministicTime(static_cast(work_done) * + 5e-9); + } + // We didn't find any degree zero, we scanned the whole queue. // Extract best_id while keeping the order stable. // @@ -277,6 +305,7 @@ class ConstraintPropagationOrder { public: ModelRandomGenerator* random_; + TimeLimit* time_limit_; std::function(int)> id_to_vars_func_; // For each variable we only keep the constraint id that pushes it further. 
diff --git a/ortools/sat/opb_reader.h b/ortools/sat/opb_reader.h index e67b2bc309..cb6a36d500 100644 --- a/ortools/sat/opb_reader.h +++ b/ortools/sat/opb_reader.h @@ -28,6 +28,7 @@ #include "absl/log/log.h" #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/stl_util.h" @@ -274,7 +275,7 @@ class OpbReader { return true; } - static int ParseIndex(const std::string& word) { + static int ParseIndex(absl::string_view word) { int index; CHECK(absl::SimpleAtoi(word, &index)); return index; diff --git a/ortools/sat/samples/assignment_groups_sat.py b/ortools/sat/samples/assignment_groups_sat.py index 482bb8f8c5..2ea4fc973b 100644 --- a/ortools/sat/samples/assignment_groups_sat.py +++ b/ortools/sat/samples/assignment_groups_sat.py @@ -16,6 +16,7 @@ """Solves an assignment problem for given group of workers.""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/assignment_sat.py b/ortools/sat/samples/assignment_sat.py index bbecaafa1d..96f20e8f74 100644 --- a/ortools/sat/samples/assignment_sat.py +++ b/ortools/sat/samples/assignment_sat.py @@ -21,6 +21,7 @@ import io import pandas as pd from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/assignment_task_sizes_sat.py b/ortools/sat/samples/assignment_task_sizes_sat.py index 4e1bf155ff..0baca4a6df 100644 --- a/ortools/sat/samples/assignment_task_sizes_sat.py +++ b/ortools/sat/samples/assignment_task_sizes_sat.py @@ -16,6 +16,7 @@ """Solves a simple assignment problem.""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/assignment_teams_sat.py b/ortools/sat/samples/assignment_teams_sat.py index 9b3ae74ed3..375087e856 100644 --- a/ortools/sat/samples/assignment_teams_sat.py +++ b/ortools/sat/samples/assignment_teams_sat.py @@ -16,6 +16,7 @@ 
"""Solves a simple assignment problem.""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/assumptions_sample_sat.py b/ortools/sat/samples/assumptions_sample_sat.py index 29d4a5150e..62501b9b2f 100644 --- a/ortools/sat/samples/assumptions_sample_sat.py +++ b/ortools/sat/samples/assumptions_sample_sat.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/bin_packing_sat.py b/ortools/sat/samples/bin_packing_sat.py index ecb9c9e074..8477160ea3 100644 --- a/ortools/sat/samples/bin_packing_sat.py +++ b/ortools/sat/samples/bin_packing_sat.py @@ -21,6 +21,7 @@ import io import pandas as pd from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/cp_is_fun_sat.py b/ortools/sat/samples/cp_is_fun_sat.py index 9669878c0c..7a8aeaedc0 100644 --- a/ortools/sat/samples/cp_is_fun_sat.py +++ b/ortools/sat/samples/cp_is_fun_sat.py @@ -22,6 +22,7 @@ This problem has 72 different solutions in base 10. 
""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/cp_sat_example.py b/ortools/sat/samples/cp_sat_example.py index 1c0db98e87..f7f68b2365 100755 --- a/ortools/sat/samples/cp_sat_example.py +++ b/ortools/sat/samples/cp_sat_example.py @@ -16,6 +16,7 @@ """Simple solve.""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/minimal_jobshop_sat.py b/ortools/sat/samples/minimal_jobshop_sat.py index 36b2961781..a79406febd 100644 --- a/ortools/sat/samples/minimal_jobshop_sat.py +++ b/ortools/sat/samples/minimal_jobshop_sat.py @@ -17,6 +17,7 @@ # [START import] import collections from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/multiple_knapsack_sat.py b/ortools/sat/samples/multiple_knapsack_sat.py index 7bdc840dd1..3f3b3e567a 100644 --- a/ortools/sat/samples/multiple_knapsack_sat.py +++ b/ortools/sat/samples/multiple_knapsack_sat.py @@ -16,6 +16,7 @@ """Solves a multiple knapsack problem using the CP-SAT solver.""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/nqueens_sat.py b/ortools/sat/samples/nqueens_sat.py index 81fe3a2a8f..770df0aea3 100644 --- a/ortools/sat/samples/nqueens_sat.py +++ b/ortools/sat/samples/nqueens_sat.py @@ -18,6 +18,7 @@ import sys import time from ortools.sat.python import cp_model + # [END import] @@ -54,6 +55,7 @@ class NQueenSolutionPrinter(cp_model.CpSolverSolutionCallback): print() print() + # [END solution_printer] diff --git a/ortools/sat/samples/nurses_sat.py b/ortools/sat/samples/nurses_sat.py index 0bc04b16e5..16fae1af17 100644 --- a/ortools/sat/samples/nurses_sat.py +++ b/ortools/sat/samples/nurses_sat.py @@ -16,6 +16,7 @@ """Example of a simple nurse scheduling problem.""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/schedule_requests_sat.py 
b/ortools/sat/samples/schedule_requests_sat.py index ee1e1e2bd6..f89e5475a9 100644 --- a/ortools/sat/samples/schedule_requests_sat.py +++ b/ortools/sat/samples/schedule_requests_sat.py @@ -18,6 +18,7 @@ from typing import Union from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/samples/simple_sat_program.py b/ortools/sat/samples/simple_sat_program.py index 25f234e3e6..3c2041c6cf 100644 --- a/ortools/sat/samples/simple_sat_program.py +++ b/ortools/sat/samples/simple_sat_program.py @@ -16,6 +16,7 @@ """Simple solve.""" # [START import] from ortools.sat.python import cp_model + # [END import] diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index f99d3f0889..9842654ca0 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -1337,21 +1337,19 @@ CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( } } } - if (!helper.valid_permutation_iterator_.Init()) { - return CompletionTimeExplorationStatus::NO_VALID_PERMUTATION; - } + bool is_dag = false; int num_valid_permutations = 0; - do { + for (const auto& permutation : helper.valid_permutation_iterator_) { + is_dag = true; if (--exploration_credit < 0) break; IntegerValue sum_of_ends = 0; IntegerValue sum_of_weighted_ends = 0; if (ComputeWeightedSumOfEndMinsOfOnePermutation( - events, helper.valid_permutation_iterator_.permutation(), - capacity_max, helper, sum_of_ends, sum_of_weighted_ends, - cut_use_precedences)) { + events, permutation, capacity_max, helper, sum_of_ends, + sum_of_weighted_ends, cut_use_precedences)) { min_sum_of_ends = std::min(ToDouble(sum_of_ends), min_sum_of_ends); min_sum_of_weighted_ends = std::min(ToDouble(sum_of_weighted_ends), min_sum_of_weighted_ends); @@ -1362,7 +1360,10 @@ CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( break; } } - } while (helper.valid_permutation_iterator_.Increase()); + } + if (!is_dag) { + return CompletionTimeExplorationStatus::NO_VALID_PERMUTATION; + } const 
CompletionTimeExplorationStatus status = exploration_credit < 0 ? CompletionTimeExplorationStatus::ABORTED : num_valid_permutations > 0 @@ -1383,7 +1384,7 @@ CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( // - detect disjoint tasks (no need to crossover to the second part) // - better caching of explored states ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( - const std::string& cut_name, std::vector events, + absl::string_view cut_name, std::vector events, IntegerValue capacity_max, CtExhaustiveHelper& helper, Model* model, LinearConstraintManager* manager) { TopNCuts top_n_cuts(5); @@ -1476,7 +1477,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( is_lifted |= event.lifted; cut.AddTerm(event.end, IntegerValue(1)); } - std::string full_name = cut_name; + std::string full_name(cut_name); if (cut_use_precedences) full_name.append("_prec"); if (is_lifted) full_name.append("_lifted"); top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); @@ -1493,7 +1494,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( is_lifted |= event.lifted; cut.AddTerm(event.end, event.energy_min); } - std::string full_name = cut_name; + std::string full_name(cut_name); if (is_lifted) full_name.append("_lifted"); if (cut_use_precedences) full_name.append("_prec"); full_name.append("_weighted"); diff --git a/ortools/sat/util.h b/ortools/sat/util.h index 4c2a26b9f3..88a5b927d3 100644 --- a/ortools/sat/util.h +++ b/ortools/sat/util.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -984,192 +985,288 @@ inline void CompactVectorVector::ResetFromTranspose( // // If the graph has no edges, it will generate all possible permutations. // -// If the graph has edges, it will generate all possible permutations of the -// dag that are a topological sorting of the graph. 
+// If the graph has edges, it will generate all possible permutations of the dag +// that are a topological sorting of the graph. // -// The class maintains 5 fields: -// - graph_: a vector of vectors, where graph_[i] contains the list of -// elements that are adjacent to element i. -// - size_: the size of the graph. +// Typical usage: // -// - missing_parent_numbers_: a vector of integers, where -// missing_parent_numbers_[i] is the number of parents of element i that are -// not yet in permutation_. Before Init() is called, no element is yet in -// permutation_ so that it is the number of parents of i. After Init(), and -// before Increase() returns true, it is always 0 (except during the -// execution of Increase(), see below). +// DagTopologicalSortIterator dag_topological_sort(5); // -// - permutation_: a vector of integers, that after Init() is called, and -// before Increase() returns false, it is a topological sorting of the graph -// (except during the execution of Increase()). -// - element_original_position_: a vector of integers, where -// element_original_position_[i] is the original position of element i in the -// permutation_. See the algorithm below for more details. +// dag_topological_sort.AddArc(0, 1); +// dag_topological_sort.AddArc(1, 2); +// dag_topological_sort.AddArc(3, 4); +// +// for (const auto& permutation : dag_topological_sort) { +// // Do something with each permutation. +// } +// +// Note: to test if there are cycles, it is enough to check if at least one +// iteration occurred in the above loop. +// +// Note 2: adding an arc during an iteration is not supported and the behavior +// is undefined. class DagTopologicalSortIterator { public: - // Graph maps indices to their children. Any children must exist. 
- DagTopologicalSortIterator() : size_(0) {} - explicit DagTopologicalSortIterator(int size) : size_(size) { Reset(size); } + DagTopologicalSortIterator() = default; - void Reset(int size) { - size_ = size; - graph_.assign(size, {}); - missing_parent_numbers_.assign(size, 0); - permutation_.clear(); - element_original_position_.assign(size, 0); + // Graph maps indices to their children. Any children must exist. + explicit DagTopologicalSortIterator(int size) + : graph_(size, std::vector{}) {} + + // An iterator class to generate all possible topological sorting of a dag. + // + // If the graph has no edges, it will generate all possible permutations. + // + // If the graph has edges, it will generate all possible permutations of the + // dag that are a topological sorting of the graph. + // + // The class maintains 5 fields: + // - graph_: a vector of vectors, where graph_[i] contains the list of + // elements that are adjacent to element i. This is not owned. + // - size_: the size of the graph. + // - missing_parent_numbers_: a vector of integers, where + // missing_parent_numbers_[i] is the number of parents of element i that + // are not yet in permutation_. It is always 0 except during the + // execution of operator++(). + // - permutation_: a vector of integers, that is a topological sorting of the + // graph except during the execution of operator++(). + // - element_original_position_: a vector of integers, where + // element_original_position_[i] is the original position of element i in + // the permutation_. See the algorithm below for more details. 
+ + class Iterator { + friend class DagTopologicalSortIterator; + + public: + using iterator_category = std::input_iterator_tag; + using value_type = const std::vector; + using difference_type = ptrdiff_t; + using pointer = value_type*; + using reference = value_type&; + + Iterator& operator++(); + + friend bool operator==(const Iterator& a, const Iterator& b) { + return &a.graph_ == &b.graph_ && a.ordering_index_ == b.ordering_index_; + } + + friend bool operator!=(const Iterator& a, const Iterator& b) { + return !(a == b); + } + + reference operator*() const { return permutation_; } + + pointer operator->() const { return &permutation_; } + + private: + // End iterator. + explicit Iterator(const std::vector>& graph + ABSL_ATTRIBUTE_LIFETIME_BOUND, + bool) + : graph_(graph), ordering_index_(-1) {} + + // Begin iterator. + explicit Iterator(const std::vector>& graph + ABSL_ATTRIBUTE_LIFETIME_BOUND); + + // Unset the element at pos. + void Unset(int pos); + + // Set the element at pos to the element at k. + void Set(int pos, int k); + + // Graph maps indices to their children. Children must be in [0, size_). + const std::vector>& graph_; + // Number of elements in graph_. + int size_; + // For each element in graph_, the number of parents it has that are not yet + // in permutation_. In particular, it is always 0 outside of operator++(). + std::vector missing_parent_numbers_; + // The current permutation. It is ensured to be a topological sorting of the + // graph outside of operator++(). + std::vector permutation_; + // Keeps track of the original position of the element in permutation_[i]. + // See the comment above the class for the detailed algorithm. + std::vector element_original_position_; + + // Index of the current ordering. Used to compare iterators. It is -1 if the + // end has been reached. 
+ int64_t ordering_index_; + }; + + Iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return Iterator(graph_); + } + Iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND { + return Iterator(graph_, true); } - // Must be called before Init(). + void Reset(int size) { graph_.assign(size, {}); } + + // Must be called before iteration starts or between iterations. void AddArc(int from, int to) { DCHECK_GE(from, 0); - DCHECK_LT(from, size_); + DCHECK_LT(from, graph_.size()); DCHECK_GE(to, 0); - DCHECK_LT(to, size_); + DCHECK_LT(to, graph_.size()); graph_[from].push_back(to); - missing_parent_numbers_[to]++; } - // To describe the algorithm in Increase() and Init(), we consider the - // following invariant, called Invariant(pos) for a position pos in [0, - // size_): - // 1. permutations_[0], ..., permutations_[pos] form a prefix of a - // topological ordering of the graph; - // 2. permutations_[pos + 1], ..., permutations_.back() are all other - // elements that have all their parents in permutations_[0], ..., - // permutations_[pos], ordered lexicographically by the index of their - // last parent in permutations_[0], ... permutations_[pos] and then by - // their index in the graph; - // 3. missing_parent_numbers_[i] is the number of parents of element i that - // are not in {permutations_[0], ..., permutations_[pos]}. - // 4. element_original_position_[i] is the original position of element i of - // the permutation following the order described in 2. In particular, - // element_original_position_[i] = i for i > pos. - // Set and Unset maintain these invariants. - - // Precondition: Invariant(size_ - 1) holds. - // Postcondition: Invariant(size_ - 1) holds if Increase() returns true. - // If Increase() returns false, all topological orderings of the graph have - // been generated and the state of permutation_ is not specified.. - bool Increase() { - Unset(size_ - 1); - for (int pos = size_ - 2; pos >= 0; --pos) { - // Invariant(pos) holds. 
- // Increasing logic: once permutation_[pos] has been put back to its - // original position by Unset(pos), elements permutations_[pos], ..., - // permutations_.back() are in their original ordering, in particular in - // the same order as last time the iteration on permutation_[pos] - // occurred (according to Invariant(pos).2, these are exactly the elements - // that have to be tried at pos). - // All possibilities in permutations_[pos], ..., - // permutations_[element_original_position_[pos]] have been run through. - // The next to test is permutations_[element_original_position_[pos] + 1]. - const int k = element_original_position_[pos] + 1; - Unset(pos); - // Invariant(pos - 1) holds. - - // No more elements to iterate on at position pos. Go backwards one - // position to increase that one. - if (k == permutation_.size()) continue; - Set(pos, k); - // Invariant(pos) holds. - for (++pos; pos < size_; ++pos) { - // Invariant(pos - 1) holds. - // According to Invariant(pos - 1).2, if pos >= permutation_.size(), - // there are no more elements we can add to the permutation which means - // that we detected a cycle. It would be a bug as we would have detected - // it in Init(). - CHECK_LT(pos, permutation_.size()) << "Cycle detected"; - // According to Invariant(pos - 1).2, elements that can be used at pos - // are permutations_[pos], ..., permutations_.back(). Starts the - // iteration at permutations_[pos]. - Set(pos, pos); - // Invariant(pos) holds. - } - // Invariant(size_ - 1) holds. - return true; - } - return false; - } - - // Must be called before Increase(). - ABSL_MUST_USE_RESULT bool Init() { - for (int i = 0; i < size_; ++i) { - if (missing_parent_numbers_[i] == 0) { - permutation_.push_back(i); - } - } - for (int pos = 0; pos < size_; ++pos) { - // Invariant(pos - 1) holds. - // According to Invariant(pos - 1).2, if pos >= permutation_.size(), - // there are no more elements we can add to the permutation. 
- if (pos >= permutation_.size()) return false; - // According to Invariant(pos - 1).2, elements that can be used at pos - // are permutations_[pos], ..., permutations_.back(). Starts the - // iteration at permutations_[pos]. - Set(pos, pos); - // Invariant(pos) holds. - } - // Invariant(pos - 1) hold. We have a permutation. - return true; - } - - const std::vector& permutation() const { return permutation_; } - private: // Graph maps indices to their children. Children must be in [0, size_). std::vector> graph_; - // Number of elements in graph_. - int size_; - // For each element in graph_, the number of parents it has that are not yet - // in permutation_. In particular, it is always 0 when Init has been called - // and when Increase is not in progress (and has not yet returned false). - std::vector missing_parent_numbers_; - // The current permutation. It is ensured to be a topological sorting of the - // graph once Init has been called and Increase has not yet returned false. - std::vector permutation_; - // Keeps track of the original position of the element in permutation_[i]. See - // the comment above the class for the detailed algorithm. - std::vector element_original_position_; - - // Unset the element at pos. - // - // - Precondition: Invariant(pos) holds. - // - Postcondition: Invariant(pos - 1) holds. - void Unset(int pos) { - const int n = permutation_[pos]; - // Before the loop: Invariant(pos).2 and Invariant(pos).3 hold. - // After the swap below: Invariant(pos - 1).2 and Invariant(pos - 1).3 hold. - for (const int c : graph_[n]) { - if (missing_parent_numbers_[c] == 0) permutation_.pop_back(); - ++missing_parent_numbers_[c]; - } - std::swap(permutation_[element_original_position_[pos]], permutation_[pos]); - // Invariant(pos).4 -> Invariant(pos - 1).4. - element_original_position_[pos] = pos; - } - - // Set the element at pos to the element at k. - // - // - Precondition: Invariant(pos - 1) holds and k in [pos, - // permutation_.size()). 
- // - Postcondition: Invariant(pos) holds and permutation_[pos] has been - // swapped with permutation_[k]. - void Set(int pos, int k) { - int n = permutation_[k]; - // Before the loop: Invariant(pos - 1).2 and Invariant(pos - 1).3 hold. - // After the loop: Invariant(pos).2 and Invariant(pos).3 hold. - for (int c : graph_[n]) { - --missing_parent_numbers_[c]; - if (missing_parent_numbers_[c] == 0) permutation_.push_back(c); - } - // Invariant(pos - 1).1 -> Invariant(pos).1. - std::swap(permutation_[k], permutation_[pos]); - // Invariant(pos - 1).4 -> Invariant(pos).4. - element_original_position_[pos] = k; - } }; +// To describe the algorithm in operator++() and constructor(), we consider the +// following invariant, called Invariant(pos) for a position pos in [0, size_): +// 1. permutations_[0], ..., permutations_[pos] form a prefix of a topological +// ordering of the graph; +// 2. permutations_[pos + 1], ..., permutations_.back() are all other elements +// that have all their parents in permutations_[0], ..., permutations_[pos], +// ordered lexicographically by the index of their last parent in +// permutations_[0], ... permutations_[pos] and then by their index in the +// graph; +// 3. missing_parent_numbers_[i] is the number of parents of element i that are +// not in {permutations_[0], ..., permutations_[pos]}. +// 4. element_original_position_[i] is the original position of element i of +// the permutation following the order described in 2. In particular, +// element_original_position_[i] = i for i > pos. +// Set and Unset maintain these invariants. + +// Precondition: Invariant(size_ - 1) holds. +// Postcondition: Invariant(size_ - 1) holds if the end of the iteration is not +// reached. +inline DagTopologicalSortIterator::Iterator& +DagTopologicalSortIterator::Iterator::operator++() { + CHECK_GE(ordering_index_, 0) << "Iteration past end"; + if (size_ == 0) { + // Special case: empty graph, only one topological ordering is + // generated. 
+ ordering_index_ = -1; + return *this; + } + + Unset(size_ - 1); + for (int pos = size_ - 2; pos >= 0; --pos) { + // Invariant(pos) holds. + // Increasing logic: once permutation_[pos] has been put back to its + // original position by Unset(pos), elements permutations_[pos], ..., + // permutations_.back() are in their original ordering, in particular in + // the same order as last time the iteration on permutation_[pos] occurred + // (according to Invariant(pos).2, these are exactly the elements that have + // to be tried at pos). All possibilities in permutations_[pos], ..., + // permutations_[element_original_position_[pos]] have been run through. + // The next to test is permutations_[element_original_position_[pos] + 1]. + const int k = element_original_position_[pos] + 1; + Unset(pos); + // Invariant(pos - 1) holds. + + // No more elements to iterate on at position pos. Go backwards one position + // to increase that one. + if (k == permutation_.size()) continue; + Set(pos, k); + // Invariant(pos) holds. + for (++pos; pos < size_; ++pos) { + // Invariant(pos - 1) holds. + // According to Invariant(pos - 1).2, if pos >= permutation_.size(), there + // are no more elements we can add to the permutation which means that we + // detected a cycle. It would be a bug as we would have detected it in + // the constructor. + CHECK_LT(pos, permutation_.size()) + << "Unexpected cycle detected during iteration"; + // According to Invariant(pos - 1).2, elements that can be used at pos are + // permutations_[pos], ..., permutations_.back(). Starts the iteration at + // permutations_[pos]. + Set(pos, pos); + // Invariant(pos) holds. + } + // Invariant(size_ - 1) holds. 
+ ++ordering_index_; + return *this; + } + ordering_index_ = -1; + return *this; +} + +inline DagTopologicalSortIterator::Iterator::Iterator( + const std::vector>& graph) + : graph_(graph), + size_(graph.size()), + missing_parent_numbers_(size_, 0), + element_original_position_(size_, 0), + ordering_index_(0) { + if (size_ == 0) { + // Special case: empty graph, only one topological ordering is generated, + // which is the "empty" ordering. + return; + } + + for (const auto& children : graph_) { + for (const int child : children) { + missing_parent_numbers_[child]++; + } + } + + for (int i = 0; i < size_; ++i) { + if (missing_parent_numbers_[i] == 0) { + permutation_.push_back(i); + } + } + for (int pos = 0; pos < size_; ++pos) { + // Invariant(pos - 1) holds. + // According to Invariant(pos - 1).2, if pos >= permutation_.size(), there + // are no more elements we can add to the permutation. + if (pos >= permutation_.size()) { + ordering_index_ = -1; + return; + } + // According to Invariant(pos - 1).2, elements that can be used at pos are + // permutations_[pos], ..., permutations_.back(). Starts the iteration at + // permutations_[pos]. + Set(pos, pos); + // Invariant(pos) holds. + } + // Invariant(pos - 1) hold. We have a permutation. +} + +// Unset the element at pos. +// +// - Precondition: Invariant(pos) holds. +// - Postcondition: Invariant(pos - 1) holds. +inline void DagTopologicalSortIterator::Iterator::Unset(int pos) { + const int n = permutation_[pos]; + // Before the loop: Invariant(pos).2 and Invariant(pos).3 hold. + // After the swap below: Invariant(pos - 1).2 and Invariant(pos - 1).3 hold. + for (const int c : graph_[n]) { + if (missing_parent_numbers_[c] == 0) permutation_.pop_back(); + ++missing_parent_numbers_[c]; + } + std::swap(permutation_[element_original_position_[pos]], permutation_[pos]); + // Invariant(pos).4 -> Invariant(pos - 1).4. + element_original_position_[pos] = pos; +} + +// Set the element at pos to the element at k. 
+// +// - Precondition: Invariant(pos - 1) holds and k in [pos, +// permutation_.size()). +// - Postcondition: Invariant(pos) holds and permutation_[pos] has been swapped +// with permutation_[k]. +inline void DagTopologicalSortIterator::Iterator::Set(int pos, int k) { + int n = permutation_[k]; + // Before the loop: Invariant(pos - 1).2 and Invariant(pos - 1).3 hold. + // After the loop: Invariant(pos).2 and Invariant(pos).3 hold. + for (int c : graph_[n]) { + --missing_parent_numbers_[c]; + if (missing_parent_numbers_[c] == 0) permutation_.push_back(c); + } + // Invariant(pos - 1).1 -> Invariant(pos).1. + std::swap(permutation_[k], permutation_[pos]); + // Invariant(pos - 1).4 -> Invariant(pos).4. + element_original_position_[pos] = k; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/util_test.cc b/ortools/sat/util_test.cc index 1e06fbfb2d..1b2be2db49 100644 --- a/ortools/sat/util_test.cc +++ b/ortools/sat/util_test.cc @@ -1075,43 +1075,39 @@ TEST(DagTopologicalSortIteratorTest, GenerateValidPermutations) { dag_iterator.AddArc(4, 1); dag_iterator.AddArc(2, 3); dag_iterator.AddArc(3, 1); - EXPECT_TRUE(dag_iterator.Init()); int count = 0; - do { + for ([[maybe_unused]] const auto& permutation : dag_iterator) { ++count; - } while (dag_iterator.Increase()); + } EXPECT_EQ(count, 13); } TEST(DagTopologicalSortIteratorTest, GenerateAllPermutations) { DagTopologicalSortIterator dag_iterator(6); - EXPECT_TRUE(dag_iterator.Init()); int count = 0; - do { + for ([[maybe_unused]] const auto& permutation : dag_iterator) { ++count; - } while (dag_iterator.Increase()); + } EXPECT_EQ(count, 720); } TEST(DagTopologicalSortIteratorTest, OnePrecedence) { DagTopologicalSortIterator dag_iterator(6); dag_iterator.AddArc(5, 2); - EXPECT_TRUE(dag_iterator.Init()); int count = 0; - do { + for ([[maybe_unused]] const auto& permutation : dag_iterator) { ++count; - } while (dag_iterator.Increase()); + } EXPECT_EQ(count, 360); } 
TEST(DagTopologicalSortIteratorTest, ReversePrecedence) { DagTopologicalSortIterator dag_iterator(6); dag_iterator.AddArc(2, 5); - EXPECT_TRUE(dag_iterator.Init()); int count = 0; - do { + for ([[maybe_unused]] const auto& permutation : dag_iterator) { ++count; - } while (dag_iterator.Increase()); + } EXPECT_EQ(count, 360); } @@ -1133,11 +1129,9 @@ TEST(DagTopologicalSortIteratorTest, RandomTest) { absl::flat_hash_set> iterator_solutions; int count_iterator = 0; - if (dag_iterator.Init()) { - do { - ++count_iterator; - iterator_solutions.insert(dag_iterator.permutation()); - } while (dag_iterator.Increase()); + for (const auto& permutation : dag_iterator) { + ++count_iterator; + iterator_solutions.insert(permutation); } std::vector permutation = {0, 1, 2, 3, 4, 5}; From 8f43bdb90d6be3c9c738e3738ab05ac02b8f9c70 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 2 Jun 2025 14:14:02 +0200 Subject: [PATCH 047/509] add iterators to sorted_interval_list --- ortools/util/sorted_interval_list.h | 91 ++++++++++++++++++++++++++++- 1 file changed, 89 insertions(+), 2 deletions(-) diff --git a/ortools/util/sorted_interval_list.h b/ortools/util/sorted_interval_list.h index 07a55b7616..f07dca7c71 100644 --- a/ortools/util/sorted_interval_list.h +++ b/ortools/util/sorted_interval_list.h @@ -14,6 +14,7 @@ #ifndef OR_TOOLS_UTIL_SORTED_INTERVAL_LIST_H_ #define OR_TOOLS_UTIL_SORTED_INTERVAL_LIST_H_ +#include #include #include #include @@ -32,21 +33,41 @@ namespace operations_research { * Represents a closed interval [start, end]. We must have start <= end. */ struct ClosedInterval { +#if !defined(SWIG) + /** + * An iterator over the values of a ClosedInterval object. 
+ * + * To iterate over the values, you can use either a range for loop: + * + * ClosedInterval interval = {0, 100}; + * for (const int64_t value : interval) { Work(value); } + * + * or a classical for loop: + * for (auto it = begin(interval); it != end(interval); ++it) { Work(*it); } + * + * The iterator is designed to be very efficient, using just a single counter. + * It works correctly for any combination of `start` and `end` except the full + * int64_t range (start == INT64_MIN && end == INT64_MAX). + */ + class Iterator; +#endif // !defined(SWIG) + ClosedInterval() {} + explicit ClosedInterval(int64_t v) : start(v), end(v) {} ClosedInterval(int64_t s, int64_t e) : start(s), end(e) { DLOG_IF(DFATAL, s > e) << "Invalid ClosedInterval(" << s << ", " << e << ")"; } std::string DebugString() const; - bool operator==(const ClosedInterval& other) const { + constexpr bool operator==(const ClosedInterval& other) const { return start == other.start && end == other.end; } // Because we mainly manipulate vector of disjoint intervals, we only need to // sort by the start. We do not care about the order in which interval with // the same start appear since they will always be merged into one interval. - bool operator<(const ClosedInterval& other) const { + constexpr bool operator<(const ClosedInterval& other) const { return start < other.start; } @@ -59,6 +80,11 @@ struct ClosedInterval { int64_t end = 0; // Inclusive. }; +#if !defined(SWIG) +inline ClosedInterval::Iterator begin(ClosedInterval interval); +inline ClosedInterval::Iterator end(ClosedInterval interval); +#endif // !defined(SWIG) + std::ostream& operator<<(std::ostream& out, const ClosedInterval& interval); std::ostream& operator<<(std::ostream& out, const std::vector& intervals); @@ -648,6 +674,67 @@ class SortedDisjointIntervalList { IntervalSet intervals_; }; +// Implementation details. 
+ +#if !defined(SWIG) +class ClosedInterval::Iterator { + public: + using value_type = int64_t; + using difference_type = std::ptrdiff_t; + + Iterator(const Iterator&) = default; + + int64_t operator*() const { return static_cast(current_); } + + Iterator& operator++() { + ++current_; + return *this; + } + void operator++(int) { ++current_; } + + bool operator==(Iterator other) const { return current_ == other.current_; } + bool operator!=(Iterator other) const { return current_ != other.current_; } + + Iterator& operator=(const Iterator&) = default; + + static Iterator Begin(ClosedInterval interval) { + AssertNotFullInt64Range(interval); + return Iterator(static_cast(interval.start)); + } + static Iterator End(ClosedInterval interval) { + AssertNotFullInt64Range(interval); + return Iterator(static_cast(interval.end) + 1); + } + + private: + explicit Iterator(uint64_t current) : current_(current) {} + + // Triggers a DCHECK-failure when `interval` represents the full int64_t + // range. + static void AssertNotFullInt64Range(ClosedInterval interval) { + DCHECK_NE(static_cast(interval.start), + static_cast(interval.end) + 1) + << "Iteration over the full int64_t range is not supported."; + } + + // Implementation note: In C++, integer overflow is well-defined only for + // unsigned integers. To avoid any compilation issues or UBSan failures, the + // iterator uses uint64_t internally and relies on the fact that since C++20 + // unsigned->signed conversion is well-defined for all values using modulo + // arithmetic. + uint64_t current_; +}; + +// begin()/end() are required for iteration over ClosedInterval in a range for +// loop. 
+inline ClosedInterval::Iterator begin(ClosedInterval interval) { + return ClosedInterval::Iterator::Begin(interval); +} +inline ClosedInterval::Iterator end(ClosedInterval interval) { + return ClosedInterval::Iterator::End(interval); +} +#endif // !defined(SWIG) + } // namespace operations_research #endif // OR_TOOLS_UTIL_SORTED_INTERVAL_LIST_H_ From 67529ab469ec27d9803ad0e58e8a23e9a8ca68d2 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 2 Jun 2025 14:14:18 +0200 Subject: [PATCH 048/509] small fix --- ortools/glop/revised_simplex.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index e9b26c8029..fcfe6ef92e 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -474,8 +474,8 @@ ABSL_MUST_USE_RESULT Status RevisedSimplex::SolveInternal( // it is hard to claim we are really unbounded. This is a quick // heuristic to error on the side of optimality rather than // unboundedness. 
- double max_magnitude = 0.0; - double min_distance = kInfinity; + Fractional max_magnitude = 0.0; + Fractional min_distance = kInfinity; const DenseRow& lower_bounds = variables_info_.GetVariableLowerBounds(); const DenseRow& upper_bounds = variables_info_.GetVariableUpperBounds(); Fractional cost_delta = 0.0; From 8d2d46fde84ba1d0ea407f2dd24aec3a2195e4c2 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 2 Jun 2025 14:14:32 +0200 Subject: [PATCH 049/509] reindent --- ortools/lp_data/lp_parser.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/lp_data/lp_parser.cc b/ortools/lp_data/lp_parser.cc index c8654c49e1..e872c4fe97 100644 --- a/ortools/lp_data/lp_parser.cc +++ b/ortools/lp_data/lp_parser.cc @@ -237,8 +237,8 @@ bool LPParser::ParseConstraint(StringPiece constraint) { namespace { -template -constexpr bool dependent_false = false; // workaround before CWG2518/P2593R1 +template +constexpr bool dependent_false = false; // workaround before CWG2518/P2593R1 template bool SimpleAtoFractional(absl::string_view str, T* value) { From 29fabd91f73b8d2f5d54af9c55c36fe5b92c1048 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 2 Jun 2025 14:53:51 +0200 Subject: [PATCH 050/509] [CP-SAT] stronger no_overlap_2d variation --- ortools/sat/BUILD.bazel | 1 + ortools/sat/cp_model_search.cc | 1 + ortools/sat/diffn.cc | 25 ++++++++++++++++++++----- 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 44c42cfcd1..ba675c7341 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -3570,6 +3570,7 @@ cc_library( ":synchronization", ":timetable", ":util", + "//ortools/base:stl_util", "//ortools/util:bitset", "//ortools/util:saturated_arithmetic", "//ortools/util:strong_integers", diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 84f28e1998..ff253b4041 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc 
@@ -188,6 +188,7 @@ void AddExtraSchedulingPropagators(SatParameters& new_params) { new_params.set_use_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_area_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_try_edge_reasoning_in_no_overlap_2d(true); + new_params.set_no_overlap_2d_boolean_relations_limit(100); } // We want a random tie breaking among variables with equivalent values. diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 078de5eb92..b848740606 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -32,6 +32,7 @@ #include "absl/log/vlog_is_on.h" #include "absl/numeric/bits.h" #include "absl/types/span.h" +#include "ortools/base/stl_util.h" #include "ortools/sat/2d_distances_propagator.h" #include "ortools/sat/2d_mandatory_overlap_propagator.h" #include "ortools/sat/2d_orthogonal_packing.h" @@ -276,11 +277,11 @@ void AddNonOverlappingRectangles(const std::vector& x, DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0); for (int i = 0; i < num_boxes; ++i) { - if (repository->IsOptional(x[i])) continue; - if (repository->IsOptional(y[i])) continue; + if (repository->IsAbsent(x[i])) continue; + if (repository->IsAbsent(y[i])) continue; for (int j = i + 1; j < num_boxes; ++j) { - if (repository->IsOptional(x[j])) continue; - if (repository->IsOptional(y[j])) continue; + if (repository->IsAbsent(x[j])) continue; + if (repository->IsAbsent(y[j])) continue; // At most one of these two x options is true. const Literal x_ij = repository->GetOrCreatePrecedenceLiteral( @@ -307,7 +308,21 @@ void AddNonOverlappingRectangles(const std::vector& x, } // At least one of the 4 options is true. 
- if (!sat_solver->AddProblemClause({x_ij, x_ji, y_ij, y_ji})) { + std::vector clause = {x_ij, x_ji, y_ij, y_ji}; + if (repository->IsOptional(x[i])) { + clause.push_back(repository->PresenceLiteral(x[i]).Negated()); + } + if (repository->IsOptional(y[i])) { + clause.push_back(repository->PresenceLiteral(y[i]).Negated()); + } + if (repository->IsOptional(x[j])) { + clause.push_back(repository->PresenceLiteral(x[j]).Negated()); + } + if (repository->IsOptional(y[j])) { + clause.push_back(repository->PresenceLiteral(y[j]).Negated()); + } + gtl::STLSortAndRemoveDuplicates(&clause); + if (!sat_solver->AddProblemClause(clause)) { return; } } From 55fbc3d2df69e9025c0d0d76828952fcaeb8a645 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 2 Jun 2025 15:41:00 +0200 Subject: [PATCH 051/509] fix --- ortools/sat/cp_model_solver_test.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ortools/sat/cp_model_solver_test.cc b/ortools/sat/cp_model_solver_test.cc index 96fb307224..63ab2fae0f 100644 --- a/ortools/sat/cp_model_solver_test.cc +++ b/ortools/sat/cp_model_solver_test.cc @@ -1072,7 +1072,8 @@ TEST(SolveCpModelTest, SolutionHintMinimizeL1DistanceTest) { Model model; model.Add( NewSatParameters("repair_hint:true, stop_after_first_solution:true, " - "keep_all_feasible_solutions_in_presolve:true")); + "keep_all_feasible_solutions_in_presolve:true " + "num_workers:1")); const CpSolverResponse response = SolveCpModel(model_proto, &model); EXPECT_THAT(response.status(), AnyOf(Eq(CpSolverStatus::OPTIMAL), Eq(CpSolverStatus::FEASIBLE))); From 8c9033f9ff746056bdc19c40f879f6d1987708af Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 14:52:21 +0200 Subject: [PATCH 052/509] make: Fix test_dotnet and test_java targets --- makefiles/Makefile.dotnet.mk | 4 ++-- makefiles/Makefile.java.mk | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/makefiles/Makefile.dotnet.mk b/makefiles/Makefile.dotnet.mk index 
a8bc1d5399..0b95e2efad 100644 --- a/makefiles/Makefile.dotnet.mk +++ b/makefiles/Makefile.dotnet.mk @@ -162,7 +162,7 @@ endif cd $(TEMP_DOTNET_DIR)$S$1$S$$* && "$(DOTNET_BIN)" clean -c Release -v minimal endef -DOTNET_SAMPLES := init algorithms graph constraint_solver linear_solver sat util +DOTNET_SAMPLES := init algorithms graph constraint_solver linear_solver routing sat util $(foreach sample,$(DOTNET_SAMPLES),$(eval $(call dotnet-sample-target,$(sample)))) # Examples @@ -307,7 +307,7 @@ endif cd $(TEMP_DOTNET_DIR)$S$1$S$$* && "$(DOTNET_BIN)" clean -c Release -v minimal endef -DOTNET_TESTS := init algorithms graph constraint_solver linear_solver sat util +DOTNET_TESTS := init algorithms graph constraint_solver linear_solver routing sat util $(foreach test,$(DOTNET_TESTS),$(eval $(call dotnet-test-target,$(test)))) #################### diff --git a/makefiles/Makefile.java.mk b/makefiles/Makefile.java.mk index e8e59a5cd0..da28cac308 100644 --- a/makefiles/Makefile.java.mk +++ b/makefiles/Makefile.java.mk @@ -176,7 +176,7 @@ rjava_%: \ cd $(TEMP_JAVA_DIR)$S$1$S$$* && "$(MVN_BIN)" exec:java $(ARGS) endef -JAVA_SAMPLES := init algorithms graph constraint_solver linear_solver sat util +JAVA_SAMPLES := init algorithms graph constraint_solver linear_solver routing sat util $(foreach sample,$(JAVA_SAMPLES),$(eval $(call java-sample-target,$(sample),$(subst _,,$(sample))))) # Examples @@ -275,7 +275,7 @@ rjava_%: \ cd $(TEMP_JAVA_DIR)$S$1$S$$* && "$(MVN_BIN)" test $(ARGS) endef -JAVA_TESTS := init algorithms graph constraint_solver linear_solver sat util +JAVA_TESTS := init algorithms graph constraint_solver linear_solver routing sat util $(foreach test,$(JAVA_TESTS),$(eval $(call java-test-target,$(test)))) #################### From c2777d5ee016c5e0cb241e9344d9ee61322b557a Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 15:17:30 +0200 Subject: [PATCH 053/509] java: update private key used by release scripts note: previous key is expired since 
2025-03 --- tools/release/private-key.gpg.enc | Bin 5248 -> 5248 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/tools/release/private-key.gpg.enc b/tools/release/private-key.gpg.enc index 03377db3536ff2a78d630b7a8859db5ec057c8bf..f6d31f17fb7f1512023f5b12de5be982a23eaea1 100644 GIT binary patch literal 5248 zcmV-`6o2beVQh3|WM5wpTa5tGQ6NVR#sJn~`zD!A-IKtV>;{kBR7|<0e9_4 zc>$K7H^56$uorRc7g{-C9tISi?$V~4hX!-kMlv@yQ4&ErLmp>CvRx8BC$^h5o4sEN zJ2Gp&cbFH2ebcod7u)7l<{In?Cl@B&AW3y=Pr^lz|9Oh@SM_JZ`W$M(fyChzPtg4O z1@?ZBhs7q++0k$3E>(vBdLOHO)$&rVvsbYXee|KmPW{!*{O#yw}BlkQL@=SCCMbpj$>mDQ1xk^=c%0-BuoaL?MHs+ zm)=0$QBH?IrYqpCsxYB^hs$1itv4l_eJBGev#SPRcA{yy(DeQ&Z^pbo%e>DA^kr78 z-`K^Fn+D@~>H2jebW%(u5Z7vZZch?Zjm&UtK*`j5$^*3+&rACU4)5duD75#1rO0vHVE z(wxb;Po|8_1h{Yi3mT1=N3v^&7tVV; z)^J)(Ff>LWysmh?e;sc=!9|zivtXEE6$Nd>(;vsAUK^P#><0`_%YeS8drQ@~6qCP~ zs^VW?{bs32Z@taMA-p5cJ7XRDDn==9yWkE7k)L#UsM9s4^(Y_y$sHH3^9e7^fyvT+ zR4gvM`LueKtAR8#3qed46IG@D)lpgUQJvyTOX6hX+Lp0d!hs3PYJr!mTB+(Z0%lD8 zz)9>;t9wOD235SSvzlH}hhu#uyy3|kcqF}3R4TL|2(8SOQBVpQ#M4WTr{j(%9Bz_V9)skQ$}X$#z~+liv|45zG&+ zQGXXRCPj5$Ovhc|L@qsA<{1K%9pbl}`m+9JY5Q8fi`ZsDV!o*7I^1)D=IrPPbVM%?jxlKJ}10-P+X%Ko2 z%_7ScDg82}K`g|Q%j`1oTM_Ma>rYeb5yaS1AbPM4f30I#nW9Cn3%b$qNR>YtFBj+k z>P%$|^LCGx{Hv^_)Es0pO z#$%B1_w!+{Ch?wxSbX=`ry?n>XqIR8vqL!&R`_b=%u;KTcT%b*RmX71c(kw9;oN?7 zMi#(zxRag0JiW^BBhI|5?B<`OkdLF{ojI|d&8ws3B`c+76nhs+Gg3{DvDJVOsJ7;n zzU3?|9B>3oWV?)La?2icN~=h9AQGtIA?3{C5^&mSHg&gFRy7r+75}Mm^gvTZ|M!mJ z+%FH--i6yOAK-q|_T_Js+iOZ2SRtv17WQQe8-u$9H(`)doj~e+6N339taoPWSSURL z5&HsHcn9fL9MvFnR)5tb3TW&V-<=@0}tpcmLJuTCt~j zaJy=`SwrudAe%L8JD*0B`yYLu9rZMbRpXZsE4|tXryEgf*b-jaRQBe?s9svI?-8H> z!i)uA8AlQ>soo18ZTScq1*x=aV0|gbBZCNpp{zRLUw6Q`HE3B&H zC6V+c&3>qsBWW6e^OkdhikI~&Z35@+z*H%p+bF7Fk3nm&obK>5FK8IH-KE&A!kZ%Q zZ(KxFF8|hEfBSuGjPI);i?hUqhaEy9$P>vJx|R?Y|9XNhnC86u&1DY|y*#oKh~{45 zvI|krEWB7f6iMQ#o1-ifA_)ABmS^xyM%b`w4lOwF?1!aqkH!95uLB&2)x$L_<@wK@ zVJ7TAS8IBrD3=K{5er=32`}y46QkuyOn5-Jk}9nnWmIG^Bq@8FD7y_`i@&h+{buk= 
z;O{~Z5MtOR5amc!3!-yJNF+p^4ZCqojKxmNP>wmE*V18~!PsQz>satX_Sw{o1+>Gf z+m?`by*;%T1izcl|DQ)UG%Ig`VZ(LxBh&Y!;sAf5lDn%T=j(BqGDi6ho#^3AZqrRJ z&6Y+a_aai0aD3B3fAcx~^Qx|+k?2%MxiGujMu6}0-J~-OV^Pe!_MVV>6#J!;mKW?D zPuy}c0xE=YRDQPoQV&YSW0*9w!5% zMb<7=vAI1_y*?XO3=#_DnkJC((@t?+LL<$IfWuI;TNkjUDoHXXg8a23W?1txhQ;H*E(!Qk2ut21Mfv8Ivc&KkZnO zlum0bNfjPUj(bzU%Z3s*`U{WS%9rSyQddQXCWYEc$Lp6TD~g)faSrz0eKK1Q z8VC_c@RTPdhi9hn5%WEQKpkP6|D22|FQ_6wB~hRbew`f60yln`ss)O44^c-#KFJE! zZaoTkeNxbjnaGYIgYK7!_8_4xfY6r&1k0CGQIhZ=tTb@U2zcl<9?H}$>fcOx`bQ4S z65tJ|;Vb4tybh+k;(+hE3VyH>sCafa1AG=SV}N9^FZU!4dgOcTT|hs1%}QG}NOD?d zm}k^Uq=zXF&zN3<1ajPq3=6VUda-qL4JYC9aNJT{a!WwUDH69l5eXdm)vbeMS2>k_ zNg;MUR*yw~_8gaHL>g5BOG8EKA%q~~SE*w3-4G$sA#`gmwl8%aLLb5wHe`*`im`Xr z;+ox-Vo4WAr}0|6;H%b*D?sIgqZ)JYU0S3M1d$P;n9Kf zxzhg(jTLT{w);-+!7qdn$k*TA-nCm7P4xyL4bfU%zj0ROfanpu6WxK{jkgakTgd@m zu)~aKSeaZgNUOOa(-*}5OKLBO`P?k2N(0qg$6cnZ9%-CLhb)`zTR=Y(71L?A#Da>G zD-zud@3ajXHpo6TP2mvJRcX!DSy|XTjWWD#P1@g`AW!`8T%uW)z zCXDNnJf6L~>{v8Lg7Hi>R0DYttwCS-1?(7|LD(q-5c-5f_$`5;SW-buV{8q`oz+i}qza=Q*`G+8)WD~YxJ7eI^6=(CY1oJf0XE)Y^y z?Eo8T%coaA-8-1rxRBrNn}mBeqGyq9-xDjC@FtN{sHe))h7!=B!%;z1g3E<|)1vL- zx3ccbP&d=`zARLB>{wlX`#^92EuCQqTD)c2-4gAMdsH25*4T^UQ3S0%;5ot!>K1e} zH0F`h9$drNO`IxLSuc8B-qO+r_(*PKl}KKE>WjEglr()pW_#T4@h^9zzH?8-TnSy6 z;iVe>G=+GT(^!k?EJdKZcyoPBrBrNI;icp(iJ#V&oiBGY>)Tarql#?j!5y%g!{X(q z3*rX_odTSj6?bxK*|miYyyw8IJqYUTn0`IWNz9{@hvMoTgsb)%U-i~OP1UE^r3OpH zn?sJL4(g_iKi86iWYQ!I6E8D(1Ufs*DR1hD{7_Go z$Na;|58j*?oGrt4^OO5YF_{ucS2V~Nx7m#r+ewMNkQX4vx=QNYvTP0a_S}B9zEIH1 z{v=fzM(xI0)^wB9b)1~+p!D88KKQj43APt?(XEgzoMaXyQ{6sxPJ87_t#ITLY=BKJ z>~2KdRvRB!jGLLYzi&Et|5I#!vzN8*(*j7Cx$B#d6@s`2h2{D(Rn1}~FD&YY>3Yib zK@0w=rJYdoeURcwB9OAJ%R!HG0l*AKs-7xl%@X%WcyoT*(U8YpktbsvnZVmR z^r!`O>Gn$_;T((9pj=eyvtrtTELHuXJFPwnRU2(fppQckH5Sf}TpXj*hEdX@t#3#O zR?7A723eAO%#~d!Owt zZN3xmS#yodr=L|{_#bFwlS{vCL|7q*uDivBLzZ~tDn(N6nyqdBN%AtfXI zDfk8z<=#}Lj;Gu|EbUvI9C!l2wPcu_$cREM_$~i4_FqXl_ zvSOa-|IL@Z0jo%vi`FT`{TVX*|4t51EO;{4@88pXR>T>xm!th>ZBos%#oL$u0k>|F 
z@hCG=@g13SN%{dB&-=)V?^Y9k{2xBj7n=9y-VHlvnF9yp=8M%3)R^LIT_a07z9o0R z`dQWCRuI`odJ^CAms-2`5L(}K0~GzW*C=HL(HFlol=g$DK|2*5ZBg|`gvh)2n8gY5 z5kN-^mfnOjqDv!Y+J-akl^}NCCK+TzU(;+$0VWx~Ng%{5B7L7df0Gj^kFax$=}Q%8 zbB=PS6-{Oo2QM~@d$d+t=);^6%FcQ!o!|r2OXpYuDYuSK91){nJ6dWH;5j<-9!7m^ z+HFrK1gpII62Z~gnZ3;$_ys+t6>bJtCMb++at5N+ufdj-P(MJUnlM6j>JHoZQc?fP zaT}(Ow(3x$fEPiua`$tf8prmgN%lcw^gaGIe49Moi$~rPu_K!P1ZA7aHqm$$zL1LEX#n`^l}h5*Yi9lXcdYqA7Ls`FxD%7dGp z(ohViV13Qrblk(3f^`5RK;0#W#j!~wHZgI~msSqE0`HO^6g^#NH`}`<@qO3@LflUV1jR$~EqG%Ae$qkf*ebjz2oZpk zL%%Zwk)W7E4&>f5nAlZ0JX+850M9p=E|hBK%_gf;2o#uqOnb%m>|*Ii^)hK3*LC_@ z`&?X(njd3~M?Fb&gZyuR&D2Ydi#%5@HPCdQ`PyepexZIrj&FPsiQ|i*v_&a2xkn5< z2E)jY07MJa^tkCMHb%AMkWJDjV~=mKO&_SAjvE#h7KzhOi{acj4U2EgC8`2xN~}&g zJdmw8E_Z@(k#h_3;cyyW4lM$b|M)cbg1S|%-8%)4UD-mp4IUm15-Rg6$Zrpc^N!`g z(S02#Jd)TwB;H3ezsd&KTHGv7T>W39!5^)!3nPnMBIhHvC#9^IlHc52JP!wwtwKq( z!tntaSqN<6Iw%AMJ13KePbf)5ZB+Bo(DuHoU#O1G`r(E5M!R0N+Yg_PZ4xN%Y}sS2yYZSg4_Q0S@V}5RM^B6h}duLz5@hQZ4Lkluopj zpC-BxGvkudv*WxHw|l_>`*Eyn@^t1W4h~-uU~uSUFnypyp(cPxqY*2~z}7WNed^J{ z{!VwU^$Fhxh<4h6D35V9%p>P>ZU(%noH7Jp$)NursP`nXI7LU;*~ zsD2hDAF)3fQZ!!R%!(?Td1i3KL;T28P`9JKER~~T1WVqgJL{E->GX?YUUx~026g9(hon=Q}M8Y21imJPW0d7CZqtN2ps@#(du(;KK)A1T2Xf++? 
zCLyWm6Qyt@B^yDGOUL0EvV3KqO&GVh`aUswW~U}scXEGl`DX(yx*=ajVDr3%-N55Z zP?${#&2Me9C9l$?4|cFfdr4YY(}g1_i16uVy&CcOs$-XFgGEwQV7{@D0koTg&V2f# zl4B@tdu<#P_PHZ0WVGkc;4R7?R-g?E?hX4tC{@fj@!=mE7bNJWrf_$)e_!%1tJB2} z76F1L-N+S6qPpuhQU8^!At|ZlNrDH&1}^Kvn5-+s|EW0cOaUGyO>n;vsk0g2u}oBfIA{TO#Kx zR&w`Fr7VbWjBQ|~4Q&4hD>;%eIa;8ga1EEdlxeMYCTo;R<1DN^*ul z!zqxbn=-ANbKP?DK9ixJjVGg!PtaFbn;BOA5{-aiz6mQ?ZE&Da2S$*k2|xn$E=lkJ@SiR$T3QuWHlV z$hUB%Aap1ZAk*BUVOr%qupHm-1@3FkgKzYuCE&wt)wW$3t2LB@8zpfa1T~7%B=F%hP$d2pOZe-u_+hm>~&MITn4oblFsH| z;jZ;A4d+rlf(%5TbD%ljD}t;8RhWqW1_#;Y^I>3t@3>-Ic;_EJRz=wOju}zGLWI&f z?*pM5i{U>%U6`+k2te)CQT@OqZ^XBSFEq0^Msl@CB>Aeok?8mbem%#$v_A4mkD~=V z)S)0ayVO6Ow~b>Df;ez1yI_3QzkV53_H-+fH4IH%xId^APT?Z)_ZWAq0-Nf`RUTCQ zLYMUCj(Cr@(8J0ed>vs-X(mz7h9VE^U;X%R8QRe`GGf<5Dft@HxZu-vGN4W{85TWq z@A~ffnX<{XIq6L$D4aJH5C-_K^HR+I%xLW0LQc@CjXBj5Y7m#k3J>F48~HxN+9n zbRe<~2=EX7(;-<(Ti^qia8OJhSFXctF1<&hBq;913M-`G-oi}!r+E%0Jp{~Gk(t?C zoEU#KV$}WuVA2%19hi^)LTNTnA6T3vARug7KlB||!VyD(vgBFHZi_3PEm6@ctCj`w zJa~!5!J8TCKs0#)JV}xNnJ<% zaQ&4FAOP77>KL|fU&fQFzGE|Gx~Z-H8*nJN)9U`L7UJMF{#U;0AtcI($fLKt2t-Jk zFXeU?yCOg*)^(OEkk8wlP2nwvGXZ{wT__J0zv4X0qW<1zY6|xWIf1oX{FrUA&*!~a z5F1b0(Ws{|-_4aW^Fv}W8DbX?Zqt=m)cm~RG`Q(-Sk$2TRUyyJi%aOY^ zhC@kyuVRTwhS0`=7(IEm*KN37>cG0eo{3d+s=yACp`@HxrX+qaWOftR$i~RISu97e zionxfRiJ#*Th1gtU1U~r5LNP-qr~J2dbi#^DtTm02n+d*Ox5k}{#2R^G>-8o*K#T~ z8#DTMjyMub^2nXsetBixD0GD%%80gVqb;~vfwg5Jsb z-b5gOcN(p0#jo6Hjf*;cMQ4&c>oueVDEoqn*Ucqp#Cxgc1p3*9EbQNb< zrN|1&E1VS2SeGI5*zH>fTzA>Ri9inrCyPnij2t5FHF{iXMMzV zkiGWQcdd9|x343*tIDVS-^Ilm9OkP(x#7oQs(m?Tz973A>}Jjx*R+k(#lfhY zo`5fcY=SpM@8;+TK8({l{=$vkzm#Y{Y#=ZTO6kAG<)Sx^p-|9485yptks|VpX_+@) z02lR+oOO%VitC8ILHie{y>G>zqL(ib`^Zg1OUfwlt!5|9#qTMmSKx)+Lm&D-EvwbU zzb_!hH2c&q=W8=Wr49VMBIVfhRQK$Z7vQ;oa|6TDN9_^Tna`D-8tD|@H)vRsP#*JB z=VaDl*`6W{zB>*Fvs{a%@*>WHS41eKgG;S|&|R?Lma>=6b~onc^WU796?wR+BVk^{ zuQwoaV<~vZEUN5o@A^VM&@J$8jtb(Ia5vX;;?E8|G5RyQCqONkm&cD%9OG25BB;|L zstuO#BzI9v3P$VzPGoju$tm0Ck!km3o4o7IWL^?TrcKbKUJC5FUJ#(Y=M252m;M9N 
z-XY0-aO#A`x8hw%#nq`BvoH;873m%N@n+Ttu;M`BVCbqC25UK7X^1&o$f=$owfY6g zy~U57O(v$VL=-=hErJ8$*|7wZ;V3h!ABwU}B0Y}hbEZt1CXE0qKadxmx5qfj6u8a|nZlW;KFaI8 zj&CkP7W5iJEad!aYxFwX+5mCPP7;cScd0*J2ltlRcfJ87S*TI=p6}O1tPKQ7_Vs_l z5ZA4l{FpaQ4VU&f!!B+VXB;=dXUTqwHT$ zmk=ZevfCc?WBB`Mr|@9f`eM=ya&5@2n)n}6NU0VMAb@6JoP>kr^PP4Tyni>1wG`;|4#iL~NSwde_%NI1)B|)`5z{=Mh0HpU1tUrM zFRm;F&{}jJUJAO)r@%1A(ZVeS87-%?qFehYTkQV-f-{)?-@(l$-a~J!)11w4KcSB6 zwbJOhDwe znQIgxvS2dw)dP*&v7A=@TDcey_UD3;kyK>J4-*eQ36>Y2_Fc-(7o2ZR7`KXoqzcK+gIc=#4_ zJCpNqYynbq_R+lg!QJxYEv#~)hR}k2_a#72xLRS-ss*Fa3bURU&zOx&d9rXpw#4t< zl3bD{2QHTa6o7x2gO|M_e`#gNU-^nhp270oaUWS5YRF~elG3||<2i(=bG_Dz%n18e z*PL9Rv*7R`q+-(B3}<+IuHBJk7sow88E`1XPzb1Sx1_)4D9?mOc$?w^jp$?8*+1zOdyPJ8ln>v z_pm7S$Hd>_R`Rv(vu*6~`qSZ(m^?Myc*}IS*iiS4MAua9ijvTw*^p8W; zTJvO2g2OGIY!wCKKmK@&Rk4pgJ|3a)h84Cj&C=~#BzUgZbro1*jB1SxL>?NNxdla` zbgHoA@p#Wvgkh5CgEIcogGR~)t_U`Cd!^y|ml!y^RpY{9>D2nmLxv?zcoTteLBy#D zyks%WON1gLw%c!EZpg(ksFs}#M=mLLh9VG2DD1`(2CZIq?Mcc`T9&~0`eG-Hbuxt( zvAq%_Mfwf!cIJl@|Jy^(0C}-&sQ3j-{}EQepk^7MH*4uU$KhyMPN#H_Yw(VTK?(sa zk>%a>kI_)XfeEG^IZaX#m&2Erj&#-6dT*eqG5s?6QJ#7#B;4jah}dJSUq^&M!o+KO zEcsC6$}0a_CpF__tg1lB-eF5If@VbTI)}2$l G)N7#aAx#tj From 8be2627644a2338a741296504a0be68d3930c149 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 16:57:14 +0200 Subject: [PATCH 054/509] ci/tools: Simplify alpine .Net 8.0 SDK install --- cmake/docker/alpine/dotnet.Dockerfile | 14 +---------- makefiles/docker/alpine/dotnet.Dockerfile | 16 ++---------- tools/docker/images/alpine-edge.Dockerfile | 24 ++---------------- .../docker/test/alpine-edge/dotnet.Dockerfile | 25 ++----------------- 4 files changed, 7 insertions(+), 72 deletions(-) diff --git a/cmake/docker/alpine/dotnet.Dockerfile b/cmake/docker/alpine/dotnet.Dockerfile index ddc72c4801..fc0cf30351 100644 --- a/cmake/docker/alpine/dotnet.Dockerfile +++ b/cmake/docker/alpine/dotnet.Dockerfile @@ -1,19 +1,7 @@ FROM ortools/cmake:alpine_swig AS env # .NET install -RUN apk add --no-cache wget icu-libs libintl \ 
-&& mkdir -p /usr/share/dotnet \ -&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet - -## .Net 8.0 -## see: https://dotnet.microsoft.com/download/dotnet-core/8.0 -RUN dotnet_sdk_version=8.0.408 \ -&& wget -qO dotnet.tar.gz \ -"https://dotnetcli.azureedge.net/dotnet/Sdk/$dotnet_sdk_version/dotnet-sdk-${dotnet_sdk_version}-linux-musl-x64.tar.gz" \ -&& dotnet_sha512='0ab0c0d52985bde69b594454b5e1d9e1a6e003159656ee2972058d2960cfb0968dbe4d470d8eb21dcea41ff594976520e189a8e13afc44a419ca08e456df36e1' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz +RUN apk add --no-cache dotnet8-sdk # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/makefiles/docker/alpine/dotnet.Dockerfile b/makefiles/docker/alpine/dotnet.Dockerfile index a3a6973d03..9924db424f 100644 --- a/makefiles/docker/alpine/dotnet.Dockerfile +++ b/makefiles/docker/alpine/dotnet.Dockerfile @@ -1,19 +1,7 @@ FROM ortools/make:alpine_swig AS env -# Install .Net -RUN apk add --no-cache wget icu-libs libintl \ -&& mkdir -p /usr/share/dotnet \ -&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet - -## .Net 6.0 -## see: https://dotnet.microsoft.com/download/dotnet-core/6.0 -RUN dotnet_sdk_version=6.0.405 \ -&& wget -qO dotnet.tar.gz \ -"https://dotnetcli.azureedge.net/dotnet/Sdk/$dotnet_sdk_version/dotnet-sdk-${dotnet_sdk_version}-linux-musl-x64.tar.gz" \ -&& dotnet_sha512='ca98ebc5858339c5ce4164f5f5932a5bf8aae9f13d54adf382a41f5e6d1302df278bd7e218ecc2f651dcf67e705c40c43347cd33956732c6bd88d3b3d2881b84' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz +# .NET install +RUN apk add --no-cache dotnet8-sdk # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/images/alpine-edge.Dockerfile b/tools/docker/images/alpine-edge.Dockerfile index fb4ddfa585..96c04328c6 100644 --- 
a/tools/docker/images/alpine-edge.Dockerfile +++ b/tools/docker/images/alpine-edge.Dockerfile @@ -13,28 +13,8 @@ CMD ["/bin/sh"] # Install Swig RUN apk add --no-cache swig -# Install .Net -RUN apk add --no-cache wget icu-libs libintl \ -&& mkdir -p /usr/share/dotnet \ -&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet -## .Net 3.1 -## see: https://dotnet.microsoft.com/download/dotnet-core/3.1 -RUN dotnet_sdk_version=3.1.415 \ -&& wget -qO dotnet.tar.gz \ -"https://builds.dotnet.microsoft.com/dotnet/Sdk/${dotnet_sdk_version}/dotnet-sdk-${dotnet_sdk_version}-linux-musl-x64.tar.gz" \ -&& dotnet_sha512='20297eb436db2fe0cb3d8edfe4ad5b7c7871116616843314830533471a344f0ca943fbc5f92685113afc331a64c90f271245a36be1c232c364add936dd06d13d' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz -## .Net 6.0 -## see: https://dotnet.microsoft.com/download/dotnet-core/6.0 -RUN dotnet_sdk_version=6.0.405 \ -&& wget -qO dotnet.tar.gz \ -"https://builds.dotnet.microsoft.com/dotnet/Sdk/$dotnet_sdk_version/dotnet-sdk-${dotnet_sdk_version}-linux-musl-x64.tar.gz" \ -&& dotnet_sha512='ca98ebc5858339c5ce4164f5f5932a5bf8aae9f13d54adf382a41f5e6d1302df278bd7e218ecc2f651dcf67e705c40c43347cd33956732c6bd88d3b3d2881b84' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz +# .NET install +RUN apk add --no-cache dotnet8-sdk # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/test/alpine-edge/dotnet.Dockerfile b/tools/docker/test/alpine-edge/dotnet.Dockerfile index e420b4396e..788bc91d0e 100644 --- a/tools/docker/test/alpine-edge/dotnet.Dockerfile +++ b/tools/docker/test/alpine-edge/dotnet.Dockerfile @@ -10,29 +10,8 @@ RUN apk add --no-cache git build-base linux-headers make ENTRYPOINT ["/bin/sh", "-c"] CMD ["/bin/sh"] -# Install .Net -RUN apk add --no-cache wget icu-libs libintl \ -&& mkdir -p 
/usr/share/dotnet \ -&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet - -# see: https://dotnet.microsoft.com/download/dotnet-core/3.1 -RUN dotnet_sdk_version=3.1.413 \ -&& wget -qO dotnet.tar.gz https://builds.dotnet.microsoft.com/dotnet/Sdk/$dotnet_sdk_version/dotnet-sdk-$dotnet_sdk_version-linux-musl-x64.tar.gz \ -&& dotnet_sha512='46ffb31754b295cdb7dc615d5f905aa5842e3ada0e3f975217dfecbaa94e7b0190e86136fe9693d354b6ef88faa83e1c48496ffb1d644bd7ff437aeb48b9229c' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz -# Trigger first run experience by running arbitrary cmd -RUN dotnet --info - -# see: https://dotnet.microsoft.com/download/dotnet-core/6.0 -RUN dotnet_sdk_version=6.0.100 \ -&& wget -qO dotnet.tar.gz \ -"https://builds.dotnet.microsoft.com/dotnet/Sdk/$dotnet_sdk_version/dotnet-sdk-${dotnet_sdk_version}-linux-musl-x64.tar.gz" \ -&& dotnet_sha512='428082c31fd588b12fd34aeae965a58bf1c26b0282184ae5267a85cdadc503f667c7c00e8641892c97fbd5ef26a38a605b683b45a0fef2da302ec7f921cf64fe' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz +# .NET install +RUN apk add --no-cache dotnet8-sdk # Trigger first run experience by running arbitrary cmd RUN dotnet --info From 22be41357cfe3b0fb55e863add58c0964283bb9e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 17:22:12 +0200 Subject: [PATCH 055/509] dotnet: get rid of last remaining .Net 6.0 --- makefiles/docker/debian/dotnet.Dockerfile | 2 +- makefiles/docker/fedora/dotnet.Dockerfile | 2 +- makefiles/docker/opensuse/dotnet.Dockerfile | 20 +++++++------------- tools/release/amd64.Dockerfile | 2 +- tools/release/arm64.Dockerfile | 2 +- 5 files changed, 11 insertions(+), 17 deletions(-) diff --git a/makefiles/docker/debian/dotnet.Dockerfile b/makefiles/docker/debian/dotnet.Dockerfile index bed49fcb94..08af62ec16 100644 --- 
a/makefiles/docker/debian/dotnet.Dockerfile +++ b/makefiles/docker/debian/dotnet.Dockerfile @@ -6,7 +6,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -yq dotnet-sdk-6.0 \ +&& apt-get install -yq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/makefiles/docker/fedora/dotnet.Dockerfile b/makefiles/docker/fedora/dotnet.Dockerfile index eb0946bd5d..8ec8515c13 100644 --- a/makefiles/docker/fedora/dotnet.Dockerfile +++ b/makefiles/docker/fedora/dotnet.Dockerfile @@ -2,7 +2,7 @@ FROM ortools/make:fedora_swig AS env # see: https://docs.microsoft.com/en-us/dotnet/core/install/linux-fedora RUN dnf -y update \ -&& dnf -y install dotnet-sdk-6.0 \ +&& dnf -y install dotnet-sdk-8.0 \ && dnf clean all # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/makefiles/docker/opensuse/dotnet.Dockerfile b/makefiles/docker/opensuse/dotnet.Dockerfile index 8a2ec61cdf..9a4b082894 100644 --- a/makefiles/docker/opensuse/dotnet.Dockerfile +++ b/makefiles/docker/opensuse/dotnet.Dockerfile @@ -1,21 +1,15 @@ FROM ortools/make:opensuse_swig AS env -# Install .Net +# .NET install # see: https://docs.microsoft.com/en-us/dotnet/core/install/linux-opensuse RUN zypper refresh \ -&& zypper install -y wget tar gzip libicu-devel -RUN mkdir -p /usr/share/dotnet \ -&& ln -s /usr/share/dotnet/dotnet /usr/bin/dotnet +&& zypper install -y wget tar awk gzip libicu-devel findutils -## .Net 6.0 -## see: https://dotnet.microsoft.com/download/dotnet-core/6.0 -RUN dotnet_sdk_version=6.0.100 \ -&& wget -qO dotnet.tar.gz \ -"https://dotnetcli.azureedge.net/dotnet/Sdk/${dotnet_sdk_version}/dotnet-sdk-${dotnet_sdk_version}-linux-x64.tar.gz" \ -&& 
dotnet_sha512='cb0d174a79d6294c302261b645dba6a479da8f7cf6c1fe15ae6998bc09c5e0baec810822f9e0104e84b0efd51fdc0333306cb2a0a6fcdbaf515a8ad8cf1af25b' \ -&& echo "$dotnet_sha512 dotnet.tar.gz" | sha512sum -c - \ -&& tar -C /usr/share/dotnet -oxzf dotnet.tar.gz \ -&& rm dotnet.tar.gz +## .Net 8.0 +# see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install +RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ +&& chmod a+x dotnet-install.sh \ +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/release/amd64.Dockerfile b/tools/release/amd64.Dockerfile index 58f2d2a174..d34f4c4725 100644 --- a/tools/release/amd64.Dockerfile +++ b/tools/release/amd64.Dockerfile @@ -25,7 +25,7 @@ CMD ["/usr/bin/bash"] RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& ./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/release/arm64.Dockerfile b/tools/release/arm64.Dockerfile index d04bd506ac..3e404fa439 100644 --- a/tools/release/arm64.Dockerfile +++ b/tools/release/arm64.Dockerfile @@ -28,7 +28,7 @@ CMD ["/usr/bin/bash"] RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ && ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ -&& ./dotnet-install.sh -c 6.0 -i /usr/local/bin +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd #RUN objdump -p /lib64/libstdc++.so.6 #RUN g++ --version From c22621bd52256f242f010d8be71ed4a9dc101d00 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 2 Jun 2025 17:53:48 +0200 Subject: [PATCH 056/509] dotnet: Remove EOL .Net Core 3.1 in release artifacts Please target .Net 8.0 or above... 
ref: https://dotnet.microsoft.com/en-us/platform/support/policy/dotnet-core --- cmake/docker/almalinux/dotnet.Dockerfile | 1 - cmake/docker/rockylinux/dotnet.Dockerfile | 1 - tools/docker/Makefile | 2 +- tools/docker/images/almalinux-9.Dockerfile | 2 -- tools/docker/images/alpine-edge.Dockerfile | 1 - tools/docker/images/archlinux.Dockerfile | 1 - tools/docker/images/debian-11.Dockerfile | 2 -- tools/docker/images/debian-12.Dockerfile | 2 -- tools/docker/images/debian-13.Dockerfile | 2 -- tools/docker/images/debian-sid.Dockerfile | 5 ++--- tools/docker/images/fedora-40.Dockerfile | 1 - tools/docker/images/fedora-41.Dockerfile | 1 - tools/docker/images/fedora-42.Dockerfile | 1 - tools/docker/images/opensuse-leap.Dockerfile | 2 -- tools/docker/images/rockylinux-9.Dockerfile | 2 -- tools/docker/images/ubuntu-20.04.Dockerfile | 3 +-- tools/docker/images/ubuntu-22.04.Dockerfile | 1 - tools/docker/images/ubuntu-24.04.Dockerfile | 1 - tools/docker/images/ubuntu-24.10.Dockerfile | 1 - tools/docker/test/debian-11/dotnet.Dockerfile | 2 +- tools/docker/test/debian-12/dotnet.Dockerfile | 2 +- tools/docker/test/debian-13/dotnet.Dockerfile | 2 +- tools/docker/test/debian-sid/dotnet.Dockerfile | 6 +++--- tools/docker/test/opensuse-leap/dotnet.Dockerfile | 1 - tools/docker/test/ubuntu-20.04/dotnet.Dockerfile | 2 +- tools/release/amd64.Dockerfile | 1 - tools/release/arm64.Dockerfile | 1 - 27 files changed, 11 insertions(+), 38 deletions(-) diff --git a/cmake/docker/almalinux/dotnet.Dockerfile b/cmake/docker/almalinux/dotnet.Dockerfile index 568088c64f..0fc850366d 100644 --- a/cmake/docker/almalinux/dotnet.Dockerfile +++ b/cmake/docker/almalinux/dotnet.Dockerfile @@ -4,7 +4,6 @@ FROM ortools/cmake:almalinux_swig AS env # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 
-i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/cmake/docker/rockylinux/dotnet.Dockerfile b/cmake/docker/rockylinux/dotnet.Dockerfile index 7e4fbb4995..f0c2db853e 100644 --- a/cmake/docker/rockylinux/dotnet.Dockerfile +++ b/cmake/docker/rockylinux/dotnet.Dockerfile @@ -4,7 +4,6 @@ FROM ortools/cmake:rockylinux_swig AS env # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/Makefile b/tools/docker/Makefile index ccb9bf8703..ca398cbc73 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile @@ -116,7 +116,7 @@ help: @echo @echo -e "\t${BOLD}${RESET}: Language to build" @echo -e "\t\t${BOLD}cpp${RESET} C++" - @echo -e "\t\t${BOLD}dotnet${RESET} .Net Core 3.1 and/or .Net 8.0 wrappers" + @echo -e "\t\t${BOLD}dotnet${RESET} .Net 8.0 wrappers" @echo -e "\t\t${BOLD}java${RESET} Java (JDK 8.0) wrappers" @echo -e "\t\t${BOLD}python${RESET} Python 3.9+ wrappers" @echo diff --git a/tools/docker/images/almalinux-9.Dockerfile b/tools/docker/images/almalinux-9.Dockerfile index 55bc67a1ca..4a6102159c 100644 --- a/tools/docker/images/almalinux-9.Dockerfile +++ b/tools/docker/images/almalinux-9.Dockerfile @@ -32,7 +32,6 @@ RUN dnf -y update \ # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info @@ -90,7 +89,6 @@ RUN make archive_cpp ## build FROM cpp_build AS dotnet_build RUN sed -i 
's/\(\).*\(<\/SignAssembly>\)/\1false\2/' ortools/dotnet/Google.OrTools*.csproj.in -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/alpine-edge.Dockerfile b/tools/docker/images/alpine-edge.Dockerfile index 96c04328c6..3e57be822e 100644 --- a/tools/docker/images/alpine-edge.Dockerfile +++ b/tools/docker/images/alpine-edge.Dockerfile @@ -66,7 +66,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/archlinux.Dockerfile b/tools/docker/images/archlinux.Dockerfile index 76c9c64ba9..5ad355cfe5 100644 --- a/tools/docker/images/archlinux.Dockerfile +++ b/tools/docker/images/archlinux.Dockerfile @@ -67,7 +67,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=OFF RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/debian-11.Dockerfile b/tools/docker/images/debian-11.Dockerfile index 6d06748800..2c514de3f8 100644 --- a/tools/docker/images/debian-11.Dockerfile +++ b/tools/docker/images/debian-11.Dockerfile @@ -24,7 +24,6 @@ RUN ARCH=$(uname -m) \ # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info @@ -83,7 +82,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/debian-12.Dockerfile b/tools/docker/images/debian-12.Dockerfile index 4ec3143266..424fd31282 100644 --- a/tools/docker/images/debian-12.Dockerfile +++ b/tools/docker/images/debian-12.Dockerfile 
@@ -17,7 +17,6 @@ CMD ["/bin/bash"] # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info @@ -77,7 +76,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/debian-13.Dockerfile b/tools/docker/images/debian-13.Dockerfile index eea5a1992e..1616e3c48f 100644 --- a/tools/docker/images/debian-13.Dockerfile +++ b/tools/docker/images/debian-13.Dockerfile @@ -17,7 +17,6 @@ CMD ["/bin/bash"] # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info @@ -76,7 +75,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/debian-sid.Dockerfile b/tools/docker/images/debian-sid.Dockerfile index fc48b81fc5..1b4803b773 100644 --- a/tools/docker/images/debian-sid.Dockerfile +++ b/tools/docker/images/debian-sid.Dockerfile @@ -18,10 +18,10 @@ RUN apt update -qq \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Install .Net -# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-11- +# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-13- RUN apt-get update -qq \ && apt-get install -qq gpg apt-transport-https \ -&& wget -q 
"https://packages.microsoft.com/config/debian/11/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ +&& wget -q "https://packages.microsoft.com/config/debian/13/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ @@ -88,7 +88,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/fedora-40.Dockerfile b/tools/docker/images/fedora-40.Dockerfile index 1a5a8dd8f2..0fd6b0c905 100644 --- a/tools/docker/images/fedora-40.Dockerfile +++ b/tools/docker/images/fedora-40.Dockerfile @@ -74,7 +74,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/fedora-41.Dockerfile b/tools/docker/images/fedora-41.Dockerfile index 3319aa0d36..4f5efeaef3 100644 --- a/tools/docker/images/fedora-41.Dockerfile +++ b/tools/docker/images/fedora-41.Dockerfile @@ -76,7 +76,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/fedora-42.Dockerfile b/tools/docker/images/fedora-42.Dockerfile index 33a8524552..78472a4412 100644 --- a/tools/docker/images/fedora-42.Dockerfile +++ b/tools/docker/images/fedora-42.Dockerfile @@ -76,7 +76,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/opensuse-leap.Dockerfile b/tools/docker/images/opensuse-leap.Dockerfile index b3efec0857..300efe9555 100644 --- a/tools/docker/images/opensuse-leap.Dockerfile +++ b/tools/docker/images/opensuse-leap.Dockerfile @@ -25,7 +25,6 @@ RUN zypper refresh \ # see: 
https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info @@ -75,7 +74,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/rockylinux-9.Dockerfile b/tools/docker/images/rockylinux-9.Dockerfile index c70ec35b84..c885bb3d87 100644 --- a/tools/docker/images/rockylinux-9.Dockerfile +++ b/tools/docker/images/rockylinux-9.Dockerfile @@ -32,7 +32,6 @@ RUN dnf -y update \ # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info @@ -90,7 +89,6 @@ RUN make archive_cpp ## build FROM cpp_build AS dotnet_build RUN sed -i 's/\(\).*\(<\/SignAssembly>\)/\1false\2/' ortools/dotnet/Google.OrTools*.csproj.in -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/ubuntu-20.04.Dockerfile b/tools/docker/images/ubuntu-20.04.Dockerfile index 6a0e6cf91d..01564cf3ff 100644 --- a/tools/docker/images/ubuntu-20.04.Dockerfile +++ b/tools/docker/images/ubuntu-20.04.Dockerfile @@ -33,7 +33,7 @@ RUN apt-get update -qq \ && wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb \ && dpkg -i packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -yq dotnet-sdk-3.1 dotnet-sdk-8.0 \ +&& apt-get install -yq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf 
/var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd @@ -93,7 +93,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/ubuntu-22.04.Dockerfile b/tools/docker/images/ubuntu-22.04.Dockerfile index 92f0c60a29..2b6a884c10 100644 --- a/tools/docker/images/ubuntu-22.04.Dockerfile +++ b/tools/docker/images/ubuntu-22.04.Dockerfile @@ -84,7 +84,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=OFF RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/ubuntu-24.04.Dockerfile b/tools/docker/images/ubuntu-24.04.Dockerfile index 471aa49052..9cc69b1380 100644 --- a/tools/docker/images/ubuntu-24.04.Dockerfile +++ b/tools/docker/images/ubuntu-24.04.Dockerfile @@ -85,7 +85,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=OFF RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/images/ubuntu-24.10.Dockerfile b/tools/docker/images/ubuntu-24.10.Dockerfile index 2ae962210e..290a38decb 100644 --- a/tools/docker/images/ubuntu-24.10.Dockerfile +++ b/tools/docker/images/ubuntu-24.10.Dockerfile @@ -85,7 +85,6 @@ RUN make archive_cpp # .Net ## build FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=OFF RUN make detect_dotnet \ && make dotnet JOBS=8 ## archive diff --git a/tools/docker/test/debian-11/dotnet.Dockerfile b/tools/docker/test/debian-11/dotnet.Dockerfile index 0c1344ab6b..6220739273 100644 --- a/tools/docker/test/debian-11/dotnet.Dockerfile +++ b/tools/docker/test/debian-11/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ +&& apt-get install -qq dotnet-sdk-8.0 \ && apt-get 
clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/debian-12/dotnet.Dockerfile b/tools/docker/test/debian-12/dotnet.Dockerfile index 6d3c5543d4..cfae176874 100644 --- a/tools/docker/test/debian-12/dotnet.Dockerfile +++ b/tools/docker/test/debian-12/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ +&& apt-get install -qq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/debian-13/dotnet.Dockerfile b/tools/docker/test/debian-13/dotnet.Dockerfile index 571d312853..cf0334c796 100644 --- a/tools/docker/test/debian-13/dotnet.Dockerfile +++ b/tools/docker/test/debian-13/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ +&& apt-get install -qq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/debian-sid/dotnet.Dockerfile b/tools/docker/test/debian-sid/dotnet.Dockerfile index 506c4048ad..5ebd330602 100644 --- a/tools/docker/test/debian-sid/dotnet.Dockerfile +++ b/tools/docker/test/debian-sid/dotnet.Dockerfile @@ -7,14 +7,14 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Install .Net -# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-11- +# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-13- RUN apt-get update -qq \ && apt-get install -qq gpg apt-transport-https \ -&& wget -q 
"https://packages.microsoft.com/config/debian/11/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ +&& wget -q "https://packages.microsoft.com/config/debian/13/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ && dpkg -i packages-microsoft-prod.deb \ && rm packages-microsoft-prod.deb \ && apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-3.1 dotnet-sdk-8.0 \ +&& apt-get install -qq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git a/tools/docker/test/opensuse-leap/dotnet.Dockerfile b/tools/docker/test/opensuse-leap/dotnet.Dockerfile index 8153d9b9e6..89b82a53dc 100644 --- a/tools/docker/test/opensuse-leap/dotnet.Dockerfile +++ b/tools/docker/test/opensuse-leap/dotnet.Dockerfile @@ -18,7 +18,6 @@ RUN zypper refresh \ # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile b/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile index fa5faca501..eed4448aea 100644 --- a/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile +++ b/tools/docker/test/ubuntu-20.04/dotnet.Dockerfile @@ -14,7 +14,7 @@ RUN apt-get update -qq \ && wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb \ && dpkg -i packages-microsoft-prod.deb \ && apt-get update -qq \ -&& DEBIAN_FRONTEND=noninteractive apt-get install -yq dotnet-sdk-3.1 dotnet-sdk-8.0 \ +&& DEBIAN_FRONTEND=noninteractive apt-get install -yq dotnet-sdk-8.0 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # Trigger first run experience by running arbitrary cmd diff --git 
a/tools/release/amd64.Dockerfile b/tools/release/amd64.Dockerfile index d34f4c4725..b95ad2405e 100644 --- a/tools/release/amd64.Dockerfile +++ b/tools/release/amd64.Dockerfile @@ -24,7 +24,6 @@ CMD ["/usr/bin/bash"] # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info diff --git a/tools/release/arm64.Dockerfile b/tools/release/arm64.Dockerfile index 3e404fa439..b19b71c8fc 100644 --- a/tools/release/arm64.Dockerfile +++ b/tools/release/arm64.Dockerfile @@ -27,7 +27,6 @@ CMD ["/usr/bin/bash"] # see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ && chmod a+x dotnet-install.sh \ -&& ./dotnet-install.sh -c 3.1 -i /usr/local/bin \ && ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd #RUN objdump -p /lib64/libstdc++.so.6 From a90d9d9e1be9100df58f9b3e929f11f81e3a9183 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 3 Jun 2025 15:57:20 +0200 Subject: [PATCH 057/509] pdlp: Fix test when using pytest 8.4.0 note: test_lp was considered as a test leading to the error ```sh (cd build && ctest -C Release -R "python_pdlp_") Test project .../build Start 175: python_pdlp_pdlp_test 1/2 Test #175: python_pdlp_pdlp_test .............***Failed 1.94 sec ============================= test session starts ============================== platform linux -- Python 3.13.2, pytest-8.4.0, pluggy-1.6.0 ... ../../../ortools/pdlp/python/pdlp_test.py .....F.... 
[100%] =================================== FAILURES =================================== ___________________________________ test_lp ____________________________________ Expected None, but test returned . Did you mean to use `assert` instead of `return`? =========================== short test summary info ============================ FAILED ../../../ortools/pdlp/python/pdlp_test.py::test_lp - Failed: Expected ... ========================= 1 failed, 9 passed in 1.40s ========================== ``` --- ortools/pdlp/python/pdlp_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/pdlp/python/pdlp_test.py b/ortools/pdlp/python/pdlp_test.py index b812e41527..5464385a82 100644 --- a/ortools/pdlp/python/pdlp_test.py +++ b/ortools/pdlp/python/pdlp_test.py @@ -157,7 +157,7 @@ def tiny_lp(): return qp -def test_lp(): +def small_lp(): """Returns a small LP with all 4 patterns lower and upper bounds. min 5.5 x_0 - 2 x_1 - x_2 + x_3 - 14 s.t. @@ -221,7 +221,7 @@ class PrimalDualHybridGradientTest(absltest.TestCase): opt_criteria = params.termination_criteria.simple_optimality_criteria opt_criteria.eps_optimal_relative = 0.0 opt_criteria.eps_optimal_absolute = 1.0e-10 - result = pdlp.primal_dual_hybrid_gradient(test_lp(), params) + result = pdlp.primal_dual_hybrid_gradient(small_lp(), params) self.assertEqual( result.solve_log.termination_reason, solve_log_pb2.TERMINATION_REASON_OPTIMAL, From f500358d19ca5a17572e9b50149b45ff6a93720e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 3 Jun 2025 17:25:06 +0200 Subject: [PATCH 058/509] tools/docker: fix images --- tools/docker/Makefile | 2 +- tools/docker/images/debian-13.Dockerfile | 14 +++++----- tools/docker/images/debian-sid.Dockerfile | 27 +++++++------------ tools/docker/images/fedora-41.Dockerfile | 2 +- tools/docker/images/fedora-42.Dockerfile | 4 +-- tools/docker/images/ubuntu-22.04.Dockerfile | 13 ++++++--- tools/docker/test/fedora-41/cpp.Dockerfile | 2 +- 
tools/docker/test/fedora-41/dotnet.Dockerfile | 2 +- tools/docker/test/fedora-41/java.Dockerfile | 2 +- tools/docker/test/fedora-41/python.Dockerfile | 2 +- tools/docker/test/fedora-42/cpp.Dockerfile | 2 +- tools/docker/test/fedora-42/dotnet.Dockerfile | 2 +- tools/docker/test/fedora-42/java.Dockerfile | 4 +-- tools/docker/test/fedora-42/python.Dockerfile | 2 +- 14 files changed, 40 insertions(+), 40 deletions(-) diff --git a/tools/docker/Makefile b/tools/docker/Makefile index ca398cbc73..70c88c92db 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile @@ -93,7 +93,7 @@ help: @echo -e "\t\t${BOLD}alpine-edge${RESET} (latest)" @echo -e "\t\t${BOLD}archlinux${RESET} (latest)" @echo -e "\t\t${BOLD}debian-sid${RESET} (unstable)" -# @echo -e "\t\t${BOLD}debian-13${RESET} (Trixie)" + #@echo -e "\t\t${BOLD}debian-13${RESET} (Trixie)" @echo -e "\t\t${BOLD}debian-12${RESET} (Bookworm)" @echo -e "\t\t${BOLD}debian-11${RESET} (Bullseye)" @echo -e "\t\t${BOLD}fedora-42${RESET}" diff --git a/tools/docker/images/debian-13.Dockerfile b/tools/docker/images/debian-13.Dockerfile index 1616e3c48f..5bb761afed 100644 --- a/tools/docker/images/debian-13.Dockerfile +++ b/tools/docker/images/debian-13.Dockerfile @@ -4,10 +4,10 @@ FROM debian:13 AS env ############# ## SETUP ## ############# -RUN apt-get update -qq \ -&& apt-get install -qq \ - git pkg-config wget cmake make autoconf libtool zlib1g-dev gawk g++ curl subversion \ - swig lsb-release \ +RUN apt update -qq \ +&& apt install -yq \ + git pkg-config wget cmake build-essential zlib1g-dev \ + swig lsb-release libicu-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ENTRYPOINT ["/bin/bash", "-c"] @@ -30,10 +30,12 @@ ENV JAVA_HOME=/usr/lib/jvm/default-java # Install Python RUN apt-get update -qq \ -&& apt-get install -qq python3 python3-dev python3-pip python3-venv \ +&& apt-get install -qq python3 python3-dev python3-pip \ + python3-venv python3-virtualenv \ && apt-get clean \ && rm -rf 
/var/lib/apt/lists/* /tmp/* /var/tmp/* -RUN python3 -m pip install absl-py mypy mypy-protobuf +RUN python3 -m pip install --break-system-package \ + absl-py mypy mypy-protobuf ENV TZ=America/Los_Angeles RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone diff --git a/tools/docker/images/debian-sid.Dockerfile b/tools/docker/images/debian-sid.Dockerfile index 1b4803b773..27fc2bb075 100644 --- a/tools/docker/images/debian-sid.Dockerfile +++ b/tools/docker/images/debian-sid.Dockerfile @@ -5,29 +5,19 @@ FROM debian:sid AS env ## SETUP ## ############# RUN apt update -qq \ -&& apt install -yq git wget build-essential cmake lsb-release zlib1g-dev \ +&& apt install -yq \ + git pkg-config wget cmake build-essential zlib1g-dev \ + swig libicu-dev \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ENTRYPOINT ["/bin/bash", "-c"] CMD ["/bin/bash"] -# Install SWIG -RUN apt update -qq \ -&& apt install -yq swig \ -&& apt-get clean \ -&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - # Install .Net -# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-debian#debian-13- -RUN apt-get update -qq \ -&& apt-get install -qq gpg apt-transport-https \ -&& wget -q "https://packages.microsoft.com/config/debian/13/packages-microsoft-prod.deb" -O packages-microsoft-prod.deb \ -&& dpkg -i packages-microsoft-prod.deb \ -&& rm packages-microsoft-prod.deb \ -&& apt-get update -qq \ -&& apt-get install -qq dotnet-sdk-8.0 \ -&& apt-get clean \ -&& rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +# see: https://learn.microsoft.com/en-us/dotnet/core/install/linux-scripted-manual#scripted-install +RUN wget -q "https://dot.net/v1/dotnet-install.sh" \ +&& chmod a+x dotnet-install.sh \ +&& ./dotnet-install.sh -c 8.0 -i /usr/local/bin # Trigger first run experience by running arbitrary cmd RUN dotnet --info @@ -44,7 +34,8 @@ RUN apt-get update -qq \ python3-venv python3-virtualenv \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 
-RUN python3 -m pip install --break-system-package absl-py mypy mypy-protobuf +RUN python3 -m pip install --break-system-package \ + absl-py mypy mypy-protobuf ENV TZ=America/Los_Angeles RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone diff --git a/tools/docker/images/fedora-41.Dockerfile b/tools/docker/images/fedora-41.Dockerfile index 4f5efeaef3..9db9337a66 100644 --- a/tools/docker/images/fedora-41.Dockerfile +++ b/tools/docker/images/fedora-41.Dockerfile @@ -6,7 +6,7 @@ FROM fedora:41 AS env ############# RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all diff --git a/tools/docker/images/fedora-42.Dockerfile b/tools/docker/images/fedora-42.Dockerfile index 78472a4412..485022bca2 100644 --- a/tools/docker/images/fedora-42.Dockerfile +++ b/tools/docker/images/fedora-42.Dockerfile @@ -6,7 +6,7 @@ FROM fedora:42 AS env ############# RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all @@ -30,7 +30,7 @@ RUN dotnet --info # Install Java RUN dnf -y update \ -&& dnf -y install java-11-openjdk java-11-openjdk-devel maven \ +&& dnf -y install java-21-openjdk java-21-openjdk-devel maven \ && dnf clean all ENV JAVA_HOME=/usr/lib/jvm/java-openjdk diff --git a/tools/docker/images/ubuntu-22.04.Dockerfile b/tools/docker/images/ubuntu-22.04.Dockerfile index 2b6a884c10..1b32512b26 100644 --- a/tools/docker/images/ubuntu-22.04.Dockerfile +++ b/tools/docker/images/ubuntu-22.04.Dockerfile @@ -8,11 +8,18 @@ FROM ubuntu:22.04 AS env ############# ENV DEBIAN_FRONTEND=noninteractive RUN apt update -qq \ -&& apt install -yq git wget 
build-essential cmake lsb-release zlib1g-dev \ +&& apt install -yq git wget build-essential lsb-release zlib1g-dev \ && apt clean \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -ENTRYPOINT ["/usr/bin/bash", "-c"] -CMD ["/usr/bin/bash"] +ENTRYPOINT ["/bin/bash", "-c"] +CMD ["/bin/bash"] + +# Install CMake 3.31.0 +RUN ARCH=$(uname -m) \ +&& wget -q "https://cmake.org/files/v3.31/cmake-3.31.0-linux-${ARCH}.sh" \ +&& chmod a+x cmake-3.31.0-linux-${ARCH}.sh \ +&& ./cmake-3.31.0-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ +&& rm cmake-3.31.0-linux-${ARCH}.sh # Install SWIG RUN apt-get update -qq \ diff --git a/tools/docker/test/fedora-41/cpp.Dockerfile b/tools/docker/test/fedora-41/cpp.Dockerfile index 631db505b2..f1de2d071c 100644 --- a/tools/docker/test/fedora-41/cpp.Dockerfile +++ b/tools/docker/test/fedora-41/cpp.Dockerfile @@ -3,7 +3,7 @@ FROM fedora:41 RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all diff --git a/tools/docker/test/fedora-41/dotnet.Dockerfile b/tools/docker/test/fedora-41/dotnet.Dockerfile index 293a7194f2..5fe20ec661 100644 --- a/tools/docker/test/fedora-41/dotnet.Dockerfile +++ b/tools/docker/test/fedora-41/dotnet.Dockerfile @@ -3,7 +3,7 @@ FROM fedora:41 RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all diff --git a/tools/docker/test/fedora-41/java.Dockerfile b/tools/docker/test/fedora-41/java.Dockerfile index 24d33e64c1..9d53bd8730 100644 --- a/tools/docker/test/fedora-41/java.Dockerfile +++ b/tools/docker/test/fedora-41/java.Dockerfile @@ -3,7 +3,7 @@ FROM fedora:41 RUN dnf -y update \ && dnf -y 
install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all diff --git a/tools/docker/test/fedora-41/python.Dockerfile b/tools/docker/test/fedora-41/python.Dockerfile index c9cc24ca1f..45b07aa0e3 100644 --- a/tools/docker/test/fedora-41/python.Dockerfile +++ b/tools/docker/test/fedora-41/python.Dockerfile @@ -3,7 +3,7 @@ FROM fedora:41 RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all diff --git a/tools/docker/test/fedora-42/cpp.Dockerfile b/tools/docker/test/fedora-42/cpp.Dockerfile index 80356489bd..b6511cb9fc 100644 --- a/tools/docker/test/fedora-42/cpp.Dockerfile +++ b/tools/docker/test/fedora-42/cpp.Dockerfile @@ -3,7 +3,7 @@ FROM fedora:42 RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all diff --git a/tools/docker/test/fedora-42/dotnet.Dockerfile b/tools/docker/test/fedora-42/dotnet.Dockerfile index 118af294a8..b1b201202d 100644 --- a/tools/docker/test/fedora-42/dotnet.Dockerfile +++ b/tools/docker/test/fedora-42/dotnet.Dockerfile @@ -3,7 +3,7 @@ FROM fedora:42 RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all diff --git a/tools/docker/test/fedora-42/java.Dockerfile b/tools/docker/test/fedora-42/java.Dockerfile index 95666161de..11b5281629 
100644 --- a/tools/docker/test/fedora-42/java.Dockerfile +++ b/tools/docker/test/fedora-42/java.Dockerfile @@ -3,14 +3,14 @@ FROM fedora:42 RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all # Java Install RUN dnf -y update \ -&& dnf -y install java-11-openjdk java-11-openjdk-devel maven \ +&& dnf -y install java-21-openjdk java-21-openjdk-devel maven \ && dnf clean all ENV JAVA_HOME=/usr/lib/jvm/java-openjdk diff --git a/tools/docker/test/fedora-42/python.Dockerfile b/tools/docker/test/fedora-42/python.Dockerfile index a5d0bf0ba7..5a9748e320 100644 --- a/tools/docker/test/fedora-42/python.Dockerfile +++ b/tools/docker/test/fedora-42/python.Dockerfile @@ -3,7 +3,7 @@ FROM fedora:42 RUN dnf -y update \ && dnf -y install git \ - wget which redhat-lsb-core pkgconfig autoconf libtool zlib-devel \ + wget which lsb_release pkgconfig autoconf libtool zlib-devel \ && dnf -y install @development-tools \ && dnf -y install gcc-c++ cmake \ && dnf clean all From 285be225193b0221e3bfd03182246de6018b26a9 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 3 Jun 2025 17:10:59 +0200 Subject: [PATCH 059/509] linear_solver: reapply xpress_interface fix (#4382) * Also add few fix from rte-france/or-tools fork (`main` branch) --- ortools/linear_solver/xpress_interface.cc | 43 ++++++++++------------- 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/ortools/linear_solver/xpress_interface.cc b/ortools/linear_solver/xpress_interface.cc index 0fa2b23465..633581e338 100644 --- a/ortools/linear_solver/xpress_interface.cc +++ b/ortools/linear_solver/xpress_interface.cc @@ -21,8 +21,8 @@ #include #include #include -#include +#include "absl/strings/numbers.h" #include "absl/strings/str_format.h" #include "ortools/base/logging.h" #include "ortools/base/timer.h" 
@@ -2093,29 +2093,21 @@ void splitMyString(const std::string& str, Container& cont, char delim = ' ') { } } -const char* stringToCharPtr(std::string& var) { return var.c_str(); } +bool stringToCharPtr(const std::string& var, const char** out) { + *out = var.c_str(); + return true; +} -// Save the existing locale, use the "C" locale to ensure that -// string -> double conversion is done ignoring the locale. -struct ScopedLocale { - ScopedLocale() { - oldLocale = std::setlocale(LC_NUMERIC, nullptr); - auto newLocale = std::setlocale(LC_NUMERIC, "C"); - CHECK_EQ(std::string(newLocale), "C"); - } - ~ScopedLocale() { std::setlocale(LC_NUMERIC, oldLocale); } - - private: - const char* oldLocale; -}; - -#define setParamIfPossible_MACRO(target_map, setter, converter) \ +#define setParamIfPossible_MACRO(target_map, setter, converter, type) \ { \ auto matchingParamIter = (target_map).find(paramAndValuePair.first); \ if (matchingParamIter != (target_map).end()) { \ - const auto convertedValue = converter(paramAndValuePair.second); \ - VLOG(1) << "Setting parameter " << paramAndValuePair.first \ - << " to value " << convertedValue << std::endl; \ + type convertedValue; \ + bool ret = converter(paramAndValuePair.second, &convertedValue); \ + if (ret) { \ + VLOG(1) << "Setting parameter " << paramAndValuePair.first \ + << " to value " << convertedValue << std::endl; \ + } \ setter(mLp, matchingParamIter->second, convertedValue); \ continue; \ } \ @@ -2140,14 +2132,15 @@ bool XpressInterface::SetSolverSpecificParametersAsString( } } - ScopedLocale locale; for (auto& paramAndValuePair : paramAndValuePairList) { - setParamIfPossible_MACRO(mapIntegerControls_, XPRSsetintcontrol, std::stoi); - setParamIfPossible_MACRO(mapDoubleControls_, XPRSsetdblcontrol, std::stod); + setParamIfPossible_MACRO(mapIntegerControls_, XPRSsetintcontrol, + absl::SimpleAtoi, int); + setParamIfPossible_MACRO(mapDoubleControls_, XPRSsetdblcontrol, + absl::SimpleAtod, double); 
setParamIfPossible_MACRO(mapStringControls_, XPRSsetstrcontrol, - stringToCharPtr); + stringToCharPtr, const char*); setParamIfPossible_MACRO(mapInteger64Controls_, XPRSsetintcontrol64, - std::stoll); + absl::SimpleAtoi, int64_t); LOG(ERROR) << "Unknown parameter " << paramName << " : function " << __FUNCTION__ << std::endl; return false; From 26b2071f30895f2c5eb97f5115af2bc3819d3bfb Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 4 Jun 2025 09:36:33 +0200 Subject: [PATCH 060/509] cmake: format fix --- cmake/dotnet.cmake | 22 +++++++++++++--------- cmake/java.cmake | 30 ++++++++++++++++++------------ cmake/python.cmake | 4 ++-- 3 files changed, 33 insertions(+), 23 deletions(-) diff --git a/cmake/dotnet.cmake b/cmake/dotnet.cmake index 82c42cc56e..c6fca998bf 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -241,7 +241,9 @@ function(add_dotnet_test) add_custom_command( OUTPUT ${DOTNET_TEST_DIR}/${TEST_NAME}.cs COMMAND ${CMAKE_COMMAND} -E make_directory ${DOTNET_TEST_DIR} - COMMAND ${CMAKE_COMMAND} -E copy ${TEST_FILE_NAME} ${DOTNET_TEST_DIR}/ + COMMAND ${CMAKE_COMMAND} -E copy + ${TEST_FILE_NAME} + ${DOTNET_TEST_DIR}/ MAIN_DEPENDENCY ${TEST_FILE_NAME} VERBATIM WORKING_DIRECTORY ${DOTNET_TEST_DIR}) @@ -252,19 +254,19 @@ function(add_dotnet_test) ${DOTNET_EXECUTABLE} build --nologo -c Release ${TEST_NAME}.csproj COMMAND ${CMAKE_COMMAND} -E touch ${DOTNET_TEST_DIR}/timestamp DEPENDS - ${DOTNET_TEST_DIR}/${TEST_NAME}.csproj - ${DOTNET_TEST_DIR}/${TEST_NAME}.cs - dotnet_package + ${DOTNET_TEST_DIR}/${TEST_NAME}.csproj + ${DOTNET_TEST_DIR}/${TEST_NAME}.cs + dotnet_package BYPRODUCTS - ${DOTNET_TEST_DIR}/bin - ${DOTNET_TEST_DIR}/obj + ${DOTNET_TEST_DIR}/bin + ${DOTNET_TEST_DIR}/obj VERBATIM COMMENT "Compiling .Net ${COMPONENT_NAME}/${TEST_NAME}.cs (${DOTNET_TEST_DIR}/timestamp)" WORKING_DIRECTORY ${DOTNET_TEST_DIR}) add_custom_target(dotnet_${COMPONENT_NAME}_${TEST_NAME} ALL DEPENDS - ${DOTNET_TEST_DIR}/timestamp + ${DOTNET_TEST_DIR}/timestamp 
WORKING_DIRECTORY ${DOTNET_TEST_DIR}) if(BUILD_TESTING) @@ -699,8 +701,10 @@ if(NOT EXAMPLE_FILE_NAME) add_custom_command( OUTPUT ${DOTNET_EXAMPLE_DIR}/${EXAMPLE_NAME}.cs COMMAND ${CMAKE_COMMAND} -E make_directory ${DOTNET_EXAMPLE_DIR} - COMMAND ${CMAKE_COMMAND} -E copy ${EXAMPLE_FILE_NAME} ${DOTNET_EXAMPLE_DIR}/ - MAIN_DEPENDENCY ${EXAMPLE_FILE_NAME} + COMMAND ${CMAKE_COMMAND} -E copy + ${EXAMPLE_FILE_NAME} + ${DOTNET_EXAMPLE_DIR}/ + MAIN_DEPENDENCY ${EXAMPLE_FILE_NAME} VERBATIM WORKING_DIRECTORY ${DOTNET_EXAMPLE_DIR}) diff --git a/cmake/java.cmake b/cmake/java.cmake index ac4ca6bdae..c515c0c17b 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -207,11 +207,13 @@ function(add_java_test) add_custom_command( OUTPUT ${JAVA_TEST_DIR}/${JAVA_TEST_PATH}/${TEST_NAME}.java COMMAND ${CMAKE_COMMAND} -E make_directory - ${JAVA_TEST_DIR}/${JAVA_TEST_PATH} - COMMAND ${CMAKE_COMMAND} -E copy ${TEST_FILE_NAME} ${JAVA_TEST_DIR}/${JAVA_TEST_PATH}/ + ${JAVA_TEST_DIR}/${JAVA_TEST_PATH} + COMMAND ${CMAKE_COMMAND} -E copy + ${TEST_FILE_NAME} + ${JAVA_TEST_DIR}/${JAVA_TEST_PATH}/ MAIN_DEPENDENCY ${TEST_FILE_NAME} VERBATIM - ) + ) string(TOLOWER ${TEST_NAME} JAVA_TEST_PROJECT) configure_file( @@ -224,17 +226,17 @@ function(add_java_test) COMMAND ${MAVEN_EXECUTABLE} compile -B COMMAND ${CMAKE_COMMAND} -E touch ${JAVA_TEST_DIR}/timestamp DEPENDS - ${JAVA_TEST_DIR}/pom.xml - ${JAVA_TEST_DIR}/${JAVA_TEST_PATH}/${TEST_NAME}.java - java_package + ${JAVA_TEST_DIR}/pom.xml + ${JAVA_TEST_DIR}/${JAVA_TEST_PATH}/${TEST_NAME}.java + java_package BYPRODUCTS - ${JAVA_TEST_DIR}/target + ${JAVA_TEST_DIR}/target COMMENT "Compiling Java ${COMPONENT_NAME}/${TEST_NAME}.java (${JAVA_TEST_DIR}/timestamp)" WORKING_DIRECTORY ${JAVA_TEST_DIR}) add_custom_target(java_${COMPONENT_NAME}_${TEST_NAME} ALL DEPENDS - ${JAVA_TEST_DIR}/timestamp + ${JAVA_TEST_DIR}/timestamp WORKING_DIRECTORY ${JAVA_TEST_DIR}) if(BUILD_TESTING) @@ -657,8 +659,10 @@ function(add_java_sample) OUTPUT 
${SAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME_LOWER}/samples/${SAMPLE_NAME}.java COMMAND ${CMAKE_COMMAND} -E make_directory ${SAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME_LOWER}/samples - COMMAND ${CMAKE_COMMAND} -E copy ${SAMPLE_FILE_NAME} ${SAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME_LOWER}/samples/ - MAIN_DEPENDENCY ${SAMPLE_FILE_NAME} + COMMAND ${CMAKE_COMMAND} -E copy + ${SAMPLE_FILE_NAME} + ${SAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME_LOWER}/samples/ + MAIN_DEPENDENCY ${SAMPLE_FILE_NAME} VERBATIM ) @@ -745,8 +749,10 @@ if(NOT EXAMPLE_FILE_NAME) OUTPUT ${JAVA_EXAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME}/${EXAMPLE_NAME}.java COMMAND ${CMAKE_COMMAND} -E make_directory ${JAVA_EXAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME} - COMMAND ${CMAKE_COMMAND} -E copy ${EXAMPLE_FILE_NAME} ${JAVA_EXAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME}/ - MAIN_DEPENDENCY ${EXAMPLE_FILE_NAME} + COMMAND ${CMAKE_COMMAND} -E copy + ${EXAMPLE_FILE_NAME} + ${JAVA_EXAMPLE_DIR}/${JAVA_SRC_PATH}/${COMPONENT_NAME}/ + MAIN_DEPENDENCY ${EXAMPLE_FILE_NAME} VERBATIM ) diff --git a/cmake/python.cmake b/cmake/python.cmake index 3240539174..0a4641faa3 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -639,8 +639,8 @@ add_custom_command( ${PYTHON_PROJECT}/.libs COMMAND ${CMAKE_COMMAND} -E - $,copy,true> - $<${need_unix_scip_lib}:$> + $,copy,true> + $<${need_unix_scip_lib}:$> $<${need_windows_scip_lib}:$> ${PYTHON_PROJECT}/.libs From 1b9c659cf4b46fa1a7df8d086024638036112ad6 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 4 Jun 2025 20:41:01 +0200 Subject: [PATCH 061/509] improve xpress --- ortools/linear_solver/xpress_interface.cc | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/ortools/linear_solver/xpress_interface.cc b/ortools/linear_solver/xpress_interface.cc index 633581e338..0f88bc861c 100644 --- a/ortools/linear_solver/xpress_interface.cc +++ b/ortools/linear_solver/xpress_interface.cc @@ -21,6 +21,7 @@ #include #include 
#include +#include #include "absl/strings/numbers.h" #include "absl/strings/str_format.h" @@ -422,6 +423,13 @@ class XpressInterface : public MPSolverInterface { private: XPRSprob mLp; bool const mMip; + + // Looping on MPConstraint::coefficients_ yields non-reproducible results + // since is uses pointer addresses as keys, the value of which is + // non-deterministic, especially their order. + absl::btree_map > + fixedOrderCoefficientsPerConstraint; + // Incremental extraction. // Without incremental extraction we have to re-extract the model every // time we perform a solve. Due to the way the Reset() function is @@ -1086,6 +1094,8 @@ void XpressInterface::SetCoefficient(MPConstraint* const constraint, double new_value, double) { InvalidateSolutionSynchronization(); + fixedOrderCoefficientsPerConstraint[constraint->index()][variable->index()] = new_value; + // Changing a single coefficient in the matrix is potentially pretty // slow since that coefficient has to be found in the sparse matrix // representation. So by default we don't perform this update immediately @@ -1118,6 +1128,8 @@ void XpressInterface::ClearConstraint(MPConstraint* const constraint) { // There is nothing to do if the constraint was not even extracted. return; + fixedOrderCoefficientsPerConstraint.erase(constraint->index()); + // Clearing a constraint means setting all coefficients in the corresponding // row to 0 (we cannot just delete the row since that would renumber all // the constraints/rows after it). @@ -1552,14 +1564,13 @@ void XpressInterface::ExtractNewConstraints() { // Setup left-hand side of constraint. 
rmatbeg[nextRow] = nextNz; - const auto& coeffs = ct->coefficients_; - for (auto coeff : coeffs) { - int const idx = coeff.first->index(); + const auto& coeffs = fixedOrderCoefficientsPerConstraint[ct->index()]; + for (auto [idx, coeff] : coeffs) { if (variable_is_extracted(idx)) { DCHECK_LT(nextNz, cols); DCHECK_LT(idx, cols); rmatind[nextNz] = idx; - rmatval[nextNz] = coeff.second; + rmatval[nextNz] = coeff; ++nextNz; } } From 115261b48f56729e62149c8343af696259ed61dd Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 5 Jun 2025 08:41:39 +0200 Subject: [PATCH 062/509] bazel: regenerate notebook_requirements.txt --- bazel/notebook_requirements.in | 1 - bazel/notebook_requirements.txt | 178 ++++++++++++++++---------------- 2 files changed, 88 insertions(+), 91 deletions(-) diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index 946cbc100f..d7c30c0201 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -22,7 +22,6 @@ plotly==5.15.0 # Notebook jupyterlab==4.4.3 -setuptools==78.1.1 # jupyterlab require 74.0.0 which contains vulnerabilities notebook==7.4.2 jupyter-server==2.16.0 tornado==6.5.0 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index 500b45e7ee..b8a7a5aa7a 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -6,70 +6,68 @@ # absl-py==2.2.2 # via -r bazel/notebook_requirements.in -anyio==4.0.0 +anyio==4.9.0 # via # httpx # jupyter-server -argon2-cffi==23.1.0 +argon2-cffi==25.1.0 # via jupyter-server argon2-cffi-bindings==21.2.0 # via argon2-cffi -arrow==1.2.3 +arrow==1.3.0 # via isoduration -asttokens==2.4.0 +asttokens==3.0.0 # via stack-data -async-lru==2.0.4 +async-lru==2.0.5 # via jupyterlab -attrs==23.1.0 +attrs==25.3.0 # via # jsonschema # referencing -babel==2.12.1 +babel==2.17.0 # via jupyterlab-server -backcall==0.2.0 - # via ipython -beautifulsoup4==4.12.2 +beautifulsoup4==4.13.4 # via nbconvert black==24.8.0 # via -r 
bazel/notebook_requirements.in -bleach==6.0.0 +bleach[css]==6.2.0 # via nbconvert -certifi==2024.7.4 +certifi==2025.4.26 # via # httpcore # httpx # requests -cffi==1.15.1 +cffi==1.17.1 # via argon2-cffi-bindings -charset-normalizer==3.2.0 +charset-normalizer==3.4.2 # via requests -click==8.1.3 +click==8.2.1 # via black -comm==0.1.4 +comm==0.2.2 # via ipykernel -debugpy==1.6.7.post1 +debugpy==1.8.14 # via ipykernel -decorator==5.1.1 +decorator==5.2.1 # via ipython defusedxml==0.7.1 # via nbconvert -distlib==0.3.7 +distlib==0.3.9 # via virtualenv -executing==1.2.0 +executing==2.2.0 # via stack-data -fastjsonschema==2.18.0 +fastjsonschema==2.21.1 # via nbformat -filelock==3.12.2 +filelock==3.18.0 # via virtualenv fqdn==1.5.1 # via jsonschema -h11==0.14.0 +h11==0.16.0 # via httpcore -httpcore==1.0.5 +httpcore==1.0.9 # via httpx -httpx==0.27.2 +httpx==0.28.1 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx @@ -77,13 +75,15 @@ idna==3.7 # requests immutabledict==4.2.1 # via -r bazel/notebook_requirements.in -ipykernel==6.25.2 +ipykernel==6.29.5 # via jupyterlab -ipython==8.15.0 +ipython==9.3.0 # via ipykernel +ipython-pygments-lexers==1.1.1 + # via ipython isoduration==20.11.0 # via jsonschema -jedi==0.19.0 +jedi==0.19.2 # via ipython jinja2==3.1.6 # via @@ -92,9 +92,9 @@ jinja2==3.1.6 # jupyterlab # jupyterlab-server # nbconvert -json5==0.9.14 +json5==0.12.0 # via jupyterlab-server -jsonpointer==2.4 +jsonpointer==3.0.0 # via jsonschema jsonschema[format-nongpl]==4.23.0 # via @@ -102,14 +102,14 @@ jsonschema[format-nongpl]==4.23.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.7.1 +jsonschema-specifications==2025.4.1 # via jsonschema -jupyter-client==8.3.1 +jupyter-client==8.6.3 # via # ipykernel # jupyter-server # nbclient -jupyter-core==5.3.1 +jupyter-core==5.8.1 # via # ipykernel # jupyter-client @@ -120,7 +120,7 @@ jupyter-core==5.3.1 # nbformat jupyter-events==0.12.0 # via jupyter-server -jupyter-lsp==2.2.2 
+jupyter-lsp==2.2.5 # via jupyterlab jupyter-server==2.16.0 # via @@ -130,50 +130,50 @@ jupyter-server==2.16.0 # jupyterlab-server # notebook # notebook-shim -jupyter-server-terminals==0.4.4 +jupyter-server-terminals==0.5.3 # via jupyter-server jupyterlab==4.4.3 # via # -r bazel/notebook_requirements.in # notebook -jupyterlab-pygments==0.2.2 +jupyterlab-pygments==0.3.0 # via nbconvert jupyterlab-server==2.27.3 # via # jupyterlab # notebook -markupsafe==2.1.3 +markupsafe==3.0.2 # via # jinja2 # nbconvert -matplotlib-inline==0.1.6 +matplotlib-inline==0.1.7 # via # ipykernel # ipython -mistune==3.0.1 +mistune==3.1.3 # via nbconvert mypy==1.6.1 # via -r bazel/notebook_requirements.in -mypy-extensions==1.0.0 +mypy-extensions==1.1.0 # via # black # mypy mypy-protobuf==3.5.0 # via -r bazel/notebook_requirements.in -nbclient==0.8.0 +nbclient==0.10.2 # via nbconvert -nbconvert==7.8.0 +nbconvert==7.16.6 # via jupyter-server -nbformat==5.9.2 +nbformat==5.10.4 # via # jupyter-server # nbclient # nbconvert -nest-asyncio==1.5.7 +nest-asyncio==1.6.0 # via ipykernel notebook==7.4.2 # via -r bazel/notebook_requirements.in -notebook-shim==0.2.3 +notebook-shim==0.2.4 # via # jupyterlab # notebook @@ -182,9 +182,9 @@ numpy==2.2.0 # -r bazel/notebook_requirements.in # pandas # scipy -overrides==7.4.0 +overrides==7.7.0 # via jupyter-server -packaging==23.1 +packaging==25.0 # via # black # ipykernel @@ -196,63 +196,62 @@ packaging==23.1 # plotly pandas==2.2.3 # via -r bazel/notebook_requirements.in -pandocfilters==1.5.0 +pandocfilters==1.5.1 # via nbconvert -parso==0.8.3 +parso==0.8.4 # via jedi -pathspec==0.11.1 +pathspec==0.12.1 # via black -pexpect==4.8.0 +pexpect==4.9.0 # via ipython -pickleshare==0.7.5 - # via ipython -platformdirs==3.10.0 +platformdirs==4.3.8 # via # black # jupyter-core # virtualenv plotly==5.15.0 # via -r bazel/notebook_requirements.in -prometheus-client==0.17.1 +prometheus-client==0.22.1 # via jupyter-server -prompt-toolkit==3.0.39 +prompt-toolkit==3.0.51 # via 
ipython protobuf==6.31.0 # via # -r bazel/notebook_requirements.in # mypy-protobuf -psutil==5.9.5 +psutil==7.0.0 # via ipykernel ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pycparser==2.21 +pycparser==2.22 # via cffi pygments==2.19.1 # via # -r bazel/notebook_requirements.in # ipython + # ipython-pygments-lexers # nbconvert -python-dateutil==2.8.2 +python-dateutil==2.9.0.post0 # via # arrow # jupyter-client # pandas -python-json-logger==2.0.7 +python-json-logger==3.3.0 # via jupyter-events -pytz==2022.7.1 +pytz==2025.2 # via pandas -pyyaml==6.0.1 +pyyaml==6.0.2 # via jupyter-events -pyzmq==25.1.1 +pyzmq==26.4.0 # via # ipykernel # jupyter-client # jupyter-server -referencing==0.30.2 +referencing==0.36.2 # via # jsonschema # jsonschema-specifications @@ -269,38 +268,34 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rpds-py==0.10.2 +rpds-py==0.25.1 # via # jsonschema # referencing scipy==1.14.1 # via -r bazel/notebook_requirements.in -send2trash==1.8.2 +send2trash==1.8.3 # via jupyter-server -six==1.16.0 +six==1.17.0 # via - # asttokens - # bleach # python-dateutil # rfc3339-validator -sniffio==1.3.0 - # via - # anyio - # httpx -soupsieve==2.5 +sniffio==1.3.1 + # via anyio +soupsieve==2.7 # via beautifulsoup4 -stack-data==0.6.2 +stack-data==0.6.3 # via ipython svgwrite==1.4.3 # via -r bazel/notebook_requirements.in -tenacity==8.2.1 +tenacity==9.1.2 # via plotly -terminado==0.17.1 +terminado==0.18.1 # via # jupyter-server # jupyter-server-terminals -tinycss2==1.2.1 - # via nbconvert +tinycss2==1.4.0 + # via bleach tornado==6.5 # via # -r bazel/notebook_requirements.in @@ -310,7 +305,7 @@ tornado==6.5 # jupyterlab # notebook # terminado -traitlets==5.9.0 +traitlets==5.14.3 # via # comm # ipykernel @@ -324,21 +319,26 @@ traitlets==5.9.0 # nbclient # nbconvert # nbformat -types-protobuf==4.24.0.0 +types-protobuf==6.30.2.20250516 # via mypy-protobuf +types-python-dateutil==2.9.0.20250516 + # via 
arrow typing-extensions==4.13.1 # via # -r bazel/notebook_requirements.in + # anyio + # beautifulsoup4 # mypy -tzdata==2023.3 + # referencing +tzdata==2025.2 # via pandas uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.4.0 # via requests virtualenv==20.28.1 # via -r bazel/notebook_requirements.in -wcwidth==0.2.6 +wcwidth==0.2.13 # via prompt-toolkit webcolors==24.11.1 # via jsonschema @@ -350,7 +350,5 @@ websocket-client==1.8.0 # via jupyter-server # The following packages are considered to be unsafe in a requirements file: -setuptools==78.1.1 - # via - # -r bazel/notebook_requirements.in - # jupyterlab +setuptools==80.9.0 + # via jupyterlab From b448993cae83b13a93d2ffc7ac7075b085673418 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 5 Jun 2025 11:33:19 +0200 Subject: [PATCH 063/509] fix --- ortools/sat/scheduling_cuts.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 9842654ca0..e51929b16d 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -1456,7 +1456,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( helper, min_sum_of_ends, min_sum_of_weighted_ends, cut_use_precedences, exploration_limit); if (status == CompletionTimeExplorationStatus::NO_VALID_PERMUTATION) { - return false; + break; } else if (status == CompletionTimeExplorationStatus::ABORTED) { break; } From c9a3da1a2bd9ac89658661b9d6d7acb53f50bf45 Mon Sep 17 00:00:00 2001 From: galabovaa Date: Fri, 6 Jun 2025 15:04:13 +0300 Subject: [PATCH 064/509] cmake: HiGHS v1.11.0 (#4670) * no longer needs patch ref: https://github.com/ERGO-Code/HiGHS/releases/tag/v1.11.0 --- cmake/dependencies/CMakeLists.txt | 4 +- patches/highs-v1.10.0.patch | 169 ------------------------------ 2 files changed, 1 insertion(+), 172 deletions(-) delete mode 100644 patches/highs-v1.10.0.patch diff --git a/cmake/dependencies/CMakeLists.txt 
b/cmake/dependencies/CMakeLists.txt index bdd1d4362c..2b461eacd3 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -285,10 +285,8 @@ if(BUILD_HIGHS) FetchContent_Declare( highs GIT_REPOSITORY "https://github.com/ERGO-Code/HiGHS.git" - GIT_TAG "v1.10.0" + GIT_TAG "v1.11.0" GIT_SHALLOW TRUE - PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/../../patches/highs-v1.10.0.patch" ) FetchContent_MakeAvailable(highs) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/patches/highs-v1.10.0.patch b/patches/highs-v1.10.0.patch deleted file mode 100644 index e7f58d1962..0000000000 --- a/patches/highs-v1.10.0.patch +++ /dev/null @@ -1,169 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index ffaf5290..bf7d1f56 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -194,11 +194,11 @@ if (BUILD_CXX) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) - # for multi-config build system (e.g. 
xcode) -- foreach(OUTPUTCONFIG IN LISTS CMAKE_CONFIGURATION_TYPES) -- string(TOUPPER ${OUTPUTCONFIG} OUTPUTCONFIG) -- set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_LIBDIR}) -- set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_LIBDIR}) -- set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -+ foreach(OutputConfig IN LISTS CMAKE_CONFIGURATION_TYPES) -+ string(TOUPPER ${OutputConfig} OUTPUTCONFIG) -+ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_LIBDIR}) -+ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_LIBDIR}) -+ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) - endforeach() - else() - option(BUILD_SHARED_LIBS "Build shared libraries (.dll)." OFF) -@@ -206,14 +206,11 @@ if (BUILD_CXX) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) - # for multi-config builds (e.g. 
msvc) -- foreach(OUTPUTCONFIG IN LISTS CMAKE_CONFIGURATION_TYPES) -- string(TOLOWER ${OUTPUTCONFIG} OUTPUTCONFIG) -- set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}/${OUTPUTCONFIG}) -- set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}/${OUTPUTCONFIG}) -- set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}/${OUTPUTCONFIG}) -- # set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -- # set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -- # set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -+ foreach(OutputConfig IN LISTS CMAKE_CONFIGURATION_TYPES) -+ string(TOUPPER ${OutputConfig} OUTPUTCONFIG) -+ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) -+ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) -+ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) - endforeach() - endif() - -diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt -index e390ac4b..0e2c470c 100644 ---- a/src/CMakeLists.txt -+++ b/src/CMakeLists.txt -@@ -1,7 +1,7 @@ - if (NOT BUILD_CXX) - return() - endif() -- -+ - # Define library. 
- include(sources) - set(sources ${highs_sources} ${cupdlp_sources} ${ipx_sources} ${basiclu_sources}) -@@ -84,7 +84,7 @@ if(NOT FAST_BUILD) - # target_compile_options(libipx PRIVATE "-Wno-sign-compare") - # target_compile_options(libipx PRIVATE "-Wno-logical-op-parentheses") - endif() -- -+ - install(TARGETS libhighs EXPORT highs-targets - LIBRARY - ARCHIVE -@@ -154,8 +154,6 @@ else() - # $ - ) - -- target_include_directories(highs PUBLIC "${CMAKE_CUDA_PATH}/include") -- - # target_include_directories(highs PRIVATE - # $ - # $ -@@ -180,8 +178,8 @@ else() - # $) - - target_sources(highs PRIVATE ${sources} ${headers} ${win_version_file}) -- -- # Optional Cuda -+ -+ # Optional Cuda - if (CUPDLP_GPU) - # enable_language(CXX CUDA) - # target_sources(highs PRIVATE ${cuda_sources}) -@@ -189,9 +187,11 @@ else() - # set_target_properties(highs PROPERTIES CUDA_SEPARABLE_COMPILATION ON) - - # target_link_libraries(highs ${CUDA_LIBRARY} m) -- -+ - # target_include_directories(highs PUBLIC "/usr/local/include") - -+ target_include_directories(highs PUBLIC -+ $) - set(CUPDLP_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/src/pdlp/cupdlp/") - - add_subdirectory(pdlp/cupdlp/cuda) -@@ -201,7 +201,7 @@ else() - else() - target_link_libraries(highs cudalin ${CUDA_LIBRARY} m) - endif() -- -+ - set_target_properties(highs PROPERTIES CUDA_SEPARABLE_COMPILATION ON) - - endif() -@@ -257,13 +257,13 @@ else() - $ - ) - target_link_libraries(highs ZLIB::ZLIB) -- set(CONF_DEPS -+ set(CONF_DEPS - "include(CMakeFindDependencyMacro)\nfind_dependency(Threads)\nfind_dependency(ZLIB)") - set(CONF_DEPENDENCIES ${CONF_DEPS}) -- else() -+ else() - set(CONF_DEPENDENCIES "include(CMakeFindDependencyMacro)\nfind_dependency(Threads)") - endif() -- -+ - - # # on UNIX system the 'lib' prefix is automatically added - # set_target_properties(highs PROPERTIES -@@ -274,7 +274,7 @@ else() - # set_target_properties(highs PROPERTIES - # LIBRARY_OUTPUT_DIRECTORY "${HIGHS_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}") - # endif() -- -+ 
- # set_target_properties(highs PROPERTIES PUBLIC_HEADER "src/Highs.h;src/lp_data/HighsLp.h;src/lp_data/HighsLpSolverObject.h") - - # install the header files of highs -@@ -291,7 +291,7 @@ else() - - # target_compile_options(highs PRIVATE "-Wall") - # target_compile_options(highs PRIVATE "-Wunused") -- -+ - if (UNIX) - target_compile_options(highs PRIVATE "-Wno-unused-variable") - target_compile_options(highs PRIVATE "-Wno-unused-const-variable") -@@ -324,7 +324,7 @@ else() - - - if (BUILD_DOTNET) -- -+ - # see: https://docs.microsoft.com/en-us/dotnet/core/rid-catalog - if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)") - set(DOTNET_PLATFORM arm64) -@@ -355,8 +355,8 @@ else() - set(TARGET_FILE_NAME "highs.dll") - endif() - -- add_custom_command(TARGET highs POST_BUILD -- COMMAND "${CMAKE_COMMAND}" -E copy -+ add_custom_command(TARGET highs POST_BUILD -+ COMMAND "${CMAKE_COMMAND}" -E copy - "$" - ${DOTNET_PROJECT_DIR}/runtimes/${DOTNET_RID}/native/${TARGET_FILE_NAME} - COMMENT "Copying to output directory") -@@ -375,7 +375,7 @@ if(FORTRAN_FOUND) - target_link_libraries(FortranHighs PUBLIC highs) - endif() - -- install(TARGETS FortranHighs -+ install(TARGETS FortranHighs - LIBRARY - ARCHIVE - RUNTIME From 212665fd5512af076b919f722b3cace545fe0246 Mon Sep 17 00:00:00 2001 From: galabovaa Date: Fri, 6 Jun 2025 15:05:39 +0300 Subject: [PATCH 065/509] bazel: update HiGHS to v1.11.0 ref: Update highs bazelbuild/bazel-central-registry#4770 --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index 2454934bb8..05c94ada19 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -24,7 +24,7 @@ bazel_dep(name = "gazelle", version = "0.43.0") bazel_dep(name = "glpk", version = "5.0.bcr.4") bazel_dep(name = "google_benchmark", version = "1.9.2") bazel_dep(name = "googletest", version = "1.17.0") -bazel_dep(name = "highs", version = "1.10.0") +bazel_dep(name = "highs", version = "1.11.0") bazel_dep(name = "platforms", version = 
"0.0.11") bazel_dep(name = "protobuf", version = "31.0") bazel_dep(name = "pybind11_abseil", version = "202402.0") From 91d20f0350f075c204d90aa7c31eabfab9590a85 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Jun 2025 14:09:15 +0200 Subject: [PATCH 066/509] HiGHS cleanup --- Dependencies.txt | 2 +- patches/BUILD.bazel | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Dependencies.txt b/Dependencies.txt index b1dc1ed9b7..f0813f2c1d 100644 --- a/Dependencies.txt +++ b/Dependencies.txt @@ -9,7 +9,7 @@ Clp=1.17.10 Cgl=0.60.9 Cbc=2.10.12 GLPK=5.0 -HiGHS=v1.10.0 +HiGHS=v1.11.0 Scip=v922 # Python pybind11=v2.13.6 diff --git a/patches/BUILD.bazel b/patches/BUILD.bazel index 22f2795c84..28b25b4abe 100644 --- a/patches/BUILD.bazel +++ b/patches/BUILD.bazel @@ -13,7 +13,6 @@ exports_files([ "abseil-cpp-20250512.0.patch", - "highs-v1.10.patch", "protobuf-v31.0.patch", "pybind11_bazel.patch", "pybind11_abseil.patch", From 7c96dedea363bab9cc77b470a54ac08ed062504d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 10 Jun 2025 17:25:25 +0200 Subject: [PATCH 067/509] new minizinc challenge entry --- ortools/flatzinc/challenge/Makefile | 2 +- ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile | 6 ++++-- ortools/flatzinc/challenge/minizinc-challenge.Dockerfile | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ortools/flatzinc/challenge/Makefile b/ortools/flatzinc/challenge/Makefile index 3dae17137d..de38f0d3e6 100644 --- a/ortools/flatzinc/challenge/Makefile +++ b/ortools/flatzinc/challenge/Makefile @@ -18,7 +18,7 @@ DOCKER_BUILD_CMD := docker build endif DOCKER_RUN_CMD := docker run --rm --init -MZN_SUFFIX=2024v5 +MZN_SUFFIX=2025v1 DOCKER_NAME=cp-sat-minizinc-challenge MZN_TAG=${DOCKER_NAME}:${MZN_SUFFIX} MZN_LS_TAG=${DOCKER_NAME}-ls:${MZN_SUFFIX} diff --git a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile index 81db76a0ed..86b9c8034c 100644 
--- a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile @@ -1,4 +1,4 @@ -FROM minizinc/mznc2024:latest AS env +FROM minizinc/mznc2025:latest AS env ENV SRC_GIT_BRANCH v99bugfix @@ -29,4 +29,6 @@ RUN ln -s /root/or-tools/bazel-bin/ortools/flatzinc/fz /entry_data/fzn-exec RUN cp /root/or-tools/ortools/flatzinc/mznlib/*mzn /entry_data/mzn-lib # Patch the run scripts -RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -G/g" /minizinc/mzn-exec-* \ No newline at end of file +RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -p 1 -G/g" /minizinc/mzn-exec-fd +RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true,num_workers:3 -G/g" /minizinc/mzn-exec-free +RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -G/g" /minizinc/mzn-exec-par \ No newline at end of file diff --git a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile index d111f1e5f8..1113ff8778 100644 --- a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile @@ -1,4 +1,4 @@ -FROM minizinc/mznc2024:latest AS env +FROM minizinc/mznc2025:latest AS env ENV SRC_GIT_BRANCH v99bugfix From 64fcba4be7559c5ad47a782f7b46bae362c1aa60 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 10 Jun 2025 17:25:55 +0200 Subject: [PATCH 068/509] [CP-SAT] work on precedences; fix bug in scheduling cuts --- ortools/sat/2d_distances_propagator.cc | 22 +- ortools/sat/2d_distances_propagator.h | 7 +- ortools/sat/BUILD.bazel | 9 +- ortools/sat/combine_solutions.cc | 21 +- ortools/sat/combine_solutions.h | 3 +- ortools/sat/cp_model_lns.cc | 2 +- ortools/sat/cp_model_lns_test.cc | 8 +- ortools/sat/cp_model_loader.cc | 7 +- ortools/sat/cp_model_presolve.cc | 24 +- ortools/sat/cp_model_search.cc | 3 +- ortools/sat/cp_model_solver.cc | 52 +- ortools/sat/cp_model_solver_helpers.cc | 12 +- 
ortools/sat/cp_model_solver_test.cc | 10 +- ortools/sat/cumulative.cc | 4 +- ortools/sat/cumulative_energy_test.cc | 10 +- ortools/sat/diffn.cc | 2 +- ortools/sat/disjunctive.cc | 12 +- ortools/sat/disjunctive.h | 10 +- ortools/sat/disjunctive_test.cc | 30 +- ortools/sat/feasibility_jump.cc | 23 +- ortools/sat/feasibility_jump.h | 2 +- ortools/sat/integer.h | 12 + ortools/sat/integer_base.cc | 102 ++- ortools/sat/integer_base.h | 34 +- ortools/sat/integer_base_test.cc | 21 +- ortools/sat/integer_search.cc | 4 +- ortools/sat/intervals.cc | 24 +- ortools/sat/intervals.h | 3 +- ortools/sat/linear_propagation.cc | 11 +- ortools/sat/linear_propagation.h | 4 +- ortools/sat/linear_relaxation.cc | 4 +- ortools/sat/precedences.cc | 502 ++++++++------ ortools/sat/precedences.h | 904 +++++++++++++------------ ortools/sat/precedences_test.cc | 176 ++--- ortools/sat/python/cp_model.py | 8 + ortools/sat/python/cp_model_test.py | 9 +- ortools/sat/rins.cc | 7 +- ortools/sat/rins_test.cc | 16 +- ortools/sat/sat_parameters.proto | 11 +- ortools/sat/scheduling_cuts.cc | 32 +- ortools/sat/scheduling_helpers.cc | 8 +- ortools/sat/scheduling_helpers.h | 3 +- ortools/sat/shaving_solver.cc | 6 +- ortools/sat/synchronization.cc | 176 ++++- ortools/sat/synchronization.h | 332 +++++++-- ortools/sat/util.cc | 45 ++ ortools/sat/util.h | 16 +- ortools/sat/util_test.cc | 89 +++ 48 files changed, 1843 insertions(+), 989 deletions(-) diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc index 71b44cabc3..e6d2658635 100644 --- a/ortools/sat/2d_distances_propagator.cc +++ b/ortools/sat/2d_distances_propagator.cc @@ -39,7 +39,9 @@ namespace sat { Precedences2DPropagator::Precedences2DPropagator( NoOverlap2DConstraintHelper* helper, Model* model) : helper_(*helper), - binary_relations_maps_(model->GetOrCreate()), + linear2_bounds_from_linear3_( + model->GetOrCreate()), + linear2_bounds_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()) { 
model->GetOrCreate()->SetPushAffineUbForBinaryRelation(); } @@ -71,9 +73,10 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { } VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " - << binary_relations_maps_->GetAllExpressionsWithAffineBounds().size(); + << linear2_bounds_->GetAllExpressionsWithPotentialNonTrivialBounds() + .size(); for (const LinearExpression2& expr : - binary_relations_maps_->GetAllExpressionsWithAffineBounds()) { + linear2_bounds_->GetAllExpressionsWithPotentialNonTrivialBounds()) { auto it1 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[0])); auto it2 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[1])); if (it1 == var_to_box_and_coeffs.end() || @@ -118,10 +121,10 @@ bool Precedences2DPropagator::Propagate() { if (last_helper_inprocessing_count_ != helper_.InProcessingCount() || helper_.x_helper().CurrentDecisionLevel() == 0 || last_num_expressions_ != - binary_relations_maps_->NumExpressionsWithAffineBounds()) { + linear2_bounds_from_linear3_->NumExpressionsWithAffineBounds()) { last_helper_inprocessing_count_ = helper_.InProcessingCount(); last_num_expressions_ = - binary_relations_maps_->NumExpressionsWithAffineBounds(); + linear2_bounds_from_linear3_->NumExpressionsWithAffineBounds(); CollectPairsOfBoxesWithNonTrivialDistance(); } @@ -153,8 +156,8 @@ bool Precedences2DPropagator::Propagate() { expr.coeffs[0] = helper->Starts()[b1].coeff; expr.coeffs[1] = -helper->Ends()[b2].coeff; const IntegerValue ub_of_start_minus_end_value = - binary_relations_maps_->UpperBound(expr) + - helper->Starts()[b1].constant - helper->Ends()[b2].constant; + linear2_bounds_->UpperBound(expr) + helper->Starts()[b1].constant - + helper->Ends()[b2].constant; if (ub_of_start_minus_end_value >= 0) { is_unfeasible = false; break; @@ -182,7 +185,7 @@ bool Precedences2DPropagator::Propagate() { expr.vars[1] = helper->Ends()[b2].var; expr.coeffs[0] = helper->Starts()[b1].coeff; expr.coeffs[1] = 
-helper->Ends()[b2].coeff; - binary_relations_maps_->AddReasonForUpperBoundLowerThan( + linear2_bounds_->AddReasonForUpperBoundLowerThan( expr, -(helper->Starts()[b1].constant - helper->Ends()[b2].constant) - 1, helper_.x_helper().MutableLiteralReason(), @@ -199,7 +202,8 @@ bool Precedences2DPropagator::Propagate() { int Precedences2DPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_.WatchAllBoxes(id); - binary_relations_maps_->WatchAllLinearExpressions2(id); + linear2_bounds_from_linear3_->WatchAllLinearExpressions2(id); + // TODO(user): Implement a Linear2Bounds watcher. return id; } diff --git a/ortools/sat/2d_distances_propagator.h b/ortools/sat/2d_distances_propagator.h index b05e6b1a3d..f2e46ca1d9 100644 --- a/ortools/sat/2d_distances_propagator.h +++ b/ortools/sat/2d_distances_propagator.h @@ -28,10 +28,10 @@ namespace operations_research { namespace sat { // This class implements a propagator for non_overlap_2d constraints that uses -// the BinaryRelationsMaps to detect precedences between pairs of boxes and +// the Linear2Bounds to detect precedences between pairs of boxes and // detect a conflict if the precedences implies an overlap between the two // boxes. For doing this efficiently, it keep track of pairs of boxes that have -// non-fixed precedences in the BinaryRelationsMaps and only check those in the +// non-fixed precedences in the Linear2Bounds and only check those in the // propagation. 
class Precedences2DPropagator : public PropagatorInterface { public: @@ -48,7 +48,8 @@ class Precedences2DPropagator : public PropagatorInterface { std::vector> non_trivial_pairs_; NoOverlap2DConstraintHelper& helper_; - BinaryRelationsMaps* binary_relations_maps_; + Linear2BoundsFromLinear3* linear2_bounds_from_linear3_; + Linear2Bounds* linear2_bounds_; SharedStatistics* shared_stats_; int last_helper_inprocessing_count_ = -1; diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index ba675c7341..b7de5d2357 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -126,7 +126,6 @@ cc_library( ":model", ":no_overlap_2d_helper", ":precedences", - ":sat_base", ":scheduling_helpers", ":synchronization", "//ortools/base:stl_util", @@ -313,8 +312,10 @@ cc_library( "@abseil-cpp//absl/hash", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", + "@abseil-cpp//absl/numeric:int128", "@abseil-cpp//absl/random", "@abseil-cpp//absl/random:bit_gen_ref", + "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/status", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", @@ -1095,6 +1096,7 @@ cc_library( ":integer", ":integer_base", ":model", + ":precedences", ":presolve_context", ":presolve_util", ":probing", @@ -1132,7 +1134,6 @@ cc_library( "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:vlog_is_on", - "@abseil-cpp//absl/meta:type_traits", "@abseil-cpp//absl/numeric:int128", "@abseil-cpp//absl/random:distributions", "@abseil-cpp//absl/status:statusor", @@ -2314,6 +2315,7 @@ cc_test( ":disjunctive", ":integer", ":integer_base", + ":integer_expr", ":integer_search", ":intervals", ":model", @@ -2568,7 +2570,6 @@ cc_library( "//ortools/base:stl_util", "//ortools/base:strong_vector", "//ortools/util:logging", - "//ortools/util:saturated_arithmetic", "//ortools/util:sorted_interval_list", "//ortools/util:strong_integers", "@abseil-cpp//absl/base:core_headers", @@ -3172,6 +3173,7 @@ cc_library( 
"@abseil-cpp//absl/container:inlined_vector", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:log_streamer", + "@abseil-cpp//absl/numeric:bits", "@abseil-cpp//absl/numeric:int128", "@abseil-cpp//absl/random", "@abseil-cpp//absl/random:bit_gen_ref", @@ -3849,6 +3851,7 @@ cc_test( "//ortools/base:gmock_main", "//ortools/base:parse_test_proto", "//ortools/util:random_engine", + "@abseil-cpp//absl/strings", "@abseil-cpp//absl/types:span", ], ) diff --git a/ortools/sat/combine_solutions.cc b/ortools/sat/combine_solutions.cc index 679074675e..872a9e4b4f 100644 --- a/ortools/sat/combine_solutions.cc +++ b/ortools/sat/combine_solutions.cc @@ -53,7 +53,9 @@ std::optional> FindCombinedSolution( CHECK_EQ(new_solution.size(), base_solution.size()); const std::vector< std::shared_ptr::Solution>> - solutions = response_manager->SolutionsRepository().GetBestNSolutions(10); + solutions = + response_manager->SolutionPool().BestSolutions().GetBestNSolutions( + 10); for (int sol_idx = 0; sol_idx < solutions.size(); ++sol_idx) { std::shared_ptr::Solution> s = @@ -79,18 +81,21 @@ std::optional> FindCombinedSolution( PushedSolutionPointers PushAndMaybeCombineSolution( SharedResponseManager* response_manager, const CpModelProto& model_proto, absl::Span new_solution, const std::string& solution_info, - absl::Span base_solution, Model* model) { + std::shared_ptr::Solution> + base_solution) { PushedSolutionPointers result = {nullptr, nullptr}; - result.pushed_solution = - response_manager->NewSolution(new_solution, solution_info, model); - if (!base_solution.empty()) { + result.pushed_solution = response_manager->NewSolution( + new_solution, solution_info, nullptr, + base_solution == nullptr ? 
-1 : base_solution->source_id); + if (base_solution != nullptr) { std::string combined_solution_info = solution_info; std::optional> combined_solution = - FindCombinedSolution(model_proto, new_solution, base_solution, - response_manager, &combined_solution_info); + FindCombinedSolution(model_proto, new_solution, + base_solution->variable_values, response_manager, + &combined_solution_info); if (combined_solution.has_value()) { result.improved_solution = response_manager->NewSolution( - combined_solution.value(), combined_solution_info, model); + combined_solution.value(), combined_solution_info); } } return result; diff --git a/ortools/sat/combine_solutions.h b/ortools/sat/combine_solutions.h index 7106e9939e..259e1cbca9 100644 --- a/ortools/sat/combine_solutions.h +++ b/ortools/sat/combine_solutions.h @@ -49,7 +49,8 @@ struct PushedSolutionPointers { PushedSolutionPointers PushAndMaybeCombineSolution( SharedResponseManager* response_manager, const CpModelProto& model_proto, absl::Span new_solution, const std::string& solution_info, - absl::Span base_solution = {}, Model* model = nullptr); + std::shared_ptr::Solution> + base_solution); } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 5281a662a4..36990c16a9 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -1237,7 +1237,7 @@ CpModelProto NeighborhoodGeneratorHelper::UpdatedModelProtoCopy() const { } bool NeighborhoodGenerator::ReadyToGenerate() const { - return (helper_.shared_response().SolutionsRepository().NumSolutions() > 0); + return helper_.shared_response().HasFeasibleSolution(); } double NeighborhoodGenerator::GetUCBScore(int64_t total_num_calls) const { diff --git a/ortools/sat/cp_model_lns_test.cc b/ortools/sat/cp_model_lns_test.cc index ac44009b93..68dc701a14 100644 --- a/ortools/sat/cp_model_lns_test.cc +++ b/ortools/sat/cp_model_lns_test.cc @@ -201,8 +201,10 @@ TYPED_TEST(GeneratorTest, 
ReadyToGenerate) { EXPECT_FALSE(generator.ReadyToGenerate()); shared_response_manager->NewSolution(solution.solution(), solution.solution_info(), &model); - shared_response_manager->MutableSolutionsRepository()->Synchronize(); - EXPECT_EQ(1, shared_response_manager->SolutionsRepository().NumSolutions()); + shared_response_manager->Synchronize(); + EXPECT_EQ( + 1, + shared_response_manager->SolutionPool().BestSolutions().NumSolutions()); EXPECT_TRUE(generator.ReadyToGenerate()); } @@ -301,7 +303,7 @@ TEST(RelaxationInducedNeighborhoodGeneratorTest, NoNeighborhoodGeneratedRINS) { solution.add_solution(0); shared_response_manager->NewSolution(solution.solution(), solution.solution_info(), &model); - shared_response_manager->MutableSolutionsRepository()->Synchronize(); + shared_response_manager->Synchronize(); lp_solutions.NewLPSolution({0.0}); lp_solutions.Synchronize(); diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index f053299657..3a691a1ddf 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -1261,7 +1261,7 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { // Load precedences. if (!HasEnforcementLiteral(ct)) { - auto* precedences = m->GetOrCreate(); + auto* root_level_lin2_bounds = m->GetOrCreate(); // To avoid overflow in the code below, we tighten the bounds. // Note that we detect and do not add trivial relation. @@ -1272,7 +1272,7 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { if (vars.size() == 2) { LinearExpression2 expr(vars[0], vars[1], coeffs[0], coeffs[1]); - precedences->AddBounds(expr, rhs_min, rhs_max); + root_level_lin2_bounds->Add(expr, rhs_min, rhs_max); } else if (vars.size() == 3) { // TODO(user): This is a weaker duplication of the logic of // BinaryRelationsMaps, but is is useful for the transitive closure in @@ -1293,7 +1293,8 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { ? 
coeff * integer_trail->UpperBound(vars[other]).value() : coeff * integer_trail->LowerBound(vars[other]).value(); LinearExpression2 expr(vars[i], vars[j], coeffs[i], coeffs[j]); - precedences->AddBounds(expr, rhs_min - other_ub, rhs_max - other_lb); + root_level_lin2_bounds->Add(expr, rhs_min - other_ub, + rhs_max - other_lb); } } } diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index d835856bf1..9cfb4f05be 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -40,7 +40,6 @@ #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/log/vlog_is_on.h" -#include "absl/meta/type_traits.h" #include "absl/numeric/int128.h" #include "absl/random/distributions.h" #include "absl/status/statusor.h" @@ -74,6 +73,7 @@ #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" +#include "ortools/sat/precedences.h" #include "ortools/sat/presolve_context.h" #include "ortools/sat/presolve_util.h" #include "ortools/sat/probing.h" @@ -7886,6 +7886,28 @@ void CpModelPresolver::Probe() { prober->ProbeBooleanVariables( context_->params().probing_deterministic_time_limit()); + for (const auto& [expr, ub] : model.GetOrCreate() + ->GetSortedNonTrivialBounds()) { + if (expr.vars[0] == kNoIntegerVariable || + expr.vars[1] == kNoIntegerVariable) { + continue; + } + const IntegerVariable var0 = PositiveVariable(expr.vars[0]); + const IntegerVariable var1 = PositiveVariable(expr.vars[1]); + const int proto_var0 = mapping->GetProtoVariableFromIntegerVariable(var0); + const int proto_var1 = mapping->GetProtoVariableFromIntegerVariable(var1); + if (proto_var0 < 0 || proto_var1 < 0) continue; + const int64_t coeff0 = VariableIsPositive(expr.vars[0]) + ? expr.coeffs[0].value() + : -expr.coeffs[0].value(); + const int64_t coeff1 = VariableIsPositive(expr.vars[1]) + ? 
expr.coeffs[1].value() + : -expr.coeffs[1].value(); + known_linear2_.Add( + GetLinearExpression2FromProto(proto_var0, coeff0, proto_var1, coeff1), + kMinIntegerValue, ub); + } + probing_timer->AddCounter("probed", prober->num_decisions()); probing_timer->AddToWork( model.GetOrCreate()->GetElapsedDeterministicTime()); diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index ff253b4041..e6b29ef50b 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -758,7 +758,8 @@ absl::flat_hash_map GetNamedParameters( lns_params.set_log_search_progress(false); lns_params.set_debug_crash_on_bad_hint(false); // Can happen in lns. - lns_params.set_solution_pool_size(1); // Keep the best solution found. + lns_params.set_solution_pool_size(1); // Keep the best solution found. + lns_params.set_alternative_pool_size(0); // Disable. strategies["lns"] = lns_params; // Note that we only do this for the derived parameters. The strategy "lns" diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 8aef798e1f..647d15efb0 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -804,7 +804,7 @@ void LogFinalStatistics(SharedClasses* shared) { std::vector> table; table.push_back({"Solution repositories", "Added", "Queried", "Synchro"}); - table.push_back(shared->response->SolutionsRepository().TableLineStats()); + shared->response->SolutionPool().AddTableStats(&table); table.push_back(shared->ls_hints->TableLineStats()); if (shared->lp_solutions != nullptr) { table.push_back(shared->lp_solutions->TableLineStats()); @@ -1348,36 +1348,29 @@ class LnsSolver : public SubSolver { data.task_id = task_id; data.difficulty = generator_->difficulty(); data.deterministic_limit = generator_->deterministic_limit(); + data.initial_best_objective = + shared_->response->GetBestSolutionObjective(); // Choose a base solution for this neighborhood. 
+ const auto base_solution = + shared_->response->SolutionPool().GetSolutionToImprove(random); CpSolverResponse base_response; - { - const SharedSolutionRepository& repo = - shared_->response->SolutionsRepository(); - if (repo.NumSolutions() > 0) { - base_response.set_status(CpSolverStatus::FEASIBLE); - std::shared_ptr::Solution> - solution = repo.GetRandomBiasedSolution(random); - base_response.mutable_solution()->Assign( - solution->variable_values.begin(), - solution->variable_values.end()); + if (base_solution != nullptr) { + base_response.set_status(CpSolverStatus::FEASIBLE); + base_response.mutable_solution()->Assign( + base_solution->variable_values.begin(), + base_solution->variable_values.end()); - // Note: We assume that the solution rank is the solution internal - // objective. - data.initial_best_objective = repo.GetSolution(0)->rank; - data.base_objective = solution->rank; - } else { - base_response.set_status(CpSolverStatus::UNKNOWN); + // Note: We assume that the solution rank is the solution internal + // objective. + data.base_objective = base_solution->rank; + } else { + base_response.set_status(CpSolverStatus::UNKNOWN); - // If we do not have a solution, we use the current objective upper - // bound so that our code that compute an "objective" improvement - // works. - // - // TODO(user): this is non-deterministic. Fix. - data.initial_best_objective = - shared_->response->GetInnerObjectiveUpperBound(); - data.base_objective = data.initial_best_objective; - } + // If we do not have a solution, we use the current objective upper + // bound so that our code that compute an "objective" improvement + // works. 
+ data.base_objective = data.initial_best_objective; } Neighborhood neighborhood = @@ -1667,9 +1660,9 @@ class LnsSolver : public SubSolver { if (absl::MakeSpan(solution_values) != absl::MakeSpan(base_response.solution())) { new_solution = true; - PushAndMaybeCombineSolution( - shared_->response, shared_->model_proto, solution_values, - solution_info, base_response.solution(), /*model=*/nullptr); + PushAndMaybeCombineSolution(shared_->response, shared_->model_proto, + solution_values, solution_info, + base_solution); } } if (!neighborhood.is_reduced && @@ -1782,7 +1775,6 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { subsolvers.push_back(std::make_unique( "synchronization_agent", [shared]() { shared->response->Synchronize(); - shared->response->MutableSolutionsRepository()->Synchronize(); shared->ls_hints->Synchronize(); if (shared->bounds != nullptr) { shared->bounds->Synchronize(); diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 030cf124b5..b2016a01d5 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -1073,7 +1073,7 @@ void FillBinaryRelationRepository(const CpModelProto& model_proto, auto* encoder = model->GetOrCreate(); auto* mapping = model->GetOrCreate(); auto* repository = model->GetOrCreate(); - auto* relations_maps = model->GetOrCreate(); + auto* root_level_lin2_bounds = model->GetOrCreate(); for (const ConstraintProto& ct : model_proto.constraints()) { // Load conditional precedences and always true binary relations. 
@@ -1145,7 +1145,7 @@ void FillBinaryRelationRepository(const CpModelProto& model_proto, expr.vars[1] = vars[1]; expr.coeffs[0] = coeffs[0]; expr.coeffs[1] = coeffs[1]; - relations_maps->AddRelationBounds(expr, rhs_min, rhs_max); + root_level_lin2_bounds->Add(expr, rhs_min, rhs_max); } } else { const Literal lit = mapping->Literal(ct.enforcement_literal(0)); @@ -1216,10 +1216,6 @@ void LoadBaseModel(const CpModelProto& model_proto, Model* model) { AddFullEncodingFromSearchBranching(model_proto, model); if (sat_solver->ModelIsUnsat()) return unsat(); - // Reserve space for the precedence relations. - model->GetOrCreate()->Resize( - model->GetOrCreate()->NumIntegerVariables().value()); - FillBinaryRelationRepository(model_proto, model); if (time_limit->LimitReached()) return; @@ -1292,7 +1288,7 @@ void LoadBaseModel(const CpModelProto& model_proto, Model* model) { model->GetOrCreate()->ProcessImplicationGraph( model->GetOrCreate()); - model->GetOrCreate()->Build(); + model->GetOrCreate()->Build(); } void LoadFeasibilityPump(const CpModelProto& model_proto, Model* model) { @@ -1794,7 +1790,7 @@ void QuickSolveWithHint(const CpModelProto& model_proto, Model* model) { // Tricky: We can only test that if we don't already have a feasible solution // like we do if the hint is complete. if (parameters->debug_crash_on_bad_hint() && - shared_response_manager->SolutionsRepository().NumSolutions() == 0 && + shared_response_manager->HasFeasibleSolution() && !model->GetOrCreate()->LimitReached() && status != SatSolver::Status::FEASIBLE) { LOG(FATAL) << "QuickSolveWithHint() didn't find a feasible solution." 
diff --git a/ortools/sat/cp_model_solver_test.cc b/ortools/sat/cp_model_solver_test.cc index 63ab2fae0f..d48d8ef4d0 100644 --- a/ortools/sat/cp_model_solver_test.cc +++ b/ortools/sat/cp_model_solver_test.cc @@ -1070,10 +1070,12 @@ TEST(SolveCpModelTest, SolutionHintMinimizeL1DistanceTest) { // TODO(user): Instead, we might change the presolve to always try to keep the // given hint feasible. Model model; - model.Add( - NewSatParameters("repair_hint:true, stop_after_first_solution:true, " - "keep_all_feasible_solutions_in_presolve:true " - "num_workers:1")); + SatParameters params; + params.set_repair_hint(true); + params.set_stop_after_first_solution(true); + params.set_keep_all_feasible_solutions_in_presolve(true); + params.set_num_workers(1); + model.Add(NewSatParameters(params)); const CpSolverResponse response = SolveCpModel(model_proto, &model); EXPECT_THAT(response.status(), AnyOf(Eq(CpSolverStatus::OPTIMAL), Eq(CpSolverStatus::FEASIBLE))); diff --git a/ortools/sat/cumulative.cc b/ortools/sat/cumulative.cc index 3d910e779d..8cde3428c0 100644 --- a/ortools/sat/cumulative.cc +++ b/ortools/sat/cumulative.cc @@ -212,8 +212,8 @@ std::function Cumulative( // having two independent constraint doing the same propagation. 
std::vector full_precedences; if (parameters.exploit_all_precedences()) { - model->GetOrCreate()->ComputeFullPrecedences( - index_to_end_vars, &full_precedences); + model->GetOrCreate() + ->ComputeFullPrecedences(index_to_end_vars, &full_precedences); } for (const FullIntegerPrecedence& data : full_precedences) { const int size = data.indices.size(); diff --git a/ortools/sat/cumulative_energy_test.cc b/ortools/sat/cumulative_energy_test.cc index a8b56e9905..27f4cb929c 100644 --- a/ortools/sat/cumulative_energy_test.cc +++ b/ortools/sat/cumulative_energy_test.cc @@ -176,8 +176,8 @@ bool SolveUsingNaiveModel(const EnergyInstance& instance) { std::vector intervals; std::vector consumptions; IntegerVariable one = model.Add(ConstantIntegerVariable(1)); - IntervalsRepository* intervals_repository = - model.GetOrCreate(); + auto* intervals_repository = model.GetOrCreate(); + auto* precedences = model.GetOrCreate(); for (const auto& task : instance.tasks) { if (task.is_optional) { @@ -207,7 +207,7 @@ bool SolveUsingNaiveModel(const EnergyInstance& instance) { CHECK_NE(start_expr.var, kNoIntegerVariable); const IntegerVariable start = start_expr.var; if (previous_start != kNoIntegerVariable) { - model.Add(LowerOrEqual(previous_start, start)); + precedences->AddPrecedence(previous_start, start); } else { first_start = start; } @@ -215,8 +215,8 @@ bool SolveUsingNaiveModel(const EnergyInstance& instance) { } // start[last] <= start[0] + duration_max - 1 if (previous_start != kNoIntegerVariable) { - model.Add(LowerOrEqualWithOffset(previous_start, first_start, - -task.duration_max + 1)); + precedences->AddPrecedenceWithOffset(previous_start, first_start, + -task.duration_max + 1); } } } diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index b848740606..fde8ee6a46 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -307,7 +307,7 @@ void AddNonOverlappingRectangles(const std::vector& x, return; } - // At least one of the 4 options is true. 
+ // At least one of the 4 options is true if all boxes are present. std::vector clause = {x_ij, x_ji, y_ij, y_ji}; if (repository->IsOptional(x[i])) { clause.push_back(repository->PresenceLiteral(x[i]).Negated()); diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index 0faa84e760..3098a21a98 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -1210,13 +1210,13 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // TODO(user): we should probably change the api to return a Span. // // TODO(user): If more than one set of task push the same variable, we - // probabaly only want to keep the best push? Maybe we want to process them + // probably only want to keep the best push? Maybe we want to process them // in reverse order of what we do here? indices_before_.clear(); IntegerValue local_start; IntegerValue local_end; for (; global_i < size; ++global_i) { - const PrecedenceRelations::PrecedenceData& data = before_[global_i]; + const EnforcedLinear2Bounds::PrecedenceData& data = before_[global_i]; if (data.var != var) break; const int index = data.index; const auto [t, start_of_t] = window_[index]; @@ -1259,7 +1259,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // the offset as much as possible. Note that the alternative of storing it // in PrecedenceData is not necessarily better and harder to update as we // dive/backtrack. - const IntegerValue inner_offset = -precedence_relations_->UpperBound( + const IntegerValue inner_offset = -linear2_bounds_->UpperBound( LinearExpression2::Difference(end_exp.var, var)); DCHECK_NE(inner_offset, kMinIntegerValue); @@ -1275,7 +1275,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // This is true if we skipped all task so far in this block. if (min_offset == kMaxIntegerValue) { // If only one task is left, we can abort. - // This avoid a GetConditionalOffset() lookup. + // This avoid a linear2_bounds_ lookup. 
if (i == 1) break; // Lower the end_min_when_all_present for better filtering later. @@ -1314,8 +1314,8 @@ bool DisjunctivePrecedences::PropagateSubwindow() { const AffineExpression& end_exp = helper_->Ends()[ct]; const LinearExpression2 expr = LinearExpression2::Difference(end_exp.var, var); - precedence_relations_->AddReasonForUpperBoundLowerThan( - expr, precedence_relations_->UpperBound(expr), + linear2_bounds_->AddReasonForUpperBoundLowerThan( + expr, linear2_bounds_->UpperBound(expr), helper_->MutableLiteralReason(), helper_->MutableIntegerReason()); } ++stats_.num_propagations; diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index bb77c41b12..8550fc2dd1 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -353,7 +353,8 @@ class DisjunctivePrecedences : public PropagatorInterface { : time_direction_(time_direction), helper_(helper), integer_trail_(model->GetOrCreate()), - precedence_relations_(model->GetOrCreate()), + precedence_relations_(model->GetOrCreate()), + linear2_bounds_(model->GetOrCreate()), stats_("DisjunctivePrecedences", model) { window_.ClearAndReserve(helper->NumTasks()); index_to_end_vars_.ClearAndReserve(helper->NumTasks()); @@ -369,20 +370,21 @@ class DisjunctivePrecedences : public PropagatorInterface { const bool time_direction_; SchedulingConstraintHelper* helper_; IntegerTrail* integer_trail_; - PrecedenceRelations* precedence_relations_; + EnforcedLinear2Bounds* precedence_relations_; + Linear2Bounds* linear2_bounds_; FixedCapacityVector window_; FixedCapacityVector index_to_end_vars_; FixedCapacityVector indices_before_; std::vector skip_; - std::vector before_; + std::vector before_; PropagationStatistics stats_; }; // This is an optimization for the case when we have a big number of such -// pairwise constraints. This should be roughtly equivalent to what the general +// pairwise constraints. 
This should be roughly equivalent to what the general // disjunctive case is doing, but it dealt with variable size better and has a // lot less overhead. class DisjunctiveWithTwoItems : public PropagatorInterface { diff --git a/ortools/sat/disjunctive_test.cc b/ortools/sat/disjunctive_test.cc index 85c1db3204..5fa0063cf8 100644 --- a/ortools/sat/disjunctive_test.cc +++ b/ortools/sat/disjunctive_test.cc @@ -31,6 +31,7 @@ #include "ortools/base/logging.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" +#include "ortools/sat/integer_expr.h" #include "ortools/sat/integer_search.h" #include "ortools/sat/intervals.h" #include "ortools/sat/model.h" @@ -238,8 +239,8 @@ TEST(DisjunctiveConstraintTest, Precedences) { Trail* trail = model.GetOrCreate(); IntegerTrail* integer_trail = model.GetOrCreate(); auto* precedences = model.GetOrCreate(); - auto* relations = model.GetOrCreate(); auto* intervals = model.GetOrCreate(); + auto* lin2_bounds = model.GetOrCreate(); const auto add_affine_coeff_one_precedence = [&](const AffineExpression e1, const AffineExpression& e2) { @@ -249,8 +250,8 @@ TEST(DisjunctiveConstraintTest, Precedences) { CHECK_EQ(e2.coeff, 1); precedences->AddPrecedenceWithOffset(e1.var, e2.var, e1.constant - e2.constant); - relations->AddUpperBound(LinearExpression2::Difference(e1.var, e2.var), - e2.constant - e1.constant); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(e1.var, e2.var), + e2.constant - e1.constant); }; const int kStart(0); @@ -483,6 +484,22 @@ TEST(DisjunctiveTest, TwoIntervalsTest) { EXPECT_EQ(12, CountAllSolutions(instance, AddDisjunctive)); } +namespace { + +void AddLowerOrEqualWithOffset(AffineExpression a, IntegerVariable b, + int64_t offset, Model* model) { + const int64_t rhs = -a.constant.value() - offset; + std::vector vars = {a.var, b}; + std::vector coeffs = {a.coeff.value(), -1}; + AddWeightedSumLowerOrEqual({}, vars, coeffs, rhs, model); + + // We also need to register them. 
+ model->GetOrCreate()->AddUpperBound( + LinearExpression2::Difference(a.var, b), rhs); +} + +} // namespace + TEST(DisjunctiveTest, Precedences) { Model model; @@ -493,10 +510,9 @@ TEST(DisjunctiveTest, Precedences) { const IntegerVariable var = model.Add(NewIntegerVariable(0, 10)); IntervalsRepository* intervals = model.GetOrCreate(); - model.Add( - AffineCoeffOneLowerOrEqualWithOffset(intervals->End(ids[0]), var, 5)); - model.Add( - AffineCoeffOneLowerOrEqualWithOffset(intervals->End(ids[1]), var, 4)); + + AddLowerOrEqualWithOffset(intervals->End(ids[0]), var, 5, &model); + AddLowerOrEqualWithOffset(intervals->End(ids[1]), var, 4, &model); EXPECT_TRUE(model.GetOrCreate()->Propagate()); EXPECT_EQ(model.Get(LowerBound(var)), (3 + 2) + std::min(4, 5)); diff --git a/ortools/sat/feasibility_jump.cc b/ortools/sat/feasibility_jump.cc index 3d57b0ff5b..64ae336a88 100644 --- a/ortools/sat/feasibility_jump.cc +++ b/ortools/sat/feasibility_jump.cc @@ -364,8 +364,7 @@ std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { // still finish each batch though). We will also reset the luby sequence. bool new_best_solution_was_found = false; if (type() == SubSolver::INCOMPLETE) { - const int64_t best = - shared_response_->SolutionsRepository().GetBestRank(); + const int64_t best = shared_response_->GetBestSolutionObjective().value(); if (best < state_->last_solution_rank) { states_->ResetLubyCounter(); new_best_solution_was_found = true; @@ -394,11 +393,9 @@ std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { new_best_solution_was_found) { if (type() == SubSolver::INCOMPLETE) { // Choose a base solution for this neighborhood. 
- std::shared_ptr::Solution> - solution = shared_response_->SolutionsRepository() - .GetRandomBiasedSolution(random_); - state_->solution = solution->variable_values; - state_->base_solution = solution; + state_->base_solution = + shared_response_->SolutionPool().GetSolutionToImprove(random_); + state_->solution = state_->base_solution->variable_values; ++state_->num_solutions_imported; } else { if (!first_time) { @@ -427,6 +424,10 @@ std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { } // Between chunk, we synchronize bounds. + // + // TODO(user): This do not play well with optimizing solution whose + // objective lag behind... Basically, we can run LS on old solution but will + // only consider it feasible if it improve the best known solution. bool recompute_compound_weights = false; if (linear_model_->model_proto().has_objective()) { const IntegerValue lb = shared_response_->GetInnerObjectiveLowerBound(); @@ -500,15 +501,15 @@ std::function FeasibilityJumpSolver::GenerateTask(int64_t /*task_id*/) { ++state_->counters.num_batches; if (DoSomeLinearIterations() && DoSomeGeneralIterations()) { // Checks for infeasibility induced by the non supported constraints. + // + // TODO(user): Checking the objective is faster and we could avoid to + // check feasibility if we are not going to keep the solution anyway. if (SolutionIsFeasible(linear_model_->model_proto(), state_->solution)) { auto pointers = PushAndMaybeCombineSolution( shared_response_, linear_model_->model_proto(), state_->solution, absl::StrCat(name(), "_", state_->options.name(), "(", OneLineStats(), ")"), - state_->base_solution == nullptr - ? absl::Span() - : state_->base_solution->variable_values, - /*model=*/nullptr); + state_->base_solution); // If we pushed a new solution, we use it as a new "base" so that we // will have a smaller delta on the next solution we find. 
state_->base_solution = pointers.pushed_solution; diff --git a/ortools/sat/feasibility_jump.h b/ortools/sat/feasibility_jump.h index d1975161c7..e40f5d42b9 100644 --- a/ortools/sat/feasibility_jump.h +++ b/ortools/sat/feasibility_jump.h @@ -510,7 +510,7 @@ class FeasibilityJumpSolver : public SubSolver { if (shared_response_->ProblemIsSolved()) return false; if (shared_time_limit_->LimitReached()) return false; - return (shared_response_->SolutionsRepository().NumSolutions() > 0) == + return shared_response_->HasFeasibleSolution() == (type() == SubSolver::INCOMPLETE); } diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 1c8cbf1438..9802f74a75 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -505,6 +505,7 @@ class IntegerTrail final : public SatPropagator { // Same as above for an affine expression. IntegerValue LowerBound(AffineExpression expr) const; IntegerValue UpperBound(AffineExpression expr) const; + IntegerValue UpperBound(LinearExpression2 expr) const; bool IsFixed(AffineExpression expr) const; IntegerValue FixedValue(AffineExpression expr) const; @@ -1375,6 +1376,17 @@ inline IntegerValue IntegerTrail::UpperBound(AffineExpression expr) const { return UpperBound(expr.var) * expr.coeff + expr.constant; } +inline IntegerValue IntegerTrail::UpperBound(LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + IntegerValue result = 0; + for (int i = 0; i < 2; ++i) { + if (expr.coeffs[i] != 0) { + result += expr.coeffs[i] * UpperBound(expr.vars[i]); + } + } + return result; +} + inline bool IntegerTrail::IsFixed(AffineExpression expr) const { if (expr.var == kNoIntegerVariable) return true; return IsFixed(expr.var); diff --git a/ortools/sat/integer_base.cc b/ortools/sat/integer_base.cc index 8af3a695ca..7c04db31d4 100644 --- a/ortools/sat/integer_base.cc +++ b/ortools/sat/integer_base.cc @@ -13,9 +13,11 @@ #include "ortools/sat/integer_base.h" +#include #include #include #include +#include #include "absl/log/check.h" @@ 
-79,14 +81,15 @@ bool LinearExpression2::NegateForCanonicalization() { return negate; } -void LinearExpression2::CanonicalizeAndUpdateBounds(IntegerValue& lb, +bool LinearExpression2::CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub, bool allow_negation) { SimpleCanonicalization(); - if (coeffs[0] == 0 || coeffs[1] == 0) return; // abort. + if (coeffs[0] == 0 || coeffs[1] == 0) return false; // abort. + bool negated = false; if (allow_negation) { - const bool negated = NegateForCanonicalization(); + negated = NegateForCanonicalization(); if (negated) { // We need to be able to negate without overflow. CHECK_GE(lb, kMinIntegerValue); @@ -108,33 +111,68 @@ void LinearExpression2::CanonicalizeAndUpdateBounds(IntegerValue& lb, CHECK(coeffs[0] != 0 || vars[0] == kNoIntegerVariable); CHECK(coeffs[1] != 0 || vars[1] == kNoIntegerVariable); + + return negated; } -bool BestBinaryRelationBounds::Add(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub) { - expr.CanonicalizeAndUpdateBounds(lb, ub); - if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return false; +bool LinearExpression2::IsCanonicalized() const { + for (int i : {0, 1}) { + if ((vars[i] == kNoIntegerVariable) != (coeffs[i] == 0)) { + return false; + } + } + if (vars[0] >= vars[1]) return false; + + if (vars[0] == kNoIntegerVariable) return true; + + return coeffs[0] > 0 && coeffs[1] > 0; +} + +std::pair +BestBinaryRelationBounds::Add(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub) { + const bool negated = + expr.CanonicalizeAndUpdateBounds(lb, ub, /*allow_negation=*/true); + + // We only store proper linear2. + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { + return {AddResult::INVALID, AddResult::INVALID}; + } auto [it, inserted] = best_bounds_.insert({expr, {lb, ub}}); - if (inserted) return true; + if (inserted) { + std::pair result = { + lb > kMinIntegerValue ? AddResult::ADDED : AddResult::INVALID, + ub < kMaxIntegerValue ? 
AddResult::ADDED : AddResult::INVALID}; + if (negated) std::swap(result.first, result.second); + return result; + } const auto [known_lb, known_ub] = it->second; - bool restricted = false; + + std::pair result = { + lb > kMinIntegerValue ? AddResult::NOT_BETTER : AddResult::INVALID, + ub < kMaxIntegerValue ? AddResult::NOT_BETTER : AddResult::INVALID}; if (lb > known_lb) { + result.first = (it->second.first == kMinIntegerValue) ? AddResult::ADDED + : AddResult::UPDATED; it->second.first = lb; - restricted = true; } if (ub < known_ub) { + result.second = (it->second.second == kMaxIntegerValue) + ? AddResult::ADDED + : AddResult::UPDATED; it->second.second = ub; - restricted = true; } - return restricted; + if (negated) std::swap(result.first, result.second); + return result; } RelationStatus BestBinaryRelationBounds::GetStatus(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) const { - expr.CanonicalizeAndUpdateBounds(lb, ub); + expr.CanonicalizeAndUpdateBounds(lb, ub, /*allow_negation=*/true); if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { return RelationStatus::IS_UNKNOWN; } @@ -165,4 +203,42 @@ IntegerValue BestBinaryRelationBounds::GetUpperBound( return kMaxIntegerValue; } +// TODO(user): Maybe introduce a CanonicalizedLinear2 class so we automatically +// get the better function, and it documents when we have canonicalized +// expression. 
+IntegerValue BestBinaryRelationBounds::UpperBoundWhenCanonicalized( + LinearExpression2 expr) const { + DCHECK_EQ(expr.DivideByGcd(), 1); + DCHECK(expr.IsCanonicalized()); + const bool negated = expr.NegateForCanonicalization(); + const auto it = best_bounds_.find(expr); + if (it != best_bounds_.end()) { + const auto [known_lb, known_ub] = it->second; + if (negated) { + return -known_lb; + } else { + return known_ub; + } + } + return kMaxIntegerValue; +} + +std::vector> +BestBinaryRelationBounds::GetSortedNonTrivialBounds() const { + std::vector> root_relations_sorted; + root_relations_sorted.reserve(2 * best_bounds_.size()); + for (const auto& [expr, bounds] : best_bounds_) { + if (bounds.first != kMinIntegerValue) { + LinearExpression2 negated_expr = expr; + negated_expr.Negate(); + root_relations_sorted.push_back({negated_expr, -bounds.first}); + } + if (bounds.second != kMaxIntegerValue) { + root_relations_sorted.push_back({expr, bounds.second}); + } + } + std::sort(root_relations_sorted.begin(), root_relations_sorted.end()); + return root_relations_sorted; +} + } // namespace operations_research::sat diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index a86d15eb07..c6f3ba4427 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -369,14 +369,15 @@ struct LinearExpression2 { // This will not change any bounds on the LinearExpression2. // That is we will not potentially Negate() the expression like // CanonicalizeAndUpdateBounds() might do. - // Note that since kNoIntegerVariable=-1 and we sort the variables, if we any + // Note that since kNoIntegerVariable=-1 and we sort the variables, if we have // one zero and one non-zero we will always have the zero first. void SimpleCanonicalization(); // Fully canonicalizes the expression and updates the given bounds // accordingly. This is the same as SimpleCanonicalization(), DivideByGcd() // and the NegateForCanonicalization() with a proper updates of the bounds. 
- void CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub, + // Returns whether the expression was negated. + bool CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub, bool allow_negation = false); // Divides the expression by the gcd of both coefficients, and returns it. @@ -384,6 +385,8 @@ struct LinearExpression2 { // zero. IntegerValue DivideByGcd(); + bool IsCanonicalized() const; + // Makes sure expr and -expr have the same canonical representation by // negating the expression of it is in the non-canonical form. Returns true if // the expression was negated. @@ -437,9 +440,21 @@ class BestBinaryRelationBounds { public: // Register the fact that expr \in [lb, ub] is true. // - // Returns true if this fact is new, that is if the bounds are tighter than - // the current ones. - bool Add(LinearExpression2 expr, IntegerValue lb, IntegerValue ub); + // If lb==kMinIntegerValue it only register that expr <= ub (and symmetrically + // for ub==kMaxIntegerValue). + // + // Returns for each of the bound if it was restricted (added/updated), if it + // was ignored because a better or equal bound was already present, or if it + // was rejected because it was invalid (e.g. the expression was a degenerate + // linear2 or the bound was a min/max value). + enum class AddResult { + ADDED, + UPDATED, + NOT_BETTER, + INVALID, + }; + std::pair Add(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub); // Returns the known status of expr <= bound. RelationStatus GetStatus(LinearExpression2 expr, IntegerValue lb, @@ -450,6 +465,15 @@ class BestBinaryRelationBounds { // entry in the hash-map. IntegerValue GetUpperBound(LinearExpression2 expr) const; + // Same as GetUpperBound() but assume the expression is already canonicalized. + // This is slighlty faster. 
+ IntegerValue UpperBoundWhenCanonicalized(LinearExpression2 expr) const; + + int64_t num_bounds() const { return best_bounds_.size(); } + + std::vector> + GetSortedNonTrivialBounds() const; + private: // The best bound on the given "canonicalized" expression. absl::flat_hash_map> diff --git a/ortools/sat/integer_base_test.cc b/ortools/sat/integer_base_test.cc index e3b069cd02..10774a554a 100644 --- a/ortools/sat/integer_base_test.cc +++ b/ortools/sat/integer_base_test.cc @@ -13,6 +13,8 @@ #include "ortools/sat/integer_base.h" +#include + #include "gtest/gtest.h" namespace operations_research::sat { @@ -59,12 +61,17 @@ TEST(BestBinaryRelationBoundsTest, Basic) { expr.coeffs[0] = IntegerValue(1); expr.coeffs[1] = IntegerValue(-1); + using AddResult = BestBinaryRelationBounds::AddResult; + BestBinaryRelationBounds best_bounds; - EXPECT_TRUE(best_bounds.Add(expr, IntegerValue(0), IntegerValue(5))); - EXPECT_TRUE(best_bounds.Add(expr, IntegerValue(3), IntegerValue(8))); - EXPECT_TRUE(best_bounds.Add(expr, IntegerValue(-1), IntegerValue(4))); - EXPECT_FALSE( - best_bounds.Add(expr, IntegerValue(3), IntegerValue(4))); // best + EXPECT_EQ(best_bounds.Add(expr, IntegerValue(0), IntegerValue(5)), + std::make_pair(AddResult::ADDED, AddResult::ADDED)); + EXPECT_EQ(best_bounds.Add(expr, IntegerValue(3), IntegerValue(8)), + std::make_pair(AddResult::UPDATED, AddResult::NOT_BETTER)); + EXPECT_EQ(best_bounds.Add(expr, IntegerValue(-1), IntegerValue(4)), + std::make_pair(AddResult::NOT_BETTER, AddResult::UPDATED)); + EXPECT_EQ(best_bounds.Add(expr, IntegerValue(3), IntegerValue(4)), // best + std::make_pair(AddResult::NOT_BETTER, AddResult::NOT_BETTER)); EXPECT_EQ(RelationStatus::IS_TRUE, best_bounds.GetStatus(expr, IntegerValue(-10), IntegerValue(4))); @@ -85,8 +92,10 @@ TEST(BestBinaryRelationBoundsTest, UpperBound) { expr.coeffs[0] = IntegerValue(1); expr.coeffs[1] = IntegerValue(-1); + using AddResult = BestBinaryRelationBounds::AddResult; BestBinaryRelationBounds best_bounds; 
- EXPECT_TRUE(best_bounds.Add(expr, IntegerValue(0), IntegerValue(5))); + EXPECT_EQ(best_bounds.Add(expr, IntegerValue(0), IntegerValue(5)), + std::make_pair(AddResult::ADDED, AddResult::ADDED)); EXPECT_EQ(best_bounds.GetUpperBound(expr), IntegerValue(5)); diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index 7e095a2eba..5af0f3cc82 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -394,7 +394,7 @@ std::function IntegerValueSelectionHeuristic( value_selection_heuristics.push_back( [model, response_manager](IntegerVariable var) { return SplitUsingBestSolutionValueInRepository( - var, response_manager->SolutionsRepository(), model); + var, response_manager->SolutionPool().BestSolutions(), model); }); } } @@ -1026,7 +1026,7 @@ std::function RandomizeOnRestartHeuristic( value_selection_heuristics.push_back( [model, response_manager](IntegerVariable var) { return SplitUsingBestSolutionValueInRepository( - var, response_manager->SolutionsRepository(), model); + var, response_manager->SolutionPool().BestSolutions(), model); }); value_selection_weight.push_back(5); } diff --git a/ortools/sat/intervals.cc b/ortools/sat/intervals.cc index c50429e71f..113ad4e5d9 100644 --- a/ortools/sat/intervals.cc +++ b/ortools/sat/intervals.cc @@ -43,7 +43,7 @@ IntervalsRepository::IntervalsRepository(Model* model) sat_solver_(model->GetOrCreate()), implications_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), - relations_maps_(model->GetOrCreate()) {} + reified_precedences_(model->GetOrCreate()) {} IntervalVariable IntervalsRepository::CreateInterval(IntegerVariable start, IntegerVariable end, @@ -155,9 +155,9 @@ IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial( } // Abort if the relation is already known. 
- if (relations_maps_->GetLevelZeroPrecedenceStatus(a.end, b.start) == + if (reified_precedences_->GetLevelZeroPrecedenceStatus(a.end, b.start) == RelationStatus::IS_TRUE || - relations_maps_->GetLevelZeroPrecedenceStatus(b.end, a.start) == + reified_precedences_->GetLevelZeroPrecedenceStatus(b.end, a.start) == RelationStatus::IS_TRUE) { return kNoLiteralIndex; } @@ -181,10 +181,10 @@ IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial( // Also insert it in precedences. if (enforcement_literals.empty()) { - relations_maps_->AddReifiedPrecedenceIfNonTrivial(a_before_b, a.end, - b.start); - relations_maps_->AddReifiedPrecedenceIfNonTrivial(a_before_b.Negated(), - b.end, a.start); + reified_precedences_->AddReifiedPrecedenceIfNonTrivial(a_before_b, a.end, + b.start); + reified_precedences_->AddReifiedPrecedenceIfNonTrivial(a_before_b.Negated(), + b.end, a.start); } enforcement_literals.push_back(a_before_b); @@ -212,12 +212,12 @@ IntervalsRepository::GetOrCreateDisjunctivePrecedenceLiteralIfNonTrivial( bool IntervalsRepository::CreatePrecedenceLiteralIfNonTrivial( AffineExpression x, AffineExpression y) { - const LiteralIndex index = relations_maps_->GetReifiedPrecedence(x, y); + const LiteralIndex index = reified_precedences_->GetReifiedPrecedence(x, y); if (index != kNoLiteralIndex) return false; // We want l => x <= y and not(l) => x > y <=> y + 1 <= x // Do not create l if the relation is always true or false. - if (relations_maps_->GetLevelZeroPrecedenceStatus(x, y) != + if (reified_precedences_->GetLevelZeroPrecedenceStatus(x, y) != RelationStatus::IS_UNKNOWN) { return false; } @@ -225,7 +225,7 @@ bool IntervalsRepository::CreatePrecedenceLiteralIfNonTrivial( // Create a new literal. 
const BooleanVariable boolean_var = sat_solver_->NewBooleanVariable(); const Literal x_before_y = Literal(boolean_var, true); - relations_maps_->AddReifiedPrecedenceIfNonTrivial(x_before_y, x, y); + reified_precedences_->AddReifiedPrecedenceIfNonTrivial(x_before_y, x, y); AffineExpression y_plus_one = y; y_plus_one.constant += 1; @@ -236,7 +236,7 @@ bool IntervalsRepository::CreatePrecedenceLiteralIfNonTrivial( LiteralIndex IntervalsRepository::GetPrecedenceLiteral( AffineExpression x, AffineExpression y) const { - return relations_maps_->GetReifiedPrecedence(x, y); + return reified_precedences_->GetReifiedPrecedence(x, y); } Literal IntervalsRepository::GetOrCreatePrecedenceLiteral(AffineExpression x, @@ -247,7 +247,7 @@ Literal IntervalsRepository::GetOrCreatePrecedenceLiteral(AffineExpression x, } CHECK(CreatePrecedenceLiteralIfNonTrivial(x, y)); - const LiteralIndex index = relations_maps_->GetReifiedPrecedence(x, y); + const LiteralIndex index = reified_precedences_->GetReifiedPrecedence(x, y); CHECK_NE(index, kNoLiteralIndex); return Literal(index); } diff --git a/ortools/sat/intervals.h b/ortools/sat/intervals.h index 8b36fd47d2..fe4f0fde0b 100644 --- a/ortools/sat/intervals.h +++ b/ortools/sat/intervals.h @@ -28,6 +28,7 @@ #include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" #include "ortools/sat/no_overlap_2d_helper.h" +#include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" #include "ortools/sat/sat_solver.h" #include "ortools/sat/scheduling_helpers.h" @@ -189,7 +190,7 @@ class IntervalsRepository { SatSolver* sat_solver_; BinaryImplicationGraph* implications_; IntegerTrail* integer_trail_; - BinaryRelationsMaps* relations_maps_; + ReifiedLinear2Bounds* reified_precedences_; // Literal indicating if the tasks is executed. Tasks that are always executed // will have a kNoLiteralIndex entry in this vector. 
diff --git a/ortools/sat/linear_propagation.cc b/ortools/sat/linear_propagation.cc index c77ff22752..330483c928 100644 --- a/ortools/sat/linear_propagation.cc +++ b/ortools/sat/linear_propagation.cc @@ -384,8 +384,8 @@ LinearPropagator::LinearPropagator(Model* model) rev_int_repository_(model->GetOrCreate()), rev_integer_value_repository_( model->GetOrCreate()), - precedences_(model->GetOrCreate()), - binary_relations_(model->GetOrCreate()), + precedences_(model->GetOrCreate()), + linear3_bounds_(model->GetOrCreate()), random_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), watcher_id_(watcher_->Register(this)), @@ -538,7 +538,8 @@ bool LinearPropagator::Propagate() { // - Z + Y >= 6 ==> Z >= 1 // - (1) again to push T <= 10 and reach the propagation fixed point. Bitset64::View in_queue = in_queue_.view(); - const bool push_affine_ub = push_affine_ub_for_binary_relations_; + const bool push_affine_ub = push_affine_ub_for_binary_relations_ || + trail_->CurrentDecisionLevel() == 0; while (true) { // We always process the whole queue in FIFO order. // Note that the order really only matter for infeasible constraint so it @@ -612,7 +613,7 @@ bool LinearPropagator::Propagate() { // The rev_rhs was updated to: initial_rhs - lb(vars[2]) * coeffs[2]. 
const IntegerValue initial_rhs = info.rev_rhs + coeffs[2] * integer_trail_->LowerBound(vars[2]); - binary_relations_->AddAffineUpperBound( + linear3_bounds_->AddAffineUpperBound( expr, AffineExpression(vars[2], -coeffs[2], initial_rhs)); } else if (info.rev_size == 3) { for (int i = 0; i < 3; ++i) { @@ -623,7 +624,7 @@ bool LinearPropagator::Propagate() { expr.vars[1] = vars[b]; expr.coeffs[0] = coeffs[a]; expr.coeffs[1] = coeffs[b]; - binary_relations_->AddAffineUpperBound( + linear3_bounds_->AddAffineUpperBound( expr, AffineExpression(vars[i], -coeffs[i], info.rev_rhs)); } } diff --git a/ortools/sat/linear_propagation.h b/ortools/sat/linear_propagation.h index b98f46711e..ab4027b665 100644 --- a/ortools/sat/linear_propagation.h +++ b/ortools/sat/linear_propagation.h @@ -421,8 +421,8 @@ class LinearPropagator : public PropagatorInterface, TimeLimit* time_limit_; RevIntRepository* rev_int_repository_; RevIntegerValueRepository* rev_integer_value_repository_; - PrecedenceRelations* precedences_; - BinaryRelationsMaps* binary_relations_; + EnforcedLinear2Bounds* precedences_; + Linear2BoundsFromLinear3* linear3_bounds_; ModelRandomGenerator* random_; SharedStatistics* shared_stats_ = nullptr; const int watcher_id_; diff --git a/ortools/sat/linear_relaxation.cc b/ortools/sat/linear_relaxation.cc index 2a96fdb1f7..0bd15c832a 100644 --- a/ortools/sat/linear_relaxation.cc +++ b/ortools/sat/linear_relaxation.cc @@ -699,8 +699,8 @@ std::optional DetectMakespanFromPrecedences( } std::vector output; - auto* precedences = model->GetOrCreate(); - precedences->ComputeFullPrecedences(end_vars, &output); + auto* evaluator = model->GetOrCreate(); + evaluator->ComputeFullPrecedences(end_vars, &output); for (const auto& p : output) { // TODO(user): What if we have more than one candidate makespan ? 
if (p.indices.size() != ends.size()) continue; diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 562795dd3c..66fd26ee3a 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -53,53 +54,103 @@ namespace operations_research { namespace sat { -bool PrecedenceRelations::AddBounds(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub) { +std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, + IntegerValue lb, + IntegerValue ub) { + const IntegerValue zero_level_lb = integer_trail_->LevelZeroLowerBound(expr); + const IntegerValue zero_level_ub = integer_trail_->LevelZeroUpperBound(expr); + if (lb <= zero_level_lb && ub >= zero_level_ub) { + return {false, false}; + } + // Don't store one of the bounds if it is trivial. + if (lb <= zero_level_lb) { + lb = kMinIntegerValue; + } + if (ub >= zero_level_ub) { + ub = kMaxIntegerValue; + } expr.CanonicalizeAndUpdateBounds(lb, ub); + const auto [status_lb, status_ub] = root_level_relations_.Add(expr, lb, ub); + const bool lb_restricted = + status_lb == BestBinaryRelationBounds::AddResult::ADDED || + status_lb == BestBinaryRelationBounds::AddResult::UPDATED; + const bool ub_restricted = + status_ub == BestBinaryRelationBounds::AddResult::ADDED || + status_ub == BestBinaryRelationBounds::AddResult::UPDATED; + if (!lb_restricted && !ub_restricted) return {false, false}; + + ++num_updates_; + + if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { + // +2 to handle possible negation. + const int new_size = + std::max(expr.vars[0].value(), expr.vars[1].value()) + 2; + if (new_size > coeff_one_var_lookup_.size()) { + coeff_one_var_lookup_.resize(new_size); + } + if (status_lb == BestBinaryRelationBounds::AddResult::ADDED) { + // First time added to root_level_relations_. 
+ coeff_one_var_lookup_[NegationOf(expr.vars[0])].push_back( + NegationOf(expr.vars[1])); + coeff_one_var_lookup_[NegationOf(expr.vars[1])].push_back( + NegationOf(expr.vars[0])); + } + if (status_ub == BestBinaryRelationBounds::AddResult::ADDED) { + coeff_one_var_lookup_[expr.vars[0]].push_back(expr.vars[1]); + coeff_one_var_lookup_[expr.vars[1]].push_back(expr.vars[0]); + } + } + + return {lb_restricted, ub_restricted}; +} + +IntegerValue RootLevelLinear2Bounds::LevelZeroUpperBound( + LinearExpression2 expr) const { + // TODO(user): Remove the expression from the root_level_relations_ if the + // zero-level bound got more restrictive. + return std::min(integer_trail_->LevelZeroUpperBound(expr), + root_level_relations_.GetUpperBound(expr)); +} + +RootLevelLinear2Bounds::~RootLevelLinear2Bounds() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"RootLevelLinear2Bounds/num_updates", num_updates_}); + shared_stats_->AddStats(stats); +} + +RelationStatus RootLevelLinear2Bounds::GetLevelZeroStatus( + LinearExpression2 expr, IntegerValue lb, IntegerValue ub) const { + expr.SimpleCanonicalization(); if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { - // This class handles only binary relationships, let something else handle - // the case where there is actually a single variable. - return false; + return RelationStatus::IS_UNKNOWN; } + const IntegerValue known_ub = LevelZeroUpperBound(expr); + expr.Negate(); + const IntegerValue known_lb = -LevelZeroUpperBound(expr); + if (lb <= known_lb && ub >= known_ub) return RelationStatus::IS_TRUE; + if (lb > known_ub || ub < known_lb) return RelationStatus::IS_FALSE; - // Add to root_relations_. - // - // TODO(user): AddInternal() only returns true if this is the first relation - // between head and tail. But we can still avoid an extra lookup. 
- const bool add_ub = ub < LevelZeroUpperBound(expr); - LinearExpression2 expr_for_lb = expr; - expr_for_lb.Negate(); - const bool add_lb = lb > -LevelZeroUpperBound(expr_for_lb); - if (!add_ub && !add_lb) { - return false; - } - - if (add_ub) { - AddInternal(expr, ub); - } - if (add_lb) { - AddInternal(expr_for_lb, -lb); - } - - // If we are not built, make sure there is enough room in the graph. - // TODO(user): Alternatively, force caller to do a Resize(). - const int max_node = - std::max(PositiveVariable(expr.vars[0]), PositiveVariable(expr.vars[1])) - .value() + - 1; - if (!is_built_ && max_node >= graph_.num_nodes()) { - graph_.AddNode(max_node); - } - return true; + return RelationStatus::IS_UNKNOWN; } -bool PrecedenceRelations::AddUpperBound(LinearExpression2 expr, - IntegerValue ub) { - return AddBounds(expr, kMinIntegerValue, ub); +IntegerValue RootLevelLinear2Bounds::GetUpperBoundNoTrail( + LinearExpression2 expr) const { + DCHECK_EQ(expr.DivideByGcd(), 1); + DCHECK(expr.IsCanonicalized()); + return root_level_relations_.UpperBoundWhenCanonicalized(expr); } -void PrecedenceRelations::PushConditionalRelation( +EnforcedLinear2Bounds::~EnforcedLinear2Bounds() { + if (!VLOG_IS_ON(1)) return; + std::vector> stats; + stats.push_back({"EnforcedLinear2Bounds/num_conditional_relation_updates", + num_conditional_relation_updates_}); + shared_stats_->AddStats(stats); +} + +void EnforcedLinear2Bounds::PushConditionalRelation( absl::Span enforcements, LinearExpression2 expr, IntegerValue rhs) { expr.SimpleCanonicalization(); @@ -115,21 +166,16 @@ void PrecedenceRelations::PushConditionalRelation( } if (enforcements.empty() || trail_->CurrentDecisionLevel() == 0) { - AddUpperBound(expr, rhs); + root_level_bounds_->AddUpperBound(expr, rhs); return; } const IntegerValue gcd = expr.DivideByGcd(); rhs = FloorRatio(rhs, gcd); - // Ignore if no better than best_relations, otherwise increase it. 
- { - const auto [it, inserted] = best_relations_.insert({expr, rhs}); - if (!inserted) { - if (rhs >= it->second) return; // Ignore. - it->second = rhs; - } - } + if (rhs >= root_level_bounds_->LevelZeroUpperBound(expr)) return; + + ++num_conditional_relation_updates_; const int new_index = conditional_stack_.size(); const auto [it, inserted] = conditional_relations_.insert({expr, new_index}); @@ -140,17 +186,15 @@ void PrecedenceRelations::PushConditionalRelation( if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { const int new_size = std::max(expr.vars[0].value(), expr.vars[1].value()) + 1; - if (new_size > conditional_after_.size()) { - conditional_after_.resize(new_size); + if (new_size > conditional_var_lookup_.size()) { + conditional_var_lookup_.resize(new_size); } - conditional_after_[expr.vars[0]].push_back(NegationOf(expr.vars[1])); - conditional_after_[expr.vars[1]].push_back(NegationOf(expr.vars[0])); + conditional_var_lookup_[expr.vars[0]].push_back(expr.vars[1]); + conditional_var_lookup_[expr.vars[1]].push_back(expr.vars[0]); } } else { - // We should only decrease because we ignored entry worse than the one in - // best_relations_. const int prev_entry = it->second; - DCHECK_LT(rhs, conditional_stack_[prev_entry].rhs); + if (rhs >= conditional_stack_[prev_entry].rhs) return; // Update. it->second = new_index; @@ -159,7 +203,7 @@ void PrecedenceRelations::PushConditionalRelation( } } -void PrecedenceRelations::CreateLevelEntryIfNeeded() { +void EnforcedLinear2Bounds::CreateLevelEntryIfNeeded() { const int current = trail_->CurrentDecisionLevel(); if (!level_to_stack_size_.empty() && level_to_stack_size_.back().first == current) @@ -168,7 +212,7 @@ void PrecedenceRelations::CreateLevelEntryIfNeeded() { } // We only pop what is needed. 
-void PrecedenceRelations::SetLevel(int level) { +void EnforcedLinear2Bounds::SetLevel(int level) { while (!level_to_stack_size_.empty() && level_to_stack_size_.back().first > level) { const int target = level_to_stack_size_.back().second; @@ -177,18 +221,16 @@ void PrecedenceRelations::SetLevel(int level) { const ConditionalEntry& back = conditional_stack_.back(); if (back.prev_entry != -1) { conditional_relations_[back.key] = back.prev_entry; - UpdateBestRelation(back.key, conditional_stack_[back.prev_entry].rhs); } else { - UpdateBestRelation(back.key, kMaxIntegerValue); conditional_relations_.erase(back.key); if (back.key.coeffs[0] == 1 && back.key.coeffs[1] == 1) { - DCHECK_EQ(conditional_after_[back.key.vars[0]].back(), - NegationOf(back.key.vars[1])); - DCHECK_EQ(conditional_after_[back.key.vars[1]].back(), - NegationOf(back.key.vars[0])); - conditional_after_[back.key.vars[0]].pop_back(); - conditional_after_[back.key.vars[1]].pop_back(); + DCHECK_EQ(conditional_var_lookup_[back.key.vars[0]].back(), + back.key.vars[1]); + DCHECK_EQ(conditional_var_lookup_[back.key.vars[1]].back(), + back.key.vars[0]); + conditional_var_lookup_[back.key.vars[0]].pop_back(); + conditional_var_lookup_[back.key.vars[1]].pop_back(); } } conditional_stack_.pop_back(); @@ -197,18 +239,7 @@ void PrecedenceRelations::SetLevel(int level) { } } -IntegerValue PrecedenceRelations::LevelZeroUpperBound( - LinearExpression2 expr) const { - expr.SimpleCanonicalization(); - const IntegerValue gcd = expr.DivideByGcd(); - const auto it = root_relations_.find(expr); - if (it != root_relations_.end()) { - return CapProdI(it->second, gcd); - } - return kMaxIntegerValue; -} - -void PrecedenceRelations::AddReasonForUpperBoundLowerThan( +void EnforcedLinear2Bounds::AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, std::vector* /*unused*/) const { @@ -231,22 +262,58 @@ void PrecedenceRelations::AddReasonForUpperBoundLowerThan( } } -IntegerValue 
PrecedenceRelations::UpperBound(LinearExpression2 expr) const { +IntegerValue EnforcedLinear2Bounds::UpperBound(LinearExpression2 expr) const { expr.SimpleCanonicalization(); const IntegerValue gcd = expr.DivideByGcd(); - const auto it = best_relations_.find(expr); - if (it != best_relations_.end()) { - return CapProdI(gcd, it->second); + + const auto it = conditional_relations_.find(expr); + if (it != conditional_relations_.end()) { + const ConditionalEntry& entry = conditional_stack_[it->second]; + if (DEBUG_MODE) { + for (const Literal l : entry.enforcements) { + CHECK(trail_->Assignment().LiteralIsTrue(l)); + } + } + DCHECK_LT(entry.rhs, root_level_bounds_->LevelZeroUpperBound(expr)); + return CapProdI(gcd, entry.rhs); } - DCHECK(!root_relations_.contains(expr)); - DCHECK(!conditional_relations_.contains(expr)); - return kMaxIntegerValue; + return CapProdI(gcd, root_level_bounds_->LevelZeroUpperBound(expr)); } -void PrecedenceRelations::Build() { +IntegerValue EnforcedLinear2Bounds::GetUpperBoundFromEnforced( + LinearExpression2 expr) const { + DCHECK_EQ(expr.DivideByGcd(), 1); + DCHECK(expr.IsCanonicalized()); + const auto it = conditional_relations_.find(expr); + if (it == conditional_relations_.end()) { + return kMaxIntegerValue; + } else { + const ConditionalEntry& entry = conditional_stack_[it->second]; + if (DEBUG_MODE) { + for (const Literal l : entry.enforcements) { + CHECK(trail_->Assignment().LiteralIsTrue(l)); + } + } + DCHECK_LT(entry.rhs, root_level_bounds_->LevelZeroUpperBound(expr)); + return entry.rhs; + } +} + +void TransitivePrecedencesEvaluator::Build() { if (is_built_) return; is_built_ = true; + const std::vector> + root_relations_sorted = root_level_bounds_->GetSortedNonTrivialBounds(); + int max_node = 0; + for (const auto [expr, _] : root_relations_sorted) { + max_node = std::max(max_node, PositiveVariable(expr.vars[0]).value()); + max_node = std::max(max_node, PositiveVariable(expr.vars[1]).value()); + } + max_node++; + if (max_node >= 
graph_.num_nodes()) { + graph_.AddNode(max_node); + } const int num_nodes = graph_.num_nodes(); util_intops::StrongVector> before(num_nodes); @@ -254,15 +321,12 @@ void PrecedenceRelations::Build() { // We will construct a graph with the current relation from all_relations_. // And use this to compute the "closure". CHECK(arc_offsets_.empty()); - graph_.ReserveArcs(2 * root_relations_.size()); - std::vector> root_relations_sorted( - root_relations_.begin(), root_relations_.end()); - std::sort(root_relations_sorted.begin(), root_relations_sorted.end()); + graph_.ReserveArcs(2 * root_relations_sorted.size()); for (const auto [var_pair, negated_offset] : root_relations_sorted) { // TODO(user): Support negative offset? // // Note that if we only have >= 0 ones, if we do have a cycle, we could - // make sure all variales are the same, and otherwise, we have a DAG or a + // make sure all variables are the same, and otherwise, we have a DAG or a // conflict. const IntegerValue offset = -negated_offset; if (offset < 0) continue; @@ -334,19 +398,18 @@ void PrecedenceRelations::Build() { const IntegerValue arc_offset = arc_offsets_[arc]; if (++work > kWorkLimit) break; - if (AddInternal(LinearExpression2::Difference(tail_var, head_var), - -arc_offset)) { + if (root_level_bounds_->AddUpperBound( + LinearExpression2::Difference(tail_var, head_var), -arc_offset)) { before[head_var].push_back(tail_var); } for (const IntegerVariable before_var : before[tail_var]) { if (++work > kWorkLimit) break; - LinearExpression2 expr_for_key(before_var, tail_var, 1, -1); - expr_for_key.SimpleCanonicalization(); + const LinearExpression2 expr_for_key(before_var, tail_var, 1, -1); const IntegerValue offset = - -root_relations_.at(expr_for_key) + arc_offset; - if (AddInternal(LinearExpression2::Difference(before_var, head_var), - -offset)) { + -root_level_bounds_->LevelZeroUpperBound(expr_for_key) + arc_offset; + if (root_level_bounds_->AddUpperBound( + LinearExpression2::Difference(before_var, 
head_var), -offset)) { before[head_var].push_back(before_var); } } @@ -354,10 +417,10 @@ void PrecedenceRelations::Build() { } VLOG(2) << "Full precedences. Work=" << work - << " Relations=" << root_relations_.size(); + << " Relations=" << root_level_bounds_->num_bounds(); } -void PrecedenceRelations::ComputeFullPrecedences( +void TransitivePrecedencesEvaluator::ComputeFullPrecedences( absl::Span vars, std::vector* output) { output->clear(); @@ -451,12 +514,10 @@ void PrecedenceRelations::ComputeFullPrecedences( } } -void PrecedenceRelations::CollectPrecedences( +void EnforcedLinear2Bounds::CollectPrecedences( absl::Span vars, std::vector* output) { - // +1 for the negation. - const int needed_size = - std::max(after_.size(), conditional_after_.size()) + 1; + const int needed_size = integer_trail_->NumIntegerVariables().value(); var_to_degree_.resize(needed_size); var_to_last_index_.resize(needed_size); var_with_positive_degree_.resize(needed_size); @@ -469,7 +530,8 @@ void PrecedenceRelations::CollectPrecedences( int* var_to_degree = var_to_degree_.data(); int* var_to_last_index = var_to_last_index_.data(); const auto process = [&](int index, absl::Span v) { - for (const IntegerVariable after : v) { + for (const IntegerVariable other : v) { + const IntegerVariable after = NegationOf(other); DCHECK_LT(after, needed_size); if (var_to_degree[after.value()] == 0) { var_with_positive_degree[num_relevants++] = after; @@ -486,11 +548,9 @@ void PrecedenceRelations::CollectPrecedences( for (int index = 0; index < vars.size(); ++index) { const IntegerVariable var = vars[index]; - if (var < after_.size()) { - process(index, after_[var]); - } - if (var < conditional_after_.size()) { - process(index, conditional_after_[var]); + process(index, root_level_bounds_->GetVariablesInSimpleRelation(var)); + if (var < conditional_var_lookup_.size()) { + process(index, conditional_var_lookup_[var]); } } @@ -498,8 +558,9 @@ void PrecedenceRelations::CollectPrecedences( // For that we 
transform var_to_degree to point to the first position of // each lbvar in the output vector. int start = 0; - for (int i = 0; i < num_relevants; ++i) { - const IntegerVariable var = var_with_positive_degree[i]; + const absl::Span relevant_variables = + absl::MakeSpan(var_with_positive_degree, num_relevants); + for (const IntegerVariable var : relevant_variables) { const int degree = var_to_degree[var.value()]; if (degree > 1) { var_to_degree[var.value()] = start; @@ -520,12 +581,21 @@ void PrecedenceRelations::CollectPrecedences( // Cleanup var_to_degree, note that we don't need to clean // var_to_last_index_. - for (int i = 0; i < num_relevants; ++i) { - const IntegerVariable var = var_with_positive_degree[i]; + for (const IntegerVariable var : relevant_variables) { var_to_degree[var.value()] = 0; } } +std::vector +EnforcedLinear2Bounds::GetAllExpressionsWithConditionalBounds() const { + std::vector result; + result.reserve(conditional_stack_.size()); + for (const auto& entry : conditional_stack_) { + result.push_back(entry.key); + } + return result; +} + namespace { void AppendLowerBoundReasonIfValid(IntegerVariable var, @@ -1531,11 +1601,9 @@ int GreaterThanAtLeastOneOfDetector::AddGreaterThanAtLeastOneOfConstraints( return num_added_constraints; } -BinaryRelationsMaps::BinaryRelationsMaps(Model* model) - : integer_trail_(model->GetOrCreate()), - integer_encoder_(model->GetOrCreate()), - watcher_(model->GetOrCreate()), - shared_stats_(model->GetOrCreate()) { +ReifiedLinear2Bounds::ReifiedLinear2Bounds(Model* model) + : integer_encoder_(model->GetOrCreate()), + best_root_level_bounds_(model->GetOrCreate()) { int index = 0; model->GetOrCreate()->callbacks.push_back( [index = index, trail = model->GetOrCreate(), this]() mutable { @@ -1552,11 +1620,11 @@ BinaryRelationsMaps::BinaryRelationsMaps(Model* model) // Linear scan. 
for (const auto [l, expr, ub] : all_reified_relations_) { if (relevant_true_literals.contains(l)) { - AddRelationBounds(expr, kMinIntegerValue, ub); + best_root_level_bounds_->Add(expr, kMinIntegerValue, ub); VLOG(2) << "New fixed precedence: " << expr << " <= " << ub << " (was reified by " << l << ")"; } else if (relevant_true_literals.contains(l.Negated())) { - AddRelationBounds(expr, ub + 1, kMaxIntegerValue); + best_root_level_bounds_->Add(expr, ub + 1, kMaxIntegerValue); VLOG(2) << "New fixed precedence: " << expr << " > " << ub << " (was reified by not(" << l << "))"; } @@ -1565,89 +1633,15 @@ BinaryRelationsMaps::BinaryRelationsMaps(Model* model) }); } -BinaryRelationsMaps::~BinaryRelationsMaps() { +Linear2BoundsFromLinear3::~Linear2BoundsFromLinear3() { if (!VLOG_IS_ON(1)) return; std::vector> stats; - stats.push_back({"BinaryRelationsMaps/num_relations", num_updates_}); stats.push_back( - {"BinaryRelationsMaps/num_affine_updates", num_affine_updates_}); + {"Linear2BoundsFromLinear3/num_affine_updates", num_affine_updates_}); shared_stats_->AddStats(stats); } -IntegerValue BinaryRelationsMaps::GetImpliedUpperBound( - const LinearExpression2& expr) const { - DCHECK_GE(expr.coeffs[0], 0); - DCHECK_GE(expr.coeffs[1], 0); - IntegerValue implied_ub = 0; - for (const int i : {0, 1}) { - if (expr.coeffs[i] > 0) { - implied_ub += expr.coeffs[i] * integer_trail_->UpperBound(expr.vars[i]); - } - } - return implied_ub; -} - -std::pair -BinaryRelationsMaps::GetImpliedLevelZeroBounds( - const LinearExpression2& expr) const { - // Compute the implied bounds on the expression. 
- IntegerValue implied_lb = 0; - IntegerValue implied_ub = 0; - if (expr.coeffs[0] != 0) { - CHECK_GE(expr.vars[0], 0); - implied_lb += - expr.coeffs[0] * integer_trail_->LevelZeroLowerBound(expr.vars[0]); - implied_ub += - expr.coeffs[0] * integer_trail_->LevelZeroUpperBound(expr.vars[0]); - } - if (expr.coeffs[1] != 0) { - CHECK_GE(expr.vars[1], 0); - implied_lb += - expr.coeffs[1] * integer_trail_->LevelZeroLowerBound(expr.vars[1]); - implied_ub += - expr.coeffs[1] * integer_trail_->LevelZeroUpperBound(expr.vars[1]); - } - - return {implied_lb, implied_ub}; -} - -void BinaryRelationsMaps::AddRelationBounds(LinearExpression2 expr, - IntegerValue lb, IntegerValue ub) { - expr.CanonicalizeAndUpdateBounds(lb, ub); - const auto [implied_lb, implied_ub] = GetImpliedLevelZeroBounds(expr); - lb = std::max(lb, implied_lb); - ub = std::min(ub, implied_ub); - - if (lb > ub) return; // unsat ?? - if (lb == implied_lb && ub == implied_ub) return; // trivially true. - - if (best_root_level_bounds_.Add(expr, lb, ub)) { - // TODO(user): Also push them to a global shared repository after - // remapping IntegerVariable to proto indices. - ++num_updates_; - } -} - -RelationStatus BinaryRelationsMaps::GetLevelZeroStatus(LinearExpression2 expr, - IntegerValue lb, - IntegerValue ub) const { - expr.CanonicalizeAndUpdateBounds(lb, ub); - const auto [implied_lb, implied_ub] = GetImpliedLevelZeroBounds(expr); - lb = std::max(lb, implied_lb); - ub = std::min(ub, implied_ub); - - // Returns directly if the status can be derived from the implied bounds. - if (lb > ub) return RelationStatus::IS_FALSE; - if (lb == implied_lb && ub == implied_ub) return RelationStatus::IS_TRUE; - - // Relax as best_root_level_bounds_.GetStatus() might have older bounds. 
- if (lb == implied_lb) lb = kMinIntegerValue; - if (ub == implied_ub) ub = kMaxIntegerValue; - - return best_root_level_bounds_.GetStatus(expr, lb, ub); -} - -std::pair BinaryRelationsMaps::FromDifference( +std::pair ReifiedLinear2Bounds::FromDifference( const AffineExpression& a, const AffineExpression& b) const { LinearExpression2 expr; expr.vars[0] = a.var; @@ -1660,17 +1654,18 @@ std::pair BinaryRelationsMaps::FromDifference( return {std::move(expr), ub}; } -RelationStatus BinaryRelationsMaps::GetLevelZeroPrecedenceStatus( +RelationStatus ReifiedLinear2Bounds::GetLevelZeroPrecedenceStatus( AffineExpression a, AffineExpression b) const { const auto [expr, ub] = FromDifference(a, b); - return GetLevelZeroStatus(expr, kMinIntegerValue, ub); + return best_root_level_bounds_->GetLevelZeroStatus(expr, kMinIntegerValue, + ub); } -void BinaryRelationsMaps::AddReifiedPrecedenceIfNonTrivial(Literal l, - AffineExpression a, - AffineExpression b) { +void ReifiedLinear2Bounds::AddReifiedPrecedenceIfNonTrivial( + Literal l, AffineExpression a, AffineExpression b) { const auto [expr, ub] = FromDifference(a, b); - const RelationStatus status = GetLevelZeroStatus(expr, kMinIntegerValue, ub); + const RelationStatus status = + best_root_level_bounds_->GetLevelZeroStatus(expr, kMinIntegerValue, ub); if (status != RelationStatus::IS_UNKNOWN) return; relation_to_lit_.insert({{expr, ub}, l}); @@ -1679,10 +1674,11 @@ void BinaryRelationsMaps::AddReifiedPrecedenceIfNonTrivial(Literal l, all_reified_relations_.push_back({l, expr, ub}); } -LiteralIndex BinaryRelationsMaps::GetReifiedPrecedence(AffineExpression a, - AffineExpression b) { +LiteralIndex ReifiedLinear2Bounds::GetReifiedPrecedence(AffineExpression a, + AffineExpression b) { const auto [expr, ub] = FromDifference(a, b); - const RelationStatus status = GetLevelZeroStatus(expr, kMinIntegerValue, ub); + const RelationStatus status = + best_root_level_bounds_->GetLevelZeroStatus(expr, kMinIntegerValue, ub); if (status == 
RelationStatus::IS_TRUE) { return integer_encoder_->GetTrueLiteral().Index(); } @@ -1695,16 +1691,32 @@ LiteralIndex BinaryRelationsMaps::GetReifiedPrecedence(AffineExpression a, return it->second; } -bool BinaryRelationsMaps::AddAffineUpperBound(LinearExpression2 expr, - AffineExpression affine_ub) { +Linear2BoundsFromLinear3::Linear2BoundsFromLinear3(Model* model) + : integer_trail_(model->GetOrCreate()), + trail_(model->GetOrCreate()), + watcher_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()), + best_root_level_bounds_(model->GetOrCreate()) {} + +bool Linear2BoundsFromLinear3::AddAffineUpperBound(LinearExpression2 expr, + AffineExpression affine_ub) { const IntegerValue new_ub = integer_trail_->UpperBound(affine_ub); expr.SimpleCanonicalization(); // Not better than trivial upper bound. - if (GetImpliedUpperBound(expr) <= new_ub) return false; + if (integer_trail_->UpperBound(expr) <= new_ub) return false; + + if (trail_->CurrentDecisionLevel() == 0) { + best_root_level_bounds_->Add( + expr, kMinIntegerValue, integer_trail_->LevelZeroUpperBound(affine_ub)); + NotifyWatchingPropagators(); + return false; + } // Not better than the root level upper bound. 
- if (best_root_level_bounds_.GetUpperBound(expr) <= new_ub) return false; + if (best_root_level_bounds_->LevelZeroUpperBound(expr) <= new_ub) { + return false; + } const IntegerValue gcd = expr.DivideByGcd(); @@ -1731,18 +1743,19 @@ bool BinaryRelationsMaps::AddAffineUpperBound(LinearExpression2 expr, return true; } -void BinaryRelationsMaps::NotifyWatchingPropagators() const { +void Linear2BoundsFromLinear3::NotifyWatchingPropagators() const { for (const int id : propagator_ids_) { watcher_->CallOnNextPropagate(id); } } -IntegerValue BinaryRelationsMaps::UpperBound(LinearExpression2 expr) const { +IntegerValue Linear2BoundsFromLinear3::UpperBound( + LinearExpression2 expr) const { expr.SimpleCanonicalization(); - const IntegerValue trivial_ub = GetImpliedUpperBound(expr); + const IntegerValue trivial_ub = integer_trail_->UpperBound(expr); const IntegerValue root_level_ub = - best_root_level_bounds_.GetUpperBound(expr); + best_root_level_bounds_->LevelZeroUpperBound(expr); const IntegerValue best_ub = std::min(root_level_ub, trivial_ub); const IntegerValue gcd = expr.DivideByGcd(); @@ -1757,8 +1770,21 @@ IntegerValue BinaryRelationsMaps::UpperBound(LinearExpression2 expr) const { } } +IntegerValue Linear2BoundsFromLinear3::GetUpperBoundFromLinear3( + LinearExpression2 expr) const { + DCHECK_EQ(expr.DivideByGcd(), 1); + DCHECK(expr.IsCanonicalized()); + const auto it = best_affine_ub_.find(expr); + if (it == best_affine_ub_.end()) { + return kMaxIntegerValue; + } else { + const auto [affine, divisor] = it->second; + return FloorRatio(integer_trail_->UpperBound(affine), divisor); + } +} + // TODO(user): If the trivial bound is better, its explanation is different... 
-void BinaryRelationsMaps::AddReasonForUpperBoundLowerThan( +void Linear2BoundsFromLinear3::AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* /*literal_reason*/, std::vector* integer_reason) const { @@ -1767,10 +1793,10 @@ void BinaryRelationsMaps::AddReasonForUpperBoundLowerThan( if (expr.coeffs[0] == 0 && expr.coeffs[1] == 0) return; // trivially zero // Starts by simple bounds. - if (best_root_level_bounds_.GetUpperBound(expr) <= ub) return; + if (best_root_level_bounds_->LevelZeroUpperBound(expr) <= ub) return; // Add explanation if it is a trivial bound. - const IntegerValue implied_ub = GetImpliedUpperBound(expr); + const IntegerValue implied_ub = integer_trail_->UpperBound(expr); if (implied_ub <= ub) { const IntegerValue slack = ub - implied_ub; expr.Negate(); // AppendRelaxedLinearReason() explains a lower bound. @@ -1798,7 +1824,7 @@ void BinaryRelationsMaps::AddReasonForUpperBoundLowerThan( } std::vector -BinaryRelationsMaps::GetAllExpressionsWithAffineBounds() const { +Linear2BoundsFromLinear3::GetAllExpressionsWithAffineBounds() const { std::vector result; for (const auto [expr, info] : best_affine_ub_) { result.push_back(expr); @@ -1806,5 +1832,47 @@ BinaryRelationsMaps::GetAllExpressionsWithAffineBounds() const { return result; } +IntegerValue Linear2Bounds::UpperBound(LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0) { + return integer_trail_->UpperBound(expr); + } + DCHECK_NE(expr.coeffs[1], 0); + const IntegerValue gcd = expr.DivideByGcd(); + IntegerValue ub = integer_trail_->UpperBound(expr); + ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(expr)); + ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(expr)); + ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(expr)); + return CapProdI(gcd, ub); +} + +void Linear2Bounds::AddReasonForUpperBoundLowerThan( + LinearExpression2 expr, IntegerValue ub, + std::vector* literal_reason, + 
std::vector* integer_reason) const { + if (root_level_bounds_->LevelZeroUpperBound(expr) <= ub) { + return; + } + if (enforced_bounds_->UpperBound(expr) <= ub) { + enforced_bounds_->AddReasonForUpperBoundLowerThan(expr, ub, literal_reason, + integer_reason); + } else { + linear3_bounds_->AddReasonForUpperBoundLowerThan(expr, ub, literal_reason, + integer_reason); + } +} + +std::vector +Linear2Bounds::GetAllExpressionsWithPotentialNonTrivialBounds() const { + std::vector result = + enforced_bounds_->GetAllExpressionsWithConditionalBounds(); + std::vector binary_relations_result = + linear3_bounds_->GetAllExpressionsWithAffineBounds(); + result.insert(result.end(), binary_relations_result.begin(), + binary_relations_result.end()); + gtl::STLSortAndRemoveDuplicates(&result); + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 4ded3dbc4d..57e75abf64 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -18,10 +18,13 @@ #include #include #include +#include #include #include +#include "absl/container/btree_set.h" #include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/types/span.h" @@ -47,51 +50,82 @@ struct FullIntegerPrecedence { std::vector offsets; }; -// Stores all the precedences relation of the form "a*x + b*y <= ub" -// that we could extract from the linear constraint of the model. These are -// stored in a directed graph. +class RootLevelLinear2Bounds { + public: + explicit RootLevelLinear2Bounds(Model* model) + : integer_trail_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()) {} + + ~RootLevelLinear2Bounds(); + + // Add a relation lb <= expr <= ub. If expr is not a proper linear2 expression + // (e.g. 0*x + y, y + y, y - y) it will be ignored. 
+ // Returns a pair saying whether the lower/upper bounds for this expr became + // more restricted than what was currently stored. + std::pair Add(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub); + + // Same as above, but only update the upper bound. + bool AddUpperBound(LinearExpression2 expr, IntegerValue ub) { + return Add(expr, kMinIntegerValue, ub).second; + } + + IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const; + + int64_t num_bounds() const { return root_level_relations_.num_bounds(); } + + // Return a list of (expr <= ub) sorted by expr. + std::vector> + GetSortedNonTrivialBounds() const { + return root_level_relations_.GetSortedNonTrivialBounds(); + } + + // For a given variable `var`, return all variables `other` so that + // LinearExpression2(var, other, 1, 1) has a non trivial upper bound. + // Note that using negation one can also recover x + y >= lb and x - y <= ub. + absl::Span GetVariablesInSimpleRelation( + IntegerVariable var) const { + if (var >= coeff_one_var_lookup_.size()) return {}; + return coeff_one_var_lookup_[var]; + } + + RelationStatus GetLevelZeroStatus(LinearExpression2 expr, IntegerValue lb, + IntegerValue ub) const; + + // Low-level function that returns the zero-level upper bound if it is + // non-trivial. Otherwise returns kMaxIntegerValue. This is a different + // behavior from LevelZeroUpperBound() that would return the implied + // zero-level bound from the trail for trivial ones. `expr` must be + // canonicalized and gcd-reduced. + IntegerValue GetUpperBoundNoTrail(LinearExpression2 expr) const; + + private: + IntegerTrail* integer_trail_; + SharedStatistics* shared_stats_; + + // Lookup table to find all the LinearExpression2 with a given variable and + // having both coefficient 1. + util_intops::StrongVector> + coeff_one_var_lookup_; + + // TODO(user): Also push them to a global shared repository after + // remapping IntegerVariable to proto indices. 
+ BestBinaryRelationBounds root_level_relations_; + int64_t num_updates_ = 0; +}; + +// This class is used to compute the transitive closure of the level-zero +// precedence relations. // // TODO(user): Support conditional relation. // TODO(user): Support non-DAG like graph. // TODO(user): Support variable offset that can be updated as search progress. -class PrecedenceRelations : public ReversibleInterface { +class TransitivePrecedencesEvaluator { public: - explicit PrecedenceRelations(Model* model) - : params_(*model->GetOrCreate()), - trail_(model->GetOrCreate()), - integer_trail_(model->GetOrCreate()) { - integer_trail_->RegisterReversibleClass(this); - } - - void Resize(int num_variables) { - graph_.ReserveNodes(num_variables); - graph_.AddNode(num_variables - 1); - } - - // Add a relation lb <= expr <= ub. If expr is not a proper linear2 expression - // (e.g. 0*x + y, y + y, y - y) it will be ignored. Returns true if it was - // added and is considered "new". - bool AddBounds(LinearExpression2 expr, IntegerValue lb, IntegerValue ub); - - // Same as above, but only for the upper bound. - bool AddUpperBound(LinearExpression2 expr, IntegerValue ub); - - // Adds add relation (enf => expr <= rhs) that is assumed to be true at - // the current level. - // - // It will be automatically reverted via the SetLevel() functions that is - // called before any integer propagations trigger. - // - // This is assumed to be called when a relation becomes true (enforcement are - // assigned) and when it becomes false in reverse order (CHECKed). - // - // If expr is not a proper linear2 expression (e.g. 0*x + y, y + y, y - y) it - // will be ignored. - void PushConditionalRelation(absl::Span enforcements, - LinearExpression2 expr, IntegerValue rhs); - - // Called each time we change decision level. 
- void SetLevel(int level) final; + explicit TransitivePrecedencesEvaluator(Model* model) + : integer_trail_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()), + root_level_bounds_(model->GetOrCreate()) {} // Returns a set of relations var >= max_i(vars[index[i]] + offsets[i]). // @@ -112,13 +146,76 @@ class PrecedenceRelations : public ReversibleInterface { void ComputeFullPrecedences(absl::Span vars, std::vector* output); - // Returns a set of precedences (var, index) such that var is after - // vars[index]. All entries for the same variable will be contiguous and - // sorted by index. We only list variable with at least two entries. The - // offset can be retrieved via UpperBound(vars[index], var). + // The current code requires the internal data to be processed once all + // root-level relations are loaded. // - // For more efficiency, this method ignores all linear2 expressions with any - // coefficient different from 1. + // If we don't have too many variable, we compute the full transitive closure + // and then push back to RootLevelLinear2Bounds if there is a relation between + // two variables. This can be used to optimize some scheduling propagation and + // reasons. + // + // Warning: If there are too many, this will NOT contain all relations. + // + // Returns kMaxIntegerValue if there are none, otherwise return an upper bound + // such that expr <= ub. + // + // TODO(user): Be more dynamic as we start to add relations during search. + void Build(); + + private: + IntegerTrail* integer_trail_; + SharedStatistics* shared_stats_; + RootLevelLinear2Bounds* root_level_bounds_; + + util::StaticGraph<> graph_; + std::vector arc_offsets_; + + bool is_built_ = false; + bool is_dag_ = false; + std::vector topological_order_; +}; + +// Stores all the precedences relation of the form "{lits} => a*x + b*y <= ub" +// that we could extract from the model. 
+class EnforcedLinear2Bounds : public ReversibleInterface { + public: + explicit EnforcedLinear2Bounds(Model* model) + : params_(*model->GetOrCreate()), + trail_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()), + root_level_bounds_(model->GetOrCreate()), + shared_stats_(model->GetOrCreate()) { + integer_trail_->RegisterReversibleClass(this); + } + + ~EnforcedLinear2Bounds() override; + + // Adds add relation (enf => expr <= rhs) that is assumed to be true at + // the current level. + // + // It will be automatically reverted via the SetLevel() functions that is + // called before any integer propagations trigger. + // + // This is assumed to be called when a relation becomes true (enforcement are + // assigned) and when it becomes false in reverse order (CHECKed). + // + // If expr is not a proper linear2 expression (e.g. 0*x + y, y + y, y - y) it + // will be ignored. + void PushConditionalRelation(absl::Span enforcements, + LinearExpression2 expr, IntegerValue rhs); + + // Called each time we change decision level. + void SetLevel(int level) final; + + // Returns a set of precedences (var, index) such that we have a relation + // of the form var[index] <= var + offset. + // + // All entries for the same variable will be contiguous and sorted by index. + // We only list variable with at least two entries. The offset can be + // retrieved via Linear2Bounds::UpperBound(Difference(vars[index]), var)). + // + // This method currently ignores all linear2 expressions with any coefficient + // different from 1. struct PrecedenceData { IntegerVariable var; int index; @@ -126,15 +223,9 @@ class PrecedenceRelations : public ReversibleInterface { void CollectPrecedences(absl::Span vars, std::vector* output); - // If we don't have too many variable, we compute the full transitive closure - // and can query in O(1) if there is a relation between two variables. - // This can be used to optimize some scheduling propagation and reasons. 
- // - // Warning: If there are too many, this will NOT contain all relations. - // - // Returns kMaxIntegerValue if there are none, otherwise return an upper bound - // such that expr <= ub. - IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const; + IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const { + return root_level_bounds_->LevelZeroUpperBound(expr); + } // Returns the maximum value for expr, and the reason for it (all // true). Note that we always check LevelZeroUpperBound() so if it is better, @@ -144,76 +235,32 @@ class PrecedenceRelations : public ReversibleInterface { // which happen less often, so we don't mind doing two hash lookups, and we // really want to optimize the UpperBound() instead. // - // Important: This doesn't contains the transitive closure. - // Important: The span is only valid in a narrow scope. + // NOTE: most users will want to call Linear2Bounds::UpperBound() instead. IntegerValue UpperBound(LinearExpression2 expr) const; + // Low-level function that returns the upper bound if there is some enforced + // relations only. Otherwise always returns kMaxIntegerValue. + // `expr` must be canonicalized and gcd-reduced. + IntegerValue GetUpperBoundFromEnforced(LinearExpression2 expr) const; + void AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, std::vector* integer_reason) const; - // The current code requires the internal data to be processed once all - // relations are loaded. - // - // TODO(user): Be more dynamic as we start to add relations during search. - void Build(); + // Note: might contain duplicate expressions. + std::vector GetAllExpressionsWithConditionalBounds() const; private: void CreateLevelEntryIfNeeded(); - // expr <= ub. 
- bool AddInternal(LinearExpression2 expr, IntegerValue ub) { - expr.SimpleCanonicalization(); - if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { - return false; - } - const auto [it, inserted] = root_relations_.insert({expr, ub}); - UpdateBestRelationIfBetter(expr, ub); - if (inserted) { - if (expr.coeffs[0] != 1 || expr.coeffs[1] != 1) { - return true; - } - const int new_size = - std::max(expr.vars[0].value(), expr.vars[1].value()) + 1; - if (new_size > after_.size()) after_.resize(new_size); - after_[expr.vars[0]].push_back(NegationOf(expr.vars[1])); - after_[expr.vars[1]].push_back(NegationOf(expr.vars[0])); - return true; - } - it->second = std::min(it->second, ub); - return false; - } - - void UpdateBestRelationIfBetter(LinearExpression2 expr, IntegerValue rhs) { - const auto [it, inserted] = best_relations_.insert({expr, rhs}); - if (!inserted) { - it->second = std::min(it->second, rhs); - } - } - - void UpdateBestRelation(LinearExpression2 expr, IntegerValue rhs) { - const auto it = root_relations_.find(expr); - if (it != root_relations_.end()) { - rhs = std::min(rhs, it->second); - } - if (rhs == kMaxIntegerValue) { - best_relations_.erase(expr); - } else { - best_relations_[expr] = rhs; - } - } - const SatParameters& params_; Trail* trail_; IntegerTrail* integer_trail_; + RootLevelLinear2Bounds* root_level_bounds_; + SharedStatistics* shared_stats_; - util::StaticGraph<> graph_; - std::vector arc_offsets_; - - bool is_built_ = false; - bool is_dag_ = false; - std::vector topological_order_; + int64_t num_conditional_relation_updates_ = 0; // Conditional stack for push/pop of conditional relations. // @@ -234,21 +281,13 @@ class PrecedenceRelations : public ReversibleInterface { // This is always stored in the form (expr <= rhs). // The conditional relations contains indices in the conditional_stack_. 
- absl::flat_hash_map root_relations_; absl::flat_hash_map conditional_relations_; - // Contains std::min() of the offset from root_relations_ and - // conditional_relations_. - absl::flat_hash_map best_relations_; - // Store for each variable x, the variables y that appears alongside it in - // LevelZeroUpperBound(expr) or UpperBound(expr). That is the variable - // that are after x with an offset. Note that conditional_after_ is updated on + // lit => x + y <= ub. Note that conditional_var_lookup_ is updated on // dive/backtrack. util_intops::StrongVector> - after_; - util_intops::StrongVector> - conditional_after_; + conditional_var_lookup_; // Temp data for CollectPrecedences. std::vector var_with_positive_degree_; @@ -257,6 +296,304 @@ class PrecedenceRelations : public ReversibleInterface { std::vector tmp_precedences_; }; +// Similar to AffineExpression, but with a zero constant. +// If coeff is zero, then this is always zero and var is ignored. +struct LinearTerm { + LinearTerm() = default; + LinearTerm(IntegerVariable v, IntegerValue c) : var(v), coeff(c) {} + + void MakeCoeffPositive() { + if (coeff < 0) { + coeff = -coeff; + var = NegationOf(var); + } + } + + bool operator==(const LinearTerm& other) const { + return var == other.var && coeff == other.coeff; + } + + IntegerVariable var = kNoIntegerVariable; + IntegerValue coeff = IntegerValue(0); +}; + +// A relation of the form enforcement => a + b \in [lhs, rhs]. +// Note that the [lhs, rhs] interval should always be within [min_activity, +// max_activity] where the activity is the value of a + b. 
+struct Relation { + Literal enforcement; + LinearTerm a; + LinearTerm b; + IntegerValue lhs; + IntegerValue rhs; + + bool operator==(const Relation& other) const { + return enforcement == other.enforcement && a == other.a && b == other.b && + lhs == other.lhs && rhs == other.rhs; + } +}; + +// A repository of all the enforced linear constraints of size 1 or 2, and of +// all the non-enforced linear constraints of size 2. +// +// TODO(user): This is not always needed, find a way to clean this once we +// don't need it. +class BinaryRelationRepository { + public: + int size() const { return relations_.size(); } + + // The returned relation is guaranteed to only have positive variables. + const Relation& relation(int index) const { return relations_[index]; } + + // Returns the indices of the relations that are enforced by the given + // literal. + absl::Span IndicesOfRelationsEnforcedBy(LiteralIndex lit) const { + if (lit >= lit_to_relations_.size()) return {}; + return lit_to_relations_[lit]; + } + + // Returns the indices of the non-enforced relations that contain the given + // (positive) variable. + absl::Span IndicesOfRelationsContaining( + IntegerVariable var) const { + if (var >= var_to_relations_.size()) return {}; + return var_to_relations_[var]; + } + + // Returns the indices of the non-enforced relations that contain the given + // (positive) variables. + absl::Span IndicesOfRelationsBetween(IntegerVariable var1, + IntegerVariable var2) const { + if (var1 > var2) std::swap(var1, var2); + const std::pair key(var1, var2); + const auto it = var_pair_to_relations_.find(key); + if (it == var_pair_to_relations_.end()) return {}; + return it->second; + } + + // Adds a conditional relation lit => a + b \in [lhs, rhs] (one of the terms + // can be zero), or an always true binary relation a + b \in [lhs, rhs] (both + // terms must be non-zero). 
+ void Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, + IntegerValue rhs); + + // Adds a partial conditional relation between two variables, with unspecified + // coefficients and bounds. + void AddPartialRelation(Literal lit, IntegerVariable a, IntegerVariable b); + + // Builds the literal to relations mapping. This should be called once all the + // relations have been added. + void Build(); + + // Assuming level-zero bounds + any (var >= value) in the input map, + // fills "output" with a "propagated" set of bounds assuming lit is true (by + // using the relations enforced by lit, as well as the non-enforced ones). + // Note that we will only fill bounds > level-zero ones in output. + // + // Returns false if the new bounds are infeasible at level zero. + // + // Important: by default this does not call output->clear() so we can take + // the max with already inferred bounds. + bool PropagateLocalBounds( + const IntegerTrail& integer_trail, Literal lit, + const absl::flat_hash_map& input, + absl::flat_hash_map* output) const; + + private: + bool is_built_ = false; + int num_enforced_relations_ = 0; + std::vector relations_; + CompactVectorVector lit_to_relations_; + CompactVectorVector var_to_relations_; + absl::flat_hash_map, + std::vector> + var_pair_to_relations_; +}; + +// Class that keeps the best upper bound for a*x + b*y by using all the linear3 +// relations of the form a*x + b*y + c*z <= d. +class Linear2BoundsFromLinear3 { + public: + explicit Linear2BoundsFromLinear3(Model* model); + ~Linear2BoundsFromLinear3(); + + // If the given upper bound evaluate better than the current one we have, this + // will replace it and returns true, otherwise it returns false. + // + // Note that we never store trivial upper bound (using the current variable + // domain). 
+  bool AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub);
+
+  // Returns the best known upper-bound of the given LinearExpression2 at the
+  // current decision level. If its explanation is needed, it can be queried
+  // with the second function.
+  //
+  // NOTE: most users will want to call Linear2Bounds::UpperBound() instead.
+  IntegerValue UpperBound(LinearExpression2 expr) const;
+  void AddReasonForUpperBoundLowerThan(
+      LinearExpression2 expr, IntegerValue ub,
+      std::vector* literal_reason,
+      std::vector* integer_reason) const;
+
+  // Warning, the order will not be deterministic.
+  std::vector GetAllExpressionsWithAffineBounds() const;
+
+  int NumExpressionsWithAffineBounds() const { return best_affine_ub_.size(); }
+
+  void WatchAllLinearExpressions2(int id) { propagator_ids_.insert(id); }
+
+  // Low-level function that returns the upper bound only if there are some
+  // relations coming from a linear3. Otherwise always returns kMaxIntegerValue.
+  // `expr` must be canonicalized and gcd-reduced.
+  IntegerValue GetUpperBoundFromLinear3(LinearExpression2 expr) const;
+
+ private:
+  void NotifyWatchingPropagators() const;
+
+  IntegerTrail* integer_trail_;
+  Trail* trail_;
+  GenericLiteralWatcher* watcher_;
+  SharedStatistics* shared_stats_;
+  RootLevelLinear2Bounds* best_root_level_bounds_;
+
+  int64_t num_affine_updates_ = 0;
+
+  // This stores linear2 <= AffineExpression / divisor.
+  //
+  // Note(user): This is a "cheap way" to not have to deal with backtracking. If
+  // we have many possible AffineExpression that bounds a LinearExpression2, we
+  // keep the best one during "search dive" but on backtrack we might have a
+  // sub-optimal relation.
+  absl::flat_hash_map>
+      best_affine_ub_;
+
+  absl::btree_set propagator_ids_;
+};
+
+// TODO(user): Merge with BinaryRelationRepository. Note that this one provides
+// different indexing though, so it could be kept separate.
+// TODO(user): Use LinearExpression2 instead of pairs of AffineExpression for +// consistency with other classes. +class ReifiedLinear2Bounds { + public: + explicit ReifiedLinear2Bounds(Model* model); + + // Return the status of a <= b; + RelationStatus GetLevelZeroPrecedenceStatus(AffineExpression a, + AffineExpression b) const; + + // Register the fact that l <=> ( a <= b ). + // These are considered equivalence relation. + void AddReifiedPrecedenceIfNonTrivial(Literal l, AffineExpression a, + AffineExpression b); + + // Returns kNoLiteralIndex if we don't have a literal <=> ( a <= b ), or + // returns that literal if we have one. Note that we will return the + // true/false literal if the status is known at level zero. + LiteralIndex GetReifiedPrecedence(AffineExpression a, AffineExpression b); + + private: + // Return the pair (a - b) <= rhs. + std::pair FromDifference( + const AffineExpression& a, const AffineExpression& b) const; + + IntegerEncoder* integer_encoder_; + RootLevelLinear2Bounds* best_root_level_bounds_; + + // This stores relations l <=> (linear2 <= rhs). + absl::flat_hash_map, Literal> + relation_to_lit_; + + // This is used to detect relations that become fixed at level zero and + // "upgrade" them to non-enforced relations. Because we only do that when + // we fix variable, a linear scan shouldn't be too bad and is relatively + // compact memory wise. + absl::flat_hash_set variable_appearing_in_reified_relations_; + std::vector> + all_reified_relations_; +}; + +// Simple wrapper around the different repositories for bounds of linear2. +// This should provide the best bounds. +class Linear2Bounds { + public: + explicit Linear2Bounds(Model* model) + : root_level_bounds_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()), + enforced_bounds_(model->GetOrCreate()), + linear3_bounds_(model->GetOrCreate()) {} + + // Returns the best known upper-bound of the given LinearExpression2 at the + // current decision level. 
If its explanation is needed, it can be queried + // with the second function. + IntegerValue UpperBound(LinearExpression2 expr) const; + void AddReasonForUpperBoundLowerThan( + LinearExpression2 expr, IntegerValue ub, + std::vector* literal_reason, + std::vector* integer_reason) const; + + std::vector + GetAllExpressionsWithPotentialNonTrivialBounds() const; + + private: + RootLevelLinear2Bounds* root_level_bounds_; + IntegerTrail* integer_trail_; + EnforcedLinear2Bounds* enforced_bounds_; + Linear2BoundsFromLinear3* linear3_bounds_; +}; + +// Detects if at least one of a subset of linear of size 2 or 1, touching the +// same variable, must be true. When this is the case we add a new propagator to +// propagate that fact. +// +// TODO(user): Shall we do that on the main thread before the workers are +// spawned? note that the probing version need the model to be loaded though. +class GreaterThanAtLeastOneOfDetector { + public: + explicit GreaterThanAtLeastOneOfDetector(Model* model) + : repository_(*model->GetOrCreate()) {} + + // Advanced usage. To be called once all the constraints have been added to + // the model. This will detect GreaterThanAtLeastOneOfConstraint(). + // Returns the number of added constraint. + // + // TODO(user): This can be quite slow, add some kind of deterministic limit + // so that we can use it all the time. + int AddGreaterThanAtLeastOneOfConstraints(Model* model, + bool auto_detect_clauses = false); + + private: + // Given an existing clause, sees if it can be used to add "greater than at + // least one of" type of constraints. Returns the number of such constraint + // added. + int AddGreaterThanAtLeastOneOfConstraintsFromClause( + absl::Span clause, Model* model); + + // Another approach for AddGreaterThanAtLeastOneOfConstraints(), this one + // might be a bit slow as it relies on the propagation engine to detect + // clauses between incoming arcs presence literals. + // Returns the number of added constraints. 
+ int AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection( + Model* model); + + // Once we identified a clause and relevant indices, this build the + // constraint. Returns true if we actually add it. + bool AddRelationFromIndices(IntegerVariable var, + absl::Span clause, + absl::Span indices, Model* model); + + BinaryRelationRepository& repository_; +}; + +// ============================================================================= +// Old precedences propagator. +// +// This is superseded by the new LinearPropagator and should only be used if the +// option 'new_linear_propagation' is false. We still keep it around to +// benchmark and test the new code vs this one. +// ============================================================================= + // This class implement a propagator on simple inequalities between integer // variables of the form (i1 + offset <= i2). The offset can be constant or // given by the value of a third integer variable. Offsets can also be negative. @@ -277,7 +614,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { public: explicit PrecedencesPropagator(Model* model) : SatPropagator("PrecedencesPropagator"), - relations_(model->GetOrCreate()), + relations_(model->GetOrCreate()), trail_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), shared_stats_(model->Mutable()), @@ -405,7 +742,7 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { // External class needed to get the IntegerVariable lower bounds and Enqueue // new ones. - PrecedenceRelations* relations_; + EnforcedLinear2Bounds* relations_; Trail* trail_; IntegerTrail* integer_trail_; SharedStatistics* shared_stats_ = nullptr; @@ -471,261 +808,6 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { int64_t num_enforcement_pushes_ = 0; }; -// Similar to AffineExpression, but with a zero constant. -// If coeff is zero, then this is always zero and var is ignored. 
-struct LinearTerm { - LinearTerm() = default; - LinearTerm(IntegerVariable v, IntegerValue c) : var(v), coeff(c) {} - - void MakeCoeffPositive() { - if (coeff < 0) { - coeff = -coeff; - var = NegationOf(var); - } - } - - bool operator==(const LinearTerm& other) const { - return var == other.var && coeff == other.coeff; - } - - IntegerVariable var = kNoIntegerVariable; - IntegerValue coeff = IntegerValue(0); -}; - -// A relation of the form enforcement => a + b \in [lhs, rhs]. -// Note that the [lhs, rhs] interval should always be within [min_activity, -// max_activity] where the activity is the value of a + b. -struct Relation { - Literal enforcement; - LinearTerm a; - LinearTerm b; - IntegerValue lhs; - IntegerValue rhs; - - bool operator==(const Relation& other) const { - return enforcement == other.enforcement && a == other.a && b == other.b && - lhs == other.lhs && rhs == other.rhs; - } -}; - -// A repository of all the enforced linear constraints of size 1 or 2, and of -// all the non-enforced linear constraints of size 2. -// -// TODO(user): This is not always needed, find a way to clean this once we -// don't need it. -class BinaryRelationRepository { - public: - int size() const { return relations_.size(); } - - // The returned relation is guaranteed to only have positive variables. - const Relation& relation(int index) const { return relations_[index]; } - - // Returns the indices of the relations that are enforced by the given - // literal. - absl::Span IndicesOfRelationsEnforcedBy(LiteralIndex lit) const { - if (lit >= lit_to_relations_.size()) return {}; - return lit_to_relations_[lit]; - } - - // Returns the indices of the non-enforced relations that contain the given - // (positive) variable. 
- absl::Span IndicesOfRelationsContaining( - IntegerVariable var) const { - if (var >= var_to_relations_.size()) return {}; - return var_to_relations_[var]; - } - - // Returns the indices of the non-enforced relations that contain the given - // (positive) variables. - absl::Span IndicesOfRelationsBetween(IntegerVariable var1, - IntegerVariable var2) const { - if (var1 > var2) std::swap(var1, var2); - const std::pair key(var1, var2); - const auto it = var_pair_to_relations_.find(key); - if (it == var_pair_to_relations_.end()) return {}; - return it->second; - } - - // Adds a conditional relation lit => a + b \in [lhs, rhs] (one of the terms - // can be zero), or an always true binary relation a + b \in [lhs, rhs] (both - // terms must be non-zero). - void Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, - IntegerValue rhs); - - // Adds a partial conditional relation between two variables, with unspecified - // coefficients and bounds. - void AddPartialRelation(Literal lit, IntegerVariable a, IntegerVariable b); - - // Builds the literal to relations mapping. This should be called once all the - // relations have been added. - void Build(); - - // Assuming level-zero bounds + any (var >= value) in the input map, - // fills "output" with a "propagated" set of bounds assuming lit is true (by - // using the relations enforced by lit, as well as the non-enforced ones). - // Note that we will only fill bounds > level-zero ones in output. - // - // Returns false if the new bounds are infeasible at level zero. - // - // Important: by default this does not call output->clear() so we can take - // the max with already inferred bounds. 
- bool PropagateLocalBounds( - const IntegerTrail& integer_trail, Literal lit, - const absl::flat_hash_map& input, - absl::flat_hash_map* output) const; - - private: - bool is_built_ = false; - int num_enforced_relations_ = 0; - std::vector relations_; - CompactVectorVector lit_to_relations_; - CompactVectorVector var_to_relations_; - absl::flat_hash_map, - std::vector> - var_pair_to_relations_; -}; - -// TODO(user): Merge with BinaryRelationRepository. Note that this one provides -// different indexing though, so it could be kept separate. The -// LinearExpression2 data structure is also slightly more efficient. -class BinaryRelationsMaps { - public: - explicit BinaryRelationsMaps(Model* model); - ~BinaryRelationsMaps(); - - // This mainly wraps BestBinaryRelationBounds, but in addition it checks the - // current LevelZero variable bounds to detect trivially true or false - // relation. - void AddRelationBounds(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub); - RelationStatus GetLevelZeroStatus(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub) const; - - // Return the status of a <= b; - RelationStatus GetLevelZeroPrecedenceStatus(AffineExpression a, - AffineExpression b) const; - - // Register the fact that l <=> ( a <= b ). - // These are considered equivalence relation. - void AddReifiedPrecedenceIfNonTrivial(Literal l, AffineExpression a, - AffineExpression b); - - // Returns kNoLiteralIndex if we don't have a literal <=> ( a <= b ), or - // returns that literal if we have one. Note that we will return the - // true/false literal if the status is known at level zero. - LiteralIndex GetReifiedPrecedence(AffineExpression a, AffineExpression b); - - // If the given upper bound evaluate better than the current one we have, this - // will replace it and returns true, otherwise it returns false. - // - // Note that we never store trivial upper bound (using the current variable - // domain). 
- bool AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub); - - // Returns the best known upper-bound of the given LinearExpression2 at the - // current decision level. If its explanation is needed, it can be queried - // with the second function. - IntegerValue UpperBound(LinearExpression2 expr) const; - void AddReasonForUpperBoundLowerThan( - LinearExpression2 expr, IntegerValue ub, - std::vector* literal_reason, - std::vector* integer_reason) const; - - // Warning, the order will not be deterministic. - std::vector GetAllExpressionsWithAffineBounds() const; - - int NumExpressionsWithAffineBounds() const { return best_affine_ub_.size(); } - - void WatchAllLinearExpressions2(int id) { propagator_ids_.insert(id); } - - private: - void NotifyWatchingPropagators() const; - - // Return the pair (a - b) <= rhs. - std::pair FromDifference( - const AffineExpression& a, const AffineExpression& b) const; - - IntegerValue GetImpliedUpperBound(const LinearExpression2& expr) const; - std::pair GetImpliedLevelZeroBounds( - const LinearExpression2& expr) const; - - IntegerTrail* integer_trail_; - IntegerEncoder* integer_encoder_; - GenericLiteralWatcher* watcher_; - SharedStatistics* shared_stats_; - BestBinaryRelationBounds best_root_level_bounds_; - - int64_t num_updates_ = 0; - int64_t num_affine_updates_ = 0; - - // This stores relations l <=> (linear2 <= rhs). - absl::flat_hash_map, Literal> - relation_to_lit_; - - // This is used to detect relations that become fixed at level zero and - // "upgrade" them to non-enforced relations. Because we only do that when - // we fix variable, a linear scan shouldn't be too bad and is relatively - // compact memory wise. - absl::flat_hash_set variable_appearing_in_reified_relations_; - std::vector> - all_reified_relations_; - - // This stores linear2 <= AffineExpression / divisor. 
- // - // Note(user): This is a "cheap way" to not have to deal with backtracking, If - // we have many possible AffineExpression that bounds a LinearExpression2, we - // keep the best one during "search dive" but on backtrack we might have a - // sub-optimal relation. - absl::flat_hash_map> - best_affine_ub_; - - absl::btree_set propagator_ids_; -}; - -// Detects if at least one of a subset of linear of size 2 or 1, touching the -// same variable, must be true. When this is the case we add a new propagator to -// propagate that fact. -// -// TODO(user): Shall we do that on the main thread before the workers are -// spawned? note that the probing version need the model to be loaded though. -class GreaterThanAtLeastOneOfDetector { - public: - explicit GreaterThanAtLeastOneOfDetector(Model* model) - : repository_(*model->GetOrCreate()) {} - - // Advanced usage. To be called once all the constraints have been added to - // the model. This will detect GreaterThanAtLeastOneOfConstraint(). - // Returns the number of added constraint. - // - // TODO(user): This can be quite slow, add some kind of deterministic limit - // so that we can use it all the time. - int AddGreaterThanAtLeastOneOfConstraints(Model* model, - bool auto_detect_clauses = false); - - private: - // Given an existing clause, sees if it can be used to add "greater than at - // least one of" type of constraints. Returns the number of such constraint - // added. - int AddGreaterThanAtLeastOneOfConstraintsFromClause( - absl::Span clause, Model* model); - - // Another approach for AddGreaterThanAtLeastOneOfConstraints(), this one - // might be a bit slow as it relies on the propagation engine to detect - // clauses between incoming arcs presence literals. - // Returns the number of added constraints. - int AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection( - Model* model); - - // Once we identified a clause and relevant indices, this build the - // constraint. Returns true if we actually add it. 
- bool AddRelationFromIndices(IntegerVariable var, - absl::Span clause, - absl::Span indices, Model* model); - - BinaryRelationRepository& repository_; -}; - // ============================================================================= // Implementation of the small API functions below. // ============================================================================= @@ -768,43 +850,6 @@ inline void PrecedencesPropagator::AddPrecedenceWithAllOptions( // Model based functions. // ============================================================================= -// a <= b. -inline std::function LowerOrEqual(IntegerVariable a, - IntegerVariable b) { - return [=](Model* model) { - return model->GetOrCreate()->AddPrecedence(a, b); - }; -} - -// a + offset <= b. -inline std::function LowerOrEqualWithOffset(IntegerVariable a, - IntegerVariable b, - int64_t offset) { - return [=](Model* model) { - LinearExpression2 expr(a, b, 1, -1); - model->GetOrCreate()->AddUpperBound( - expr, IntegerValue(-offset)); - model->GetOrCreate()->AddPrecedenceWithOffset( - a, b, IntegerValue(offset)); - }; -} - -// a + offset <= b. (when a and b are of the form 1 * var + offset). -inline std::function AffineCoeffOneLowerOrEqualWithOffset( - AffineExpression a, AffineExpression b, int64_t offset) { - CHECK_NE(a.var, kNoIntegerVariable); - CHECK_EQ(a.coeff, 1); - CHECK_NE(b.var, kNoIntegerVariable); - CHECK_EQ(b.coeff, 1); - return [=](Model* model) { - LinearExpression2 expr(a.var, b.var, 1, -1); - model->GetOrCreate()->AddUpperBound( - expr, -a.constant + b.constant - offset); - model->GetOrCreate()->AddPrecedenceWithOffset( - a.var, b.var, a.constant - b.constant + offset); - }; -} - // l => (a + b <= ub). inline void AddConditionalSum2LowerOrEqual( absl::Span enforcement_literals, IntegerVariable a, @@ -812,8 +857,8 @@ inline void AddConditionalSum2LowerOrEqual( // TODO(user): Refactor to be sure we do not miss any level zero relations. 
if (enforcement_literals.empty()) { LinearExpression2 expr(a, b, 1, 1); - model->GetOrCreate()->AddUpperBound(expr, - IntegerValue(ub)); + model->GetOrCreate()->AddUpperBound( + expr, IntegerValue(ub)); } PrecedencesPropagator* p = model->GetOrCreate(); @@ -832,34 +877,21 @@ inline void AddConditionalSum3LowerOrEqual( enforcement_literals); } -// a >= b. -inline std::function GreaterOrEqual(IntegerVariable a, - IntegerVariable b) { - return [=](Model* model) { - return model->GetOrCreate()->AddPrecedence(b, a); - }; -} - // a == b. +// +// ABSL_DEPRECATED("Use linear constraint API instead") inline std::function Equality(IntegerVariable a, IntegerVariable b) { return [=](Model* model) { - model->Add(LowerOrEqual(a, b)); - model->Add(LowerOrEqual(b, a)); - }; -} - -// a + offset == b. -inline std::function EqualityWithOffset(IntegerVariable a, - IntegerVariable b, - int64_t offset) { - return [=](Model* model) { - model->Add(LowerOrEqualWithOffset(a, b, offset)); - model->Add(LowerOrEqualWithOffset(b, a, -offset)); + auto* precedences = model->GetOrCreate(); + precedences->AddPrecedence(a, b); + precedences->AddPrecedence(b, a); }; } // is_le => (a + offset <= b). 
+// +// ABSL_DEPRECATED("Use linear constraint API instead") inline std::function ConditionalLowerOrEqualWithOffset( IntegerVariable a, IntegerVariable b, int64_t offset, Literal is_le) { return [=](Model* model) { diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index 781781d5f2..32fd12c3ee 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -59,20 +59,23 @@ std::vector AddVariables(IntegerTrail* integer_trail) { return vars; } -TEST(PrecedenceRelationsTest, BasicAPI) { +TEST(EnforcedLinear2BoundsTest, BasicAPI) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); + auto* lin2_bounds = model.GetOrCreate(); + auto* precedence_builder = + model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); // Note that odd indices are for the negation. IntegerVariable a(0), b(2), c(4), d(6); - PrecedenceRelations precedences(&model); - precedences.AddUpperBound(LinearExpression2::Difference(a, b), -10); - precedences.AddUpperBound(LinearExpression2::Difference(d, c), -7); - precedences.AddUpperBound(LinearExpression2::Difference(b, d), -5); + EnforcedLinear2Bounds precedences(&model); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -10); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(d, c), -7); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(b, d), -5); - precedences.Build(); + precedence_builder->Build(); EXPECT_EQ( precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, b)), -10); @@ -93,11 +96,11 @@ TEST(PrecedenceRelationsTest, BasicAPI) { -15); EXPECT_EQ( precedences.LevelZeroUpperBound(LinearExpression2::Difference(d, a)), - kMaxIntegerValue); + 100); // Once built, we can update the offsets. // Note however that this would not propagate through the precedence graphs. 
- precedences.AddUpperBound(LinearExpression2::Difference(a, b), -15); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -15); EXPECT_EQ( precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, b)), -15); @@ -106,22 +109,25 @@ TEST(PrecedenceRelationsTest, BasicAPI) { -15); } -TEST(PrecedenceRelationsTest, CornerCase1) { +TEST(EnforcedLinear2BoundsTest, CornerCase1) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); + auto* lin2_bounds = model.GetOrCreate(); + auto* precedence_builder = + model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); // Note that odd indices are for the negation. IntegerVariable a(0), b(2), c(4), d(6); - PrecedenceRelations precedences(&model); - precedences.AddUpperBound(LinearExpression2::Difference(a, b), -10); - precedences.AddUpperBound(LinearExpression2::Difference(b, c), -7); - precedences.AddUpperBound(LinearExpression2::Difference(b, d), -5); - precedences.AddUpperBound(LinearExpression2::Difference(NegationOf(b), a), - -5); + EnforcedLinear2Bounds precedences(&model); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -10); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(b, c), -7); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(b, d), -5); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(NegationOf(b), a), + -5); - precedences.Build(); + precedence_builder->Build(); EXPECT_EQ(precedences.LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), a)), -5); @@ -133,46 +139,52 @@ TEST(PrecedenceRelationsTest, CornerCase1) { -20); } -TEST(PrecedenceRelationsTest, CornerCase2) { +TEST(EnforcedLinear2BoundsTest, CornerCase2) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); + auto* lin2_bounds = model.GetOrCreate(); + auto* precedence_builder = + model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); // Note that odd indices are for the negation. 
IntegerVariable a(0), b(2), c(4), d(6); - PrecedenceRelations precedences(&model); - precedences.AddUpperBound(LinearExpression2::Difference(NegationOf(a), a), - -10); - precedences.AddUpperBound(LinearExpression2::Difference(a, b), -7); - precedences.AddUpperBound(LinearExpression2::Difference(a, c), -5); - precedences.AddUpperBound(LinearExpression2::Difference(a, d), -2); + EnforcedLinear2Bounds precedences(&model); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(NegationOf(a), a), + -10); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -7); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, c), -5); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, d), -2); EXPECT_EQ(precedences.LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), NegationOf(a))), -7); - precedences.Build(); + precedence_builder->Build(); } -TEST(PrecedenceRelationsTest, CoefficientGreaterThanOne) { +TEST(EnforcedLinear2BoundsTest, CoefficientGreaterThanOne) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); + auto* lin2_bounds = model.GetOrCreate(); + auto* precedence_builder = + model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); // Note that odd indices are for the negation. 
IntegerVariable a(0), b(2), c(4); - PrecedenceRelations precedences(&model); - precedences.AddUpperBound(LinearExpression2(a, b, 3, -4), 7); - precedences.AddUpperBound(LinearExpression2(a, c, 2, -3), -5); - precedences.AddUpperBound(LinearExpression2(a, b, 6, -8), 5); + EnforcedLinear2Bounds precedences(&model); + lin2_bounds->AddUpperBound(LinearExpression2(a, b, 3, -4), 7); + lin2_bounds->AddUpperBound(LinearExpression2(a, c, 2, -3), -5); + lin2_bounds->AddUpperBound(LinearExpression2(a, b, 6, -8), 5); EXPECT_EQ(precedences.LevelZeroUpperBound(LinearExpression2(a, b, 9, -12)), 6); - precedences.Build(); + precedence_builder->Build(); } -TEST(PrecedenceRelationsTest, ConditionalRelations) { +TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { Model model; auto* sat_solver = model.GetOrCreate(); auto* integer_trail = model.GetOrCreate(); @@ -183,7 +195,7 @@ TEST(PrecedenceRelationsTest, ConditionalRelations) { // Note that odd indices are for the negation. IntegerVariable a(0), b(2); - PrecedenceRelations precedences(&model); + EnforcedLinear2Bounds precedences(&model); precedences.PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 15); precedences.PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 20); @@ -202,7 +214,7 @@ TEST(PrecedenceRelationsTest, ConditionalRelations) { EXPECT_TRUE(sat_solver->ResetToLevelZero()); EXPECT_EQ( precedences.UpperBound(LinearExpression2::Difference(a, NegationOf(b))), - kMaxIntegerValue); + 200); literal_reason.clear(); integer_reason.clear(); precedences.AddReasonForUpperBoundLowerThan( @@ -435,8 +447,9 @@ TEST(PrecedencesPropagatorTest, ZeroWeightCycleOnDiscreteDomain) { NewIntegerVariable(Domain::FromValues({3, 6, 9, 14, 16, 18, 20, 35}))); // Add the fact that a == b with two inequalities. 
- model.Add(LowerOrEqual(a, b)); - model.Add(LowerOrEqual(b, a)); + auto* precedences = model.GetOrCreate(); + precedences->AddPrecedence(a, b); + precedences->AddPrecedence(b, a); // After propagation, we should detect that the only common values fall in // [16, 20]. @@ -455,7 +468,8 @@ TEST(PrecedencesPropagatorTest, ConditionalPrecedencesOnFixedLiteral) { // To trigger the old bug, we need to add some precedences. IntegerVariable x = model.Add(NewIntegerVariable(0, 100)); IntegerVariable y = model.Add(NewIntegerVariable(50, 100)); - model.Add(LowerOrEqual(x, y)); + auto* precedences = model.GetOrCreate(); + precedences->AddPrecedence(x, y); // We then add a Boolean variable and fix it. // This will trigger a propagation. @@ -472,26 +486,27 @@ TEST(PrecedencesPropagatorTest, ConditionalPrecedencesOnFixedLiteral) { #undef EXPECT_BOUNDS_EQ -TEST(PrecedenceRelationsTest, CollectPrecedences) { +TEST(EnforcedLinear2BoundsTest, CollectPrecedences) { Model model; auto* integer_trail = model.GetOrCreate(); - auto* relations = model.GetOrCreate(); + auto* relations = model.GetOrCreate(); + auto* lin2_bounds = model.GetOrCreate(); std::vector vars = AddVariables(integer_trail); - relations->AddUpperBound(LinearExpression2::Difference(vars[0], vars[2]), - IntegerValue(-1)); - relations->AddUpperBound(LinearExpression2::Difference(vars[0], vars[5]), - IntegerValue(-1)); - relations->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), - IntegerValue(-1)); - relations->AddUpperBound(LinearExpression2::Difference(vars[2], vars[4]), - IntegerValue(-1)); - relations->AddUpperBound(LinearExpression2::Difference(vars[3], vars[4]), - IntegerValue(-1)); - relations->AddUpperBound(LinearExpression2::Difference(vars[4], vars[5]), - IntegerValue(-1)); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[0], vars[2]), + IntegerValue(-1)); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[0], vars[5]), + IntegerValue(-1)); + 
lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), + IntegerValue(-1)); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[2], vars[4]), + IntegerValue(-1)); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[3], vars[4]), + IntegerValue(-1)); + lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[4], vars[5]), + IntegerValue(-1)); - std::vector p; + std::vector p; relations->CollectPrecedences({vars[0], vars[2], vars[3]}, &p); // Note that we do not return precedences with just one variable. @@ -947,7 +962,7 @@ TEST(GreaterThanAtLeastOneOfDetectorTest, EXPECT_EQ(model.Get(LowerBound(d)), std::min({2 + 2, 5 - 1, 3 + 0})); } -TEST(PrecedencesPropagatorTest, ComputeFullPrecedencesIfCycle) { +TEST(TransitivePrecedencesEvaluatorTest, ComputeFullPrecedencesIfCycle) { Model model; std::vector vars(10); for (int i = 0; i < vars.size(); ++i) { @@ -955,18 +970,19 @@ TEST(PrecedencesPropagatorTest, ComputeFullPrecedencesIfCycle) { } // Even if the weight are compatible, we will fail here. 
- model.Add(LowerOrEqualWithOffset(vars[0], vars[1], 2)); - model.Add(LowerOrEqualWithOffset(vars[1], vars[2], 2)); - model.Add(LowerOrEqualWithOffset(vars[2], vars[1], -10)); - model.Add(LowerOrEqualWithOffset(vars[0], vars[2], 5)); + auto* r = model.GetOrCreate(); + r->AddUpperBound(LinearExpression2::Difference(vars[0], vars[1]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[2], vars[1]), 10); + r->AddUpperBound(LinearExpression2::Difference(vars[0], vars[2]), -5); std::vector precedences; - model.GetOrCreate()->ComputeFullPrecedences( + model.GetOrCreate()->ComputeFullPrecedences( {vars[0], vars[1]}, &precedences); EXPECT_TRUE(precedences.empty()); } -TEST(PrecedencesPropagatorTest, BasicFiltering) { +TEST(TransitivePrecedencesEvaluatorTest, BasicTest1) { Model model; std::vector vars(10); for (int i = 0; i < vars.size(); ++i) { @@ -978,14 +994,15 @@ TEST(PrecedencesPropagatorTest, BasicFiltering) { // 0 2 -- 4 // \ / // 3 - model.Add(LowerOrEqualWithOffset(vars[0], vars[1], 2)); - model.Add(LowerOrEqualWithOffset(vars[1], vars[2], 2)); - model.Add(LowerOrEqualWithOffset(vars[0], vars[3], 1)); - model.Add(LowerOrEqualWithOffset(vars[3], vars[2], 2)); - model.Add(LowerOrEqualWithOffset(vars[2], vars[4], 2)); + auto* r = model.GetOrCreate(); + r->AddUpperBound(LinearExpression2::Difference(vars[0], vars[1]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[0], vars[3]), -1); + r->AddUpperBound(LinearExpression2::Difference(vars[3], vars[2]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[2], vars[4]), -2); std::vector precedences; - model.GetOrCreate()->ComputeFullPrecedences( + model.GetOrCreate()->ComputeFullPrecedences( {vars[0], vars[1], vars[3]}, &precedences); // We only output size at least 2, and "relevant" precedences. 
@@ -996,7 +1013,7 @@ TEST(PrecedencesPropagatorTest, BasicFiltering) { EXPECT_THAT(precedences[0].indices, ElementsAre(0, 1, 2)); } -TEST(PrecedencesPropagatorTest, BasicFiltering2) { +TEST(TransitivePrecedencesEvaluatorTest, BasicTest2) { Model model; std::vector vars(10); for (int i = 0; i < vars.size(); ++i) { @@ -1008,15 +1025,16 @@ TEST(PrecedencesPropagatorTest, BasicFiltering2) { // 0 2 -- 4 // \ / / // 3 5 - model.Add(LowerOrEqualWithOffset(vars[0], vars[1], 2)); - model.Add(LowerOrEqualWithOffset(vars[1], vars[2], 2)); - model.Add(LowerOrEqualWithOffset(vars[0], vars[3], 1)); - model.Add(LowerOrEqualWithOffset(vars[3], vars[2], 2)); - model.Add(LowerOrEqualWithOffset(vars[2], vars[4], 2)); - model.Add(LowerOrEqualWithOffset(vars[5], vars[4], 7)); + auto* r = model.GetOrCreate(); + r->AddUpperBound(LinearExpression2::Difference(vars[0], vars[1]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[0], vars[3]), -1); + r->AddUpperBound(LinearExpression2::Difference(vars[3], vars[2]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[2], vars[4]), -2); + r->AddUpperBound(LinearExpression2::Difference(vars[5], vars[4]), -7); std::vector precedences; - model.GetOrCreate()->ComputeFullPrecedences( + model.GetOrCreate()->ComputeFullPrecedences( {vars[0], vars[1], vars[3]}, &precedences); // Same as before here. @@ -1027,7 +1045,7 @@ TEST(PrecedencesPropagatorTest, BasicFiltering2) { // But if we ask for 5, we will get two results. 
precedences.clear(); - model.GetOrCreate()->ComputeFullPrecedences( + model.GetOrCreate()->ComputeFullPrecedences( {vars[0], vars[1], vars[3], vars[5]}, &precedences); ASSERT_EQ(precedences.size(), 2); EXPECT_EQ(precedences[0].var, vars[2]); @@ -1043,6 +1061,7 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); const IntegerVariable z = model.Add(NewIntegerVariable(0, 2)); + const IntegerVariable w = model.Add(NewIntegerVariable(0, 20)); // x - y; LinearExpression2 expr; @@ -1052,11 +1071,17 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { expr.coeffs[1] = IntegerValue(-1); // Starts with trivial level zero bound. - auto* tested = model.GetOrCreate(); + auto* tested = model.GetOrCreate(); + auto* root_level_lin2_bounds = model.GetOrCreate(); EXPECT_EQ(tested->UpperBound(expr), IntegerValue(10)); + auto* search = model.GetOrCreate(); + search->TakeDecision( + Literal(search->GetDecisionLiteral(BooleanOrIntegerLiteral( + IntegerLiteral::LowerOrEqual(w, IntegerValue(10)))))); + // Lets add a relation. - tested->AddRelationBounds(expr, IntegerValue(-5), IntegerValue(5)); + root_level_lin2_bounds->Add(expr, IntegerValue(-5), IntegerValue(5)); EXPECT_EQ(tested->UpperBound(expr), IntegerValue(5)); // Note that we canonicalize with gcd. @@ -1070,7 +1095,6 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { EXPECT_EQ(tested->UpperBound(expr), IntegerValue(9)); // Lets test the reason, first push a new bound. 
- auto* search = model.GetOrCreate(); search->TakeDecision( Literal(search->GetDecisionLiteral(BooleanOrIntegerLiteral( IntegerLiteral::LowerOrEqual(z, IntegerValue(1)))))); diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 0a8780d1c3..435be0f7ba 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -2306,6 +2306,14 @@ class CpModel: """ return cmh.CpSatHelper.write_model_to_file(self.__model, file) + def remove_all_names(self) -> None: + """Removes all names from the model.""" + self.__model.ClearField("name") + for v in self.__model.variables: + v.ClearField("name") + for c in self.__model.constraints: + c.ClearField("name") + @overload def add_hint(self, var: IntVar, value: int) -> None: ... diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 7d16abcf42..811338522b 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -443,12 +443,19 @@ class CpModelTest(absltest.TestCase): self.assertEqual(-4, model.proto.constraints[2].enforcement_literal[0]) self.assertEqual(2, model.proto.constraints[2].enforcement_literal[1]) - def test_constraint_with_name(self) -> None: + def test_names(self) -> None: model = cp_model.CpModel() + model.name = "test_model" x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") ct = model.add_linear_constraint(x + 2 * y, 0, 10).with_name("test_constraint") + self.assertEqual(model.name, "test_model") + self.assertEqual(x.name, "x") self.assertEqual("test_constraint", ct.name) + model.remove_all_names() + self.assertEmpty(model.name) + self.assertEmpty(x.name) + self.assertEmpty(ct.name) def test_natural_api_minimize(self) -> None: model = cp_model.CpModel() diff --git a/ortools/sat/rins.cc b/ortools/sat/rins.cc index 11a6672285..fbd01e762f 100644 --- a/ortools/sat/rins.cc +++ b/ortools/sat/rins.cc @@ -204,14 +204,11 @@ ReducedDomainNeighborhood GetRinsRensNeighborhood( if 
(relaxation_values.empty()) return reduced_domains; // Not generated. std::bernoulli_distribution three_out_of_four(0.75); - - if (response_manager != nullptr && - response_manager->SolutionsRepository().NumSolutions() > 0 && + if (response_manager != nullptr && response_manager->HasFeasibleSolution() && three_out_of_four(random)) { // Rins. std::shared_ptr::Solution> solution = - response_manager->SolutionsRepository().GetRandomBiasedSolution( - random); + response_manager->SolutionPool().GetSolutionToImprove(random); FillRinsNeighborhood(solution->variable_values, relaxation_values, difficulty, random, reduced_domains); reduced_domains.source_info = "rins_"; diff --git a/ortools/sat/rins_test.cc b/ortools/sat/rins_test.cc index 74ff5995da..17740b6d10 100644 --- a/ortools/sat/rins_test.cc +++ b/ortools/sat/rins_test.cc @@ -16,6 +16,7 @@ #include #include +#include "absl/strings/match.h" #include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/base/parse_test_proto.h" @@ -150,19 +151,24 @@ TEST(GetRinsRensNeighborhoodTest, GetRinsRensNeighborhoodLP) { // Add a lp solution. lp_solutions.NewLPSolution({3.5, 5}); lp_solutions.Synchronize(); + // Add a solution. 
CpSolverResponse solution; solution.add_solution(4); solution.add_solution(5); shared_response_manager->NewSolution(solution.solution(), solution.solution_info(), &model); - shared_response_manager->MutableSolutionsRepository()->Synchronize(); + shared_response_manager->Synchronize(); - const ReducedDomainNeighborhood rins_neighborhood = GetRinsRensNeighborhood( - shared_response_manager, &lp_solutions, &incomplete_solutions, - /*difficulty=*/0.5, random); + ReducedDomainNeighborhood rins_neighborhood; + for (int i = 0; i < 100; ++i) { + rins_neighborhood = GetRinsRensNeighborhood( + shared_response_manager, &lp_solutions, &incomplete_solutions, + /*difficulty=*/0.5, random); + if (absl::StartsWith(rins_neighborhood.source_info, "rins")) break; + } - EXPECT_EQ(rins_neighborhood.reduced_domain_vars.size(), 0); + EXPECT_TRUE(rins_neighborhood.reduced_domain_vars.empty()); EXPECT_EQ(rins_neighborhood.fixed_vars.size(), 1); EXPECT_EQ(rins_neighborhood.fixed_vars[0].first, 1); EXPECT_EQ(rins_neighborhood.fixed_vars[0].second, 5); diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index fb7d23f541..b013e7d314 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -24,7 +24,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 325 +// NEXT TAG: 326 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -1336,10 +1336,15 @@ message SatParameters { optional bool use_lns_only = 101 [default = false]; // Size of the top-n different solutions kept by the solver. - // This parameter must be > 0. - // Currently this only impact the "base" solution chosen for a LNS fragment. + // This parameter must be > 0. Currently, having this larger than one mainly + // impact the "base" solution chosen for a LNS/LS fragment. 
optional int32 solution_pool_size = 193 [default = 3]; + // In order to not get stuck in local optima, when this is non-zero, we try to + // also work on "older" solutions with a worse objective value so we get a + // chance to follow a different LS/LNS trajectory. + optional int32 alternative_pool_size = 325 [default = 1]; + // Turns on relaxation induced neighborhood generator. optional bool use_rins_lns = 129 [default = true]; diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index e51929b16d..27c07aac51 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -1102,26 +1102,25 @@ std::string CompletionTimeEvent::DebugString() const { void CtExhaustiveHelper::Init( const absl::Span events, Model* model) { - BinaryRelationsMaps* binary_relations = - model->GetOrCreate(); max_task_index_ = 0; - for (const auto& event : events) { - max_task_index_ = std::max(max_task_index_, event.task_index); - } + if (events.empty() || events.size() > 100) return; + + ReifiedLinear2Bounds* binary_relations = + model->GetOrCreate(); + + std::vector sorted_events(events.begin(), events.end()); + std::sort(sorted_events.begin(), sorted_events.end(), + [](const CompletionTimeEvent& a, const CompletionTimeEvent& b) { + return a.task_index < b.task_index; + }); + max_task_index_ = sorted_events.back().task_index; predecessors_.reserve(max_task_index_ + 1); for (const auto& e1 : events) { - CHECK_LE(predecessors_.size(), e1.task_index); - while (predecessors_.size() <= e1.task_index) { - predecessors_.Add({}); - } - - // Cap the number of precedences to avoid O(n^2) time complexity. 
- if (predecessors_.num_entries() > 20000) break; - for (const auto& e2 : events) { if (e2.task_index == e1.task_index) continue; if (binary_relations->GetLevelZeroPrecedenceStatus(e2.end, e1.start) == RelationStatus::IS_TRUE) { + while (predecessors_.size() <= e1.task_index) predecessors_.Add({}); predecessors_.AppendToLastVector(e2.task_index); } } @@ -1138,6 +1137,7 @@ bool CtExhaustiveHelper::PermutationIsCompatibleWithPrecedences( visited_.assign(max_task_index_ + 1, false); for (int i = permutation.size() - 1; i >= 0; --i) { const CompletionTimeEvent& event = events[permutation[i]]; + if (event.task_index >= predecessors_.size()) continue; for (const int predecessor : predecessors_[event.task_index]) { if (visited_[predecessor]) return false; } @@ -1328,9 +1328,11 @@ CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( helper.task_to_index_[events[i].task_index] = i; } helper.valid_permutation_iterator_.Reset(events.size()); + const auto& predecessors = helper.predecessors(); for (int i = 0; i < events.size(); ++i) { const int task_i = events[i].task_index; - for (const int task_j : helper.predecessors()[task_i]) { + if (task_i >= predecessors.size()) continue; + for (const int task_j : predecessors[task_i]) { const int j = helper.task_to_index_[task_j]; if (j != -1) { helper.valid_permutation_iterator_.AddArc(j, i); @@ -1456,6 +1458,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( helper, min_sum_of_ends, min_sum_of_weighted_ends, cut_use_precedences, exploration_limit); if (status == CompletionTimeExplorationStatus::NO_VALID_PERMUTATION) { + // TODO(user): We should return false here but there is a bug. 
break; } else if (status == CompletionTimeExplorationStatus::ABORTED) { break; @@ -1846,6 +1849,7 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( auto generate_cuts = [integer_trail, sat_solver, model, manager, helper, demands_helper, capacity](bool time_is_forward) -> bool { + DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0); if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) { return false; } diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index 69d0bad256..149bc24703 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -48,7 +48,8 @@ SchedulingConstraintHelper::SchedulingConstraintHelper( assignment_(sat_solver_->Assignment()), integer_trail_(model->GetOrCreate()), watcher_(model->GetOrCreate()), - precedence_relations_(model->GetOrCreate()), + precedence_relations_(model->GetOrCreate()), + root_level_lin2_bounds_(model->GetOrCreate()), starts_(std::move(starts)), ends_(std::move(ends)), sizes_(std::move(sizes)), @@ -86,7 +87,8 @@ SchedulingConstraintHelper::SchedulingConstraintHelper(int num_tasks, sat_solver_(model->GetOrCreate()), assignment_(sat_solver_->Assignment()), integer_trail_(model->GetOrCreate()), - precedence_relations_(model->GetOrCreate()), + precedence_relations_(model->GetOrCreate()), + root_level_lin2_bounds_(model->GetOrCreate()), capacity_(num_tasks), cached_size_min_(new IntegerValue[capacity_]), cached_start_min_(new IntegerValue[capacity_]), @@ -390,7 +392,7 @@ bool SchedulingConstraintHelper::PropagatePrecedence(int a, int b) { const IntegerValue offset = before.constant - after.constant; const LinearExpression2 expr = LinearExpression2::Difference(before.var, after.var); - if (precedence_relations_->AddUpperBound(expr, -offset)) { + if (root_level_lin2_bounds_->AddUpperBound(expr, -offset)) { VLOG(2) << "new relation " << TaskDebugString(a) << " <= " << TaskDebugString(b); if (before.var == NegationOf(after.var)) { diff --git 
a/ortools/sat/scheduling_helpers.h b/ortools/sat/scheduling_helpers.h index def922023e..9b524644da 100644 --- a/ortools/sat/scheduling_helpers.h +++ b/ortools/sat/scheduling_helpers.h @@ -397,7 +397,8 @@ class SchedulingConstraintHelper : public PropagatorInterface { const VariablesAssignment& assignment_; IntegerTrail* integer_trail_; GenericLiteralWatcher* watcher_; - PrecedenceRelations* precedence_relations_; + Linear2Bounds* precedence_relations_; + RootLevelLinear2Bounds* root_level_lin2_bounds_; // The current direction of time, true for forward, false for backward. bool current_time_direction_ = true; diff --git a/ortools/sat/shaving_solver.cc b/ortools/sat/shaving_solver.cc index cb35ef5677..488ce0228e 100644 --- a/ortools/sat/shaving_solver.cc +++ b/ortools/sat/shaving_solver.cc @@ -633,9 +633,9 @@ bool VariablesShavingSolver::ResetAndSolveModel(int64_t task_id, State* state, // Use the current best solution as hint. { - auto sols = shared_->response->SolutionsRepository().GetBestNSolutions(1); - if (!sols.empty()) { - const std::vector& solution = sols[0]->variable_values; + auto sol = shared_->response->SolutionPool().BestSolutions().GetSolution(0); + if (sol != nullptr) { + const std::vector& solution = sol->variable_values; auto* hint = shaving_proto->mutable_solution_hint(); hint->clear_vars(); hint->clear_values(); diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index a4272ca360..0c1ed51803 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -30,9 +30,6 @@ #include #include -#include "absl/hash/hash.h" -#include "absl/log/log.h" -#include "absl/time/time.h" #include "ortools/base/logging.h" #include "ortools/base/timer.h" #if !defined(__PORTABLE_PLATFORM__) @@ -40,11 +37,17 @@ #include "ortools/base/options.h" #endif // __PORTABLE_PLATFORM__ #include "absl/algorithm/container.h" +#include "absl/base/thread_annotations.h" #include "absl/container/btree_map.h" #include 
"absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" +#include "absl/hash/hash.h" #include "absl/log/check.h" +#include "absl/log/log.h" +#include "absl/numeric/int128.h" +#include "absl/random/bit_gen_ref.h" +#include "absl/random/distributions.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" @@ -74,6 +77,144 @@ ABSL_FLAG(bool, cp_model_dump_tightened_models, false, namespace operations_research { namespace sat { +std::shared_ptr::Solution> +SharedSolutionPool::Add(SharedSolutionRepository::Solution solution) { + // Only add to the alternative path if it has the correct source id. + if (alternative_path_.num_solutions_to_keep() > 0 && + solution.source_id == alternative_path_.source_id()) { + alternative_path_.Add(solution); + if (solution.rank < best_solutions_.GetBestRank()) { + VLOG(2) << "ALTERNATIVE WIN !"; + } + } + + // For now we only return a solution if it was stored in best_solutions_. + return best_solutions_.Add(std::move(solution)); +} + +void SharedSolutionPool::Synchronize(absl::BitGenRef random) { + // Update the "seeds" for the aternative path. + if (alternative_path_.num_solutions_to_keep() > 0) { + absl::MutexLock mutex_lock(&mutex_); + + auto process_solution = + [this](const SharedSolutionRepository::Solution& solution) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { + if (solution.variable_values.empty()) return; + if (solution.rank < min_rank_ || solution.rank > max_rank_) { + // Recompute buckets. + min_rank_ = std::min(min_rank_, solution.rank); + max_rank_ = std::max(max_rank_, solution.rank); + + // We want to store around 100 MB max. 
+ int num_solutions = std::max( + 10, 100'000'000 / solution.variable_values.size()); + const int64_t range = max_rank_ - min_rank_ + 1; + if (num_solutions > range) { + num_solutions = range; + } + + // But if the number of variables is low, we do not want + // to use a lot of space/time just iterating over num_solutions. + // + // TODO(user): Rework the algo to be in + // O(num_different_solutions) rather than initializing the + // maximum amount right away. + num_solutions = std::min(num_solutions, 1'000); + + // Resize and recompute rank_. + // + // seeds_[i] should contains solution in [ranks_[i], + // rank_[i+1]). rank_[0] is always min_rank_. As long as we have + // room, we should have exactly one bucket per rank. + ranks_.resize(num_solutions); + seeds_.resize(num_solutions); + + int64_t offset = (max_rank_ - min_rank_ + 1) / num_solutions; + CHECK_GT(offset, 0); + for (int i = 0; i < num_solutions; ++i) { + ranks_[i] = min_rank_ + + static_cast(absl::int128(i) * + absl::int128(range) / + absl::int128(num_solutions)); + } + + // Move existing solutions to their new bucket. + int to_index = seeds_.size() - 1; + for (int i = seeds_.size(); --i >= 0;) { + if (seeds_[i] == nullptr) continue; + while (to_index >= 0 && ranks_[to_index] > seeds_[i]->rank) { + --to_index; + } + seeds_[to_index] = std::move(seeds_[i]); + } + } + + // rank[limit] is the first > solution.rank. + const int limit = std::upper_bound(ranks_.begin(), ranks_.end(), + solution.rank) - + ranks_.begin(); + CHECK_GT(limit, 0); + seeds_[limit - 1] = + std::make_shared::Solution>( + solution); + }; + + // All solution go through best_solutions_.Add(), so we only need + // to process these here. + best_solutions_.Synchronize(process_solution); + } else { + best_solutions_.Synchronize(); + } + alternative_path_.Synchronize(); + + // If we try to improve the alternate path without success, reset it + // from a random path_seeds_. 
+ // + // TODO(user): find a way to generate random solution and update the seeds + // with them. Shall we do that in a continuous way or only when needed? + if (alternative_path_.num_solutions_to_keep() > 0) { + // Restart the alternative path ? + const int threshold = std::max( + 100, static_cast(std::sqrt(best_solutions_.num_queried()))); + if (alternative_path_.NumRecentlyNonImproving() > threshold) { + VLOG(2) << "Done. num_non_improving: " + << alternative_path_.NumRecentlyNonImproving() + << " achieved: " << alternative_path_.GetBestRank() << " / " + << best_solutions_.GetBestRank(); + alternative_path_.ClearSolutionsAndIncreaseSourceId(); + } + + // If we restarted, or we are at the beginning, pick a seed for the path. + if (alternative_path_.NumSolutions() == 0) { + absl::MutexLock mutex_lock(&mutex_); + + // Pick random bucket with bias. If the bucket is empty, we will scan + // "worse" bucket until we find a solution. We never pick bucket 0. + if (seeds_.size() > 1) { + // Note that LogUniform() is always inclusive. + // TODO(user): Shall we bias even more? + int index = 1 + absl::LogUniform(random, 0, seeds_.size() - 2); + for (; index < seeds_.size(); ++index) { + if (seeds_[index] != nullptr) { + alternative_path_.Add(*seeds_[index]); + alternative_path_.Synchronize(); + VLOG(2) << "RESTART bucket=" << index << "/" << seeds_.size() + << " rank=" << alternative_path_.GetSolution(0)->rank + << " from_optimal=" + << alternative_path_.GetSolution(0)->rank - min_rank_; + break; + } + } + + // The last bucket should never be empty. 
+ CHECK(seeds_.back() != nullptr); + CHECK_LT(index, seeds_.size()); + } + } + } +} + void SharedLPSolutionRepository::NewLPSolution( std::vector lp_solution) { if (lp_solution.empty()) return; @@ -119,7 +260,8 @@ SharedResponseManager::SharedResponseManager(Model* model) : parameters_(*model->GetOrCreate()), wall_timer_(*model->GetOrCreate()), shared_time_limit_(model->GetOrCreate()), - solutions_(parameters_.solution_pool_size(), "feasible solutions"), + random_(model->GetOrCreate()), + solution_pool_(parameters_), logger_(model->GetOrCreate()) { bounds_logging_id_ = logger_->GetNewThrottledId(); } @@ -397,13 +539,15 @@ IntegerValue SharedResponseManager::GetInnerObjectiveUpperBound() { } void SharedResponseManager::Synchronize() { + solution_pool_.Synchronize(*random_); + absl::MutexLock mutex_lock(&mutex_); synchronized_inner_objective_lower_bound_ = IntegerValue(inner_objective_lower_bound_); synchronized_inner_objective_upper_bound_ = IntegerValue(inner_objective_upper_bound_); synchronized_best_status_ = best_status_; - if (solutions_.NumSolutions() > 0) { + if (solution_pool_.BestSolutions().NumSolutions() > 0) { first_solution_solvers_should_stop_ = true; } logger_->FlushPendingThrottledLogs(); @@ -502,7 +646,7 @@ void SharedResponseManager::UnregisterBestBoundCallback(int callback_id) { CpSolverResponse SharedResponseManager::GetResponseInternal( absl::Span variable_values, - const std::string& solution_info) { + absl::string_view solution_info) { CpSolverResponse result; result.set_status(best_status_); if (!unsat_cores_.empty()) { @@ -551,19 +695,19 @@ CpSolverResponse SharedResponseManager::GetResponseInternal( CpSolverResponse SharedResponseManager::GetResponse() { absl::MutexLock mutex_lock(&mutex_); CpSolverResponse result; - if (solutions_.NumSolutions() == 0) { + if (solution_pool_.BestSolutions().NumSolutions() == 0) { result = GetResponseInternal({}, ""); } else { std::shared_ptr::Solution> - solution = solutions_.GetSolution(0); + solution = 
solution_pool_.BestSolutions().GetSolution(0); result = GetResponseInternal(solution->variable_values, solution->info); } // If this is true, we postsolve and copy all of our solutions. if (parameters_.fill_additional_solutions_in_response()) { std::vector temp; - for (int i = 0; i < solutions_.NumSolutions(); ++i) { - std::shared_ptr::Solution> - solution = solutions_.GetSolution(i); + const int size = solution_pool_.BestSolutions().NumSolutions(); + for (int i = 0; i < size; ++i) { + const auto solution = solution_pool_.BestSolutions().GetSolution(i); temp = solution->variable_values; for (int i = solution_postprocessors_.size(); --i >= 0;) { solution_postprocessors_[i](&temp); @@ -623,7 +767,7 @@ void SharedResponseManager::FillObjectiveValuesInResponse( std::shared_ptr::Solution> SharedResponseManager::NewSolution(absl::Span solution_values, const std::string& solution_info, - Model* model) { + Model* model, int source_id) { absl::MutexLock mutex_lock(&mutex_); std::shared_ptr::Solution> ret; @@ -634,7 +778,8 @@ SharedResponseManager::NewSolution(absl::Span solution_values, solution.variable_values.assign(solution_values.begin(), solution_values.end()); solution.info = solution_info; - ret = solutions_.Add(solution); + solution.source_id = source_id; + ret = solution_pool_.Add(solution); } else { const int64_t objective_value = ComputeInnerObjective(*objective_or_null_, solution_values); @@ -645,7 +790,8 @@ SharedResponseManager::NewSolution(absl::Span solution_values, solution_values.end()); solution.rank = objective_value; solution.info = solution_info; - ret = solutions_.Add(solution); + solution.source_id = source_id; + ret = solution_pool_.Add(solution); // Ignore any non-strictly improving solution. if (objective_value > inner_objective_upper_bound_) return ret; @@ -666,7 +812,7 @@ SharedResponseManager::NewSolution(absl::Span solution_values, // In single thread, no one is synchronizing the solution manager, so we // should do it from here. 
if (always_synchronize_) { - solutions_.Synchronize(); + solution_pool_.Synchronize(*random_); first_solution_solvers_should_stop_ = true; } diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index c6eadff080..38722c2264 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -61,8 +61,11 @@ template class SharedSolutionRepository { public: explicit SharedSolutionRepository(int num_solutions_to_keep, - absl::string_view name = "") - : name_(name), num_solutions_to_keep_(num_solutions_to_keep) {} + absl::string_view name = "", + int source_id = -1) + : name_(name), + num_solutions_to_keep_(num_solutions_to_keep), + source_id_(source_id) {} // The solution format used by this class. struct Solution { @@ -84,6 +87,8 @@ class SharedSolutionRepository { // Should be private: only SharedSolutionRepository should modify this. mutable int num_selected = 0; + int source_id; // Internal information. + bool operator==(const Solution& other) const { return rank == other.rank && variable_values == other.variable_values; } @@ -100,10 +105,11 @@ class SharedSolutionRepository { int NumSolutions() const; // Returns the solution #i where i must be smaller than NumSolutions(). + // Returns nullptr if i is out of range. std::shared_ptr GetSolution(int index) const; - // Returns the rank of the best known solution. - // You shouldn't call this if NumSolutions() is zero. + // Returns the rank of the best known solution. If there is no solution, this + // will return std::numeric_limits::max(). int64_t GetBestRank() const; std::vector> GetBestNSolutions(int n) const; @@ -131,7 +137,9 @@ class SharedSolutionRepository { // set of added solutions is the same. // // Works in O(num_solutions_to_keep_). - void Synchronize(); + // + // If f() is provided, it will be called on all new solutions. 
+ void Synchronize(std::function f = nullptr); std::vector TableLineStats() const { absl::MutexLock mutex_lock(&mutex_); @@ -139,20 +147,52 @@ class SharedSolutionRepository { FormatCounter(num_queried_), FormatCounter(num_synchronization_)}; } + int64_t NumRecentlyNonImproving() const { + absl::MutexLock mutex_lock(&mutex_); + return num_non_improving_; + } + + void ClearSolutionsAndIncreaseSourceId() { + absl::MutexLock mutex_lock(&mutex_); + new_solutions_.clear(); + solutions_.clear(); + ++source_id_; + } + + int source_id() const { + absl::MutexLock mutex_lock(&mutex_); + return source_id_; + } + + int num_queried() const { + absl::MutexLock mutex_lock(&mutex_); + return num_queried_; + } + + int num_solutions_to_keep() const { return num_solutions_to_keep_; } + protected: const std::string name_; const int num_solutions_to_keep_; mutable absl::Mutex mutex_; + int source_id_ ABSL_GUARDED_BY(mutex_); int64_t num_added_ ABSL_GUARDED_BY(mutex_) = 0; mutable int64_t num_queried_ ABSL_GUARDED_BY(mutex_) = 0; int64_t num_synchronization_ ABSL_GUARDED_BY(mutex_) = 0; + mutable int64_t num_queried_at_last_sync_ ABSL_GUARDED_BY(mutex_) = 0; + mutable int64_t num_non_improving_ ABSL_GUARDED_BY(mutex_) = 0; + // Our two solutions pools, the current one and the new one that will be // merged into the current one on each Synchronize() calls. mutable std::vector tmp_indices_ ABSL_GUARDED_BY(mutex_); std::vector> solutions_ ABSL_GUARDED_BY(mutex_); std::vector> new_solutions_ ABSL_GUARDED_BY(mutex_); + + // For computing orthogonality. + std::vector ABSL_GUARDED_BY(mutex_) distances_; + std::vector ABSL_GUARDED_BY(mutex_) buffer_; }; // Solutions coming from the LP. @@ -165,6 +205,74 @@ class SharedLPSolutionRepository : public SharedSolutionRepository { void NewLPSolution(std::vector lp_solution); }; +// This stores all the feasible solutions the solver know about. +// Moreover, for meta-heuristics, we keep them in different buckets. 
+class SharedSolutionPool { + public: + explicit SharedSolutionPool(const SatParameters& parameters_) + : best_solutions_(parameters_.solution_pool_size(), "best_solutions"), + alternative_path_(parameters_.alternative_pool_size(), + "alternative_path", /*source_id=*/0) {} + + const SharedSolutionRepository& BestSolutions() const { + return best_solutions_; + } + + // Note that the given random generator is likely local to the thread calling + // this. + std::shared_ptr::Solution> + GetSolutionToImprove(absl::BitGenRef random) const { + // If we seems to have trouble making progress, work on the alternative + // path too. + if (alternative_path_.num_solutions_to_keep() > 0 && + best_solutions_.NumRecentlyNonImproving() > 100 && + absl::Bernoulli(random, 0.5) && alternative_path_.NumSolutions() > 0) { + // Tricky: We might clear the alternative_path_ between NumSolutions() + // and this call. + auto result = alternative_path_.GetRandomBiasedSolution(random); + if (result != nullptr) return result; + } + + if (best_solutions_.NumSolutions() > 0) { + return best_solutions_.GetRandomBiasedSolution(random); + } + return nullptr; + } + + std::shared_ptr::Solution> Add( + SharedSolutionRepository::Solution solution); + + void Synchronize(absl::BitGenRef random); + + void AddTableStats(std::vector>* table) const { + table->push_back(best_solutions_.TableLineStats()); + table->push_back(alternative_path_.TableLineStats()); + } + + private: + // Currently we only have two "pools" of solutions. + SharedSolutionRepository best_solutions_; + SharedSolutionRepository alternative_path_; + + // We also keep a list of possible "path seeds" in n buckets defined according + // to the objective value of the solution. These are updated on Synchronize(). + // Bucket i will only contain the last seen solution in the internal objective + // range [ranks_[i], ranks_[i + 1]). + // + // ranks_[0] should always be min_rank_, and seeds_[0] should be one of the + // best known solution. 
We usually never select seeds_[0] but keep it around + // for later in case new best solutions are found. + absl::Mutex mutex_; + int64_t max_rank_ ABSL_GUARDED_BY(mutex_) = + std::numeric_limits::min(); + int64_t min_rank_ ABSL_GUARDED_BY(mutex_) = + std::numeric_limits::max(); + std::vector ranks_; + std::vector< + std::shared_ptr::Solution>> + ABSL_GUARDED_BY(mutex_) seeds_; +}; + // Set of best solution from the feasibility jump workers. // // We store (solution, num_violated_constraints), so we have a list of solutions @@ -316,6 +424,13 @@ class SharedResponseManager { void Synchronize(); IntegerValue GetInnerObjectiveLowerBound(); IntegerValue GetInnerObjectiveUpperBound(); + IntegerValue GetBestSolutionObjective() { + if (solution_pool_.BestSolutions().NumSolutions() > 0) { + return solution_pool_.BestSolutions().GetBestRank(); + } else { + return GetInnerObjectiveUpperBound(); + } + } // Returns the current best solution inner objective value or kInt64Max if // there is no solution. @@ -361,7 +476,8 @@ class SharedResponseManager { // stored in the repository. std::shared_ptr::Solution> NewSolution(absl::Span solution_values, - const std::string& solution_info, Model* model = nullptr); + const std::string& solution_info, Model* model = nullptr, + int source_id = -1); // Changes the solution to reflect the fact that the "improving" problem is // infeasible. This means that if we have a solution, we have proven @@ -380,14 +496,13 @@ class SharedResponseManager { // OPTIMAL and consider the problem solved. bool ProblemIsSolved() const; + bool HasFeasibleSolution() const { + return solution_pool_.BestSolutions().NumSolutions() > 0; + } + // Returns the underlying solution repository where we keep a set of best // solutions. 
- const SharedSolutionRepository& SolutionsRepository() const { - return solutions_; - } - SharedSolutionRepository* MutableSolutionsRepository() { - return &solutions_; - } + const SharedSolutionPool& SolutionPool() const { return solution_pool_; } // Debug only. Set dump prefix for solutions written to file. void set_dump_prefix(absl::string_view dump_prefix) { @@ -433,11 +548,12 @@ class SharedResponseManager { // Generates a response for callbacks and GetResponse(). CpSolverResponse GetResponseInternal( absl::Span variable_values, - const std::string& solution_info) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + absl::string_view solution_info) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); const SatParameters& parameters_; const WallTimer& wall_timer_; ModelSharedTimeLimit* shared_time_limit_; + ModelRandomGenerator* random_; CpObjectiveProto const* objective_or_null_ = nullptr; mutable absl::Mutex mutex_; @@ -450,7 +566,7 @@ class SharedResponseManager { CpSolverStatus synchronized_best_status_ ABSL_GUARDED_BY(mutex_) = CpSolverStatus::UNKNOWN; std::vector unsat_cores_ ABSL_GUARDED_BY(mutex_); - SharedSolutionRepository solutions_; // Thread-safe. + SharedSolutionPool solution_pool_; // Thread-safe. 
int num_solutions_ ABSL_GUARDED_BY(mutex_) = 0; int64_t inner_objective_lower_bound_ ABSL_GUARDED_BY(mutex_) = @@ -807,6 +923,7 @@ template std::shared_ptr::Solution> SharedSolutionRepository::GetSolution(int i) const { absl::MutexLock mutex_lock(&mutex_); + if (i >= solutions_.size()) return nullptr; ++num_queried_; return solutions_[i]; } @@ -814,7 +931,7 @@ SharedSolutionRepository::GetSolution(int i) const { template int64_t SharedSolutionRepository::GetBestRank() const { absl::MutexLock mutex_lock(&mutex_); - CHECK_GT(solutions_.size(), 0); + if (solutions_.empty()) return std::numeric_limits::max(); return solutions_[0]->rank; } @@ -823,11 +940,12 @@ std::vector::Solution>> SharedSolutionRepository::GetBestNSolutions(int n) const { absl::MutexLock mutex_lock(&mutex_); - // Sorted and unique. - DCHECK(absl::c_is_sorted( - solutions_, - [](const std::shared_ptr& a, - const std::shared_ptr& b) { return *a < *b; })); + // Sorted by rank and unique. + DCHECK(absl::c_is_sorted(solutions_, + [](const std::shared_ptr& a, + const std::shared_ptr& b) { + return a->rank < b->rank; + })); DCHECK(absl::c_adjacent_find(solutions_, [](const std::shared_ptr& a, const std::shared_ptr& b) { @@ -855,34 +973,41 @@ std::shared_ptr::Solution> SharedSolutionRepository::GetRandomBiasedSolution( absl::BitGenRef random) const { absl::MutexLock mutex_lock(&mutex_); + if (solutions_.empty()) return nullptr; ++num_queried_; - const int64_t best_rank = solutions_[0]->rank; + int index = 0; - // As long as we have solution with the best objective that haven't been - // explored too much, we select one uniformly. Otherwise, we select a solution - // from the pool uniformly. - // - // Note(user): Because of the increase of num_selected, this is dependent on - // the order of call. It should be fine for "determinism" because we do - // generate the task of a batch always in the same order. 
- const int kExplorationThreshold = 100; + if (solutions_.size() > 1) { + const int64_t best_rank = solutions_[0]->rank; - // Select all the best solution with a low enough selection count. - tmp_indices_.clear(); - for (int i = 0; i < solutions_.size(); ++i) { - std::shared_ptr solution = solutions_[i]; - if (solution->rank == best_rank && - solution->num_selected <= kExplorationThreshold) { - tmp_indices_.push_back(i); + // As long as we have solution with the best objective that haven't been + // explored too much, we select one uniformly. Otherwise, we select a + // solution from the pool uniformly. + // + // Note(user): Because of the increase of num_selected, this is dependent on + // the order of call. It should be fine for "determinism" because we do + // generate the task of a batch always in the same order. + const int kExplorationThreshold = 100; + + // Select all the best solution with a low enough selection count. + tmp_indices_.clear(); + for (int i = 0; i < solutions_.size(); ++i) { + std::shared_ptr solution = solutions_[i]; + if (solution->rank == best_rank && + solution->num_selected <= kExplorationThreshold) { + tmp_indices_.push_back(i); + } + } + + if (tmp_indices_.empty()) { + index = absl::Uniform(random, 0, solutions_.size()); + } else { + index = tmp_indices_[absl::Uniform(random, 0, tmp_indices_.size())]; } } - int index = 0; - if (tmp_indices_.empty()) { - index = absl::Uniform(random, 0, solutions_.size()); - } else { - index = tmp_indices_[absl::Uniform(random, 0, tmp_indices_.size())]; - } + CHECK_GE(index, 0); + CHECK_LT(index, solutions_.size()); solutions_[index]->num_selected++; return solutions_[index]; } @@ -896,38 +1021,147 @@ SharedSolutionRepository::Add(Solution solution) { { absl::MutexLock mutex_lock(&mutex_); ++num_added_; + solution_ptr->source_id = source_id_; new_solutions_.push_back(solution_ptr); } return solution_ptr; } template -void SharedSolutionRepository::Synchronize() { +void 
SharedSolutionRepository::Synchronize( + std::function f) { absl::MutexLock mutex_lock(&mutex_); - if (new_solutions_.empty()) return; + if (new_solutions_.empty()) { + const int64_t diff = num_queried_ - num_queried_at_last_sync_; + num_non_improving_ += diff; + num_queried_at_last_sync_ = num_queried_; + return; + } + + if (f != nullptr) { + gtl::STLStableSortAndRemoveDuplicates( + &new_solutions_, + [](const std::shared_ptr& a, + const std::shared_ptr& b) { return *a < *b; }); + for (const auto& ptr : new_solutions_) { + f(*ptr); + } + } + + const int64_t old_best_rank = solutions_.empty() + ? std::numeric_limits::max() + : solutions_[0]->rank; solutions_.insert(solutions_.end(), new_solutions_.begin(), new_solutions_.end()); new_solutions_.clear(); // We use a stable sort to keep the num_selected count for the already - // existing solutions. - // - // TODO(user): Introduce a notion of orthogonality to diversify the pool? + // existing solutions (in case of duplicates). gtl::STLStableSortAndRemoveDuplicates( &solutions_, [](const std::shared_ptr& a, const std::shared_ptr& b) { return *a < *b; }); + const int64_t new_best_rank = solutions_[0]->rank; + + // If we have more than num_solutions_to_keep_ solutions with the best rank, + // select them via orthogonality. + if (solutions_.size() > num_solutions_to_keep_ && + num_solutions_to_keep_ > 1) { + int num_best = 1; + while (num_best < solutions_.size() && + solutions_[num_best]->rank == new_best_rank) { + ++num_best; + } + + if (num_best > num_solutions_to_keep_ && num_solutions_to_keep_ < 10) { + // We should only be here if a new solution (not in our current set) was + // found. It could be one we saw before but forgot about. We put one + // first. + for (auto& solution : solutions_) { + if (solution->num_selected == 0) { + // TODO(user): randomize amongst new solution? + std::swap(solutions_[0], solution); + break; + } + } + + // We are going to be in O(n^2 * solution_size), so keep n <= 10. 
+ solutions_.resize(std::min(10, num_best)); + + // Fill the pairwise distances. + const int n = solutions_.size(); + distances_.resize(n * n); + const int size = solutions_[0]->variable_values.size(); + for (int i = 0; i < n; ++i) { + for (int j = i + 1; j < n; ++j) { + int64_t dist = 0; + for (int k = 0; k < size; ++k) { + if (solutions_[i]->variable_values[k] != + solutions_[j]->variable_values[k]) { + ++dist; + } + } + distances_[i * n + j] = distances_[j * n + i] = dist; + } + } + + // In order to not get stuck on a subset that always maximize the sum of + // orthogonality, we pick the first element (which should be a new one + // thanks to the swap above), and we maximize the sum of orthogonality + // with the rest. + // + // This way, as we find new solution, the set changes slowly. + const std::vector selected = + FindMostDiverseSubset(num_solutions_to_keep_, n, distances_, buffer_, + /*always_pick_mask = */ 1); + + DCHECK(std::is_sorted(selected.begin(), selected.end())); + int new_size = 0; + for (const int s : selected) { + solutions_[new_size++] = std::move(solutions_[s]); + } + solutions_.resize(new_size); + + if (VLOG_IS_ON(3)) { + int min_count = std::numeric_limits::max(); + int max_count = 0; + for (const auto& s : solutions_) { + CHECK(s != nullptr); + min_count = std::min(s->num_selected, min_count); + max_count = std::max(s->num_selected, max_count); + } + int64_t score = 0; + for (const int i : selected) { + for (const int j : selected) { + if (i > j) score += distances_[i * n + j]; + } + } + LOG(INFO) << name_ << " rank=" << new_best_rank + << " num=" << num_solutions_to_keep_ << "/" << num_best + << " orthogonality=" << score << " count=[" << min_count + << ", " << max_count << "]"; + } + } + } + if (solutions_.size() > num_solutions_to_keep_) { solutions_.resize(num_solutions_to_keep_); } - + CHECK(!solutions_.empty()); if (!solutions_.empty()) { - VLOG(2) << "Solution pool update:" << " num_solutions=" << solutions_.size() + VLOG(4) << 
"Solution pool update:" << " num_solutions=" << solutions_.size() << " min_rank=" << solutions_[0]->rank << " max_rank=" << solutions_.back()->rank; } num_synchronization_++; + if (new_best_rank < old_best_rank) { + num_non_improving_ = 0; + } else { + const int64_t diff = num_queried_ - num_queried_at_last_sync_; + num_non_improving_ += diff; + } + num_queried_at_last_sync_ = num_queried_; } } // namespace sat diff --git a/ortools/sat/util.cc b/ortools/sat/util.cc index 9d047a4f6b..da811a56f0 100644 --- a/ortools/sat/util.cc +++ b/ortools/sat/util.cc @@ -14,6 +14,7 @@ #include "ortools/sat/util.h" #include +#include #include #include #include @@ -25,6 +26,7 @@ #include "absl/algorithm/container.h" #include "absl/container/btree_set.h" #include "absl/log/check.h" +#include "absl/numeric/bits.h" #include "absl/numeric/int128.h" #include "absl/random/bit_gen_ref.h" #include "absl/random/distributions.h" @@ -1008,5 +1010,48 @@ int64_t MaxBoundedSubsetSumExact::MaxSubsetSum( return result; } +std::vector FindMostDiverseSubset(int k, int n, + absl::Span distances, + std::vector& buffer, + int always_pick_mask) { + CHECK_LE(n, 20); + const int limit = 1 << n; + buffer.assign(limit, 0); + int best_mask; + int best_value = -1; + for (unsigned int mask = 1; mask < limit; ++mask) { + const int hamming_weight = absl::popcount(mask); + + // TODO(user): Increase mask by more than one ? but counting to 1k is fast + // anyway. 
+ if (hamming_weight > k) continue; + int low_bit = -1; + int64_t sum = 0; + for (int i = 0; i < n; ++i) { + if ((mask >> i) & 1) { + if (low_bit == -1) { + low_bit = i; + } else { + sum += distances[low_bit * n + i]; + } + } + } + buffer[mask] = buffer[mask ^ (1 << low_bit)] + sum; + if (hamming_weight == k && buffer[mask] > best_value) { + if ((mask & always_pick_mask) != always_pick_mask) continue; + best_value = buffer[mask]; + best_mask = mask; + } + } + std::vector result; + result.reserve(k); + for (int i = 0; i < n; ++i) { + if ((best_mask >> i) & 1) { + result.push_back(i); + } + } + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/util.h b/ortools/sat/util.h index 88a5b927d3..3ec89ce7a9 100644 --- a/ortools/sat/util.h +++ b/ortools/sat/util.h @@ -391,6 +391,21 @@ int MoveOneUnprocessedLiteralLast( const absl::btree_set& processed, int relevant_prefix_size, std::vector* literals); +// Selects k out of n such that the sum of pairwise distances is maximal. +// distances[i * n + j] = distances[j * n + j] = distances between i and j. +// +// This shall only be called with small n, we CHECK_LE(n, 20). +// Complexity is in O(2 ^ n + n_choose_k * n). +// Memory is in O(2 ^ n). +// +// In case of tie, this will choose deterministically, so one can randomize the +// order first to get a random subset. The returned subset will always be +// sorted. +std::vector FindMostDiverseSubset(int k, int n, + absl::Span distances, + std::vector& buffer, + int always_pick_mask = 0); + // Simple DP to compute the maximum reachable value of a "subset sum" under // a given bound (inclusive). Note that we abort as soon as the computation // become too important. @@ -1005,7 +1020,6 @@ inline void CompactVectorVector::ResetFromTranspose( // // Note 2: adding an arc during an iteration is not supported and the behavior // is undefined. 
- class DagTopologicalSortIterator { public: DagTopologicalSortIterator() = default; diff --git a/ortools/sat/util_test.cc b/ortools/sat/util_test.cc index 1b2be2db49..fc6074dca2 100644 --- a/ortools/sat/util_test.cc +++ b/ortools/sat/util_test.cc @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -1160,6 +1161,94 @@ TEST(DagTopologicalSortIteratorTest, RandomTest) { } } +TEST(FindMostDiverseSubsetTest, Random) { + const int k = 4; + const int n = 10; + absl::BitGen random; + std::vector distances(n * n); + std::vector buffer; + for (int i = 0; i < n; ++i) { + for (int j = i + 1; j < n; ++j) { + distances[i * n + j] = distances[j * n + i] = + absl::Uniform(random, 0, 1000); + } + } + + const std::vector result = + FindMostDiverseSubset(k, n, distances, buffer); + CHECK(std::is_sorted(result.begin(), result.end())); + int64_t result_value = 0; + for (const int i : result) { + for (const int j : result) { + if (i < j) result_value += distances[i * n + j]; + } + } + + int64_t best_seen = 0; + std::vector subset; + const int limit = 1 << n; + for (unsigned int mask = 0; mask < limit; ++mask) { + if (absl::popcount(mask) != k) continue; + subset.clear(); + for (int i = 0; i < n; ++i) { + if ((mask >> i) & 1) subset.push_back(i); + } + int64_t value = 0; + for (const int i : subset) { + for (const int j : subset) { + if (i < j) value += distances[i * n + j]; + } + } + ASSERT_LE(value, result_value); + best_seen = std::max(best_seen, value); + } + EXPECT_EQ(best_seen, result_value); +} + +TEST(FindMostDiverseSubsetTest, RandomButAlwaysPickZero) { + const int k = 5; + const int n = 10; + absl::BitGen random; + std::vector distances(n * n); + std::vector buffer; + for (int i = 0; i < n; ++i) { + for (int j = i + 1; j < n; ++j) { + distances[i * n + j] = distances[j * n + i] = + absl::Uniform(random, 0, 1000); + } + } + + const std::vector result = + FindMostDiverseSubset(k, n, distances, buffer, /*always_pick_mask=*/1); + 
CHECK(std::is_sorted(result.begin(), result.end())); + int64_t result_value = 0; + for (const int i : result) { + for (const int j : result) { + if (i < j) result_value += distances[i * n + j]; + } + } + + int64_t best_seen = 0; + std::vector subset; + const int limit = 1 << n; + for (unsigned int mask = 1; mask < limit; mask += 2) { // bit 1 always set. + if (absl::popcount(mask) != k) continue; + subset.clear(); + for (int i = 0; i < n; ++i) { + if ((mask >> i) & 1) subset.push_back(i); + } + int64_t value = 0; + for (const int i : subset) { + for (const int j : subset) { + if (i < j) value += distances[i * n + j]; + } + } + ASSERT_LE(value, result_value); + best_seen = std::max(best_seen, value); + } + EXPECT_EQ(best_seen, result_value); +} + } // namespace } // namespace sat } // namespace operations_research From d3617d58b26c11ff723f3f3cf1c3e83fda3bb7bf Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 11 Jun 2025 12:44:21 +0200 Subject: [PATCH 069/509] proper fix --- ortools/sat/scheduling_cuts.cc | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 27c07aac51..65da5a23da 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -1100,10 +1100,17 @@ std::string CompletionTimeEvent::DebugString() const { "]"); } + void CtExhaustiveHelper::Init( const absl::Span events, Model* model) { max_task_index_ = 0; - if (events.empty() || events.size() > 100) return; + if (events.empty()) return; + // We compute the max_task_index_ from the events early to avoid sorting + // the events if there are too many of them. 
+ for (const auto& event : events) { + max_task_index_ = std::max(max_task_index_, event.task_index); + } + if (events.size() > 100) return; ReifiedLinear2Bounds* binary_relations = model->GetOrCreate(); @@ -1113,10 +1120,9 @@ void CtExhaustiveHelper::Init( [](const CompletionTimeEvent& a, const CompletionTimeEvent& b) { return a.task_index < b.task_index; }); - max_task_index_ = sorted_events.back().task_index; predecessors_.reserve(max_task_index_ + 1); - for (const auto& e1 : events) { - for (const auto& e2 : events) { + for (const auto& e1 : sorted_events) { + for (const auto& e2 : sorted_events) { if (e2.task_index == e1.task_index) continue; if (binary_relations->GetLevelZeroPrecedenceStatus(e2.end, e1.start) == RelationStatus::IS_TRUE) { From 9956831a04855c32c642c917155864a1ca23ce42 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 13:21:25 +0200 Subject: [PATCH 070/509] cmake: Fix README.md --- cmake/README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/cmake/README.md b/cmake/README.md index a233fb8142..efb3a66088 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -193,10 +193,10 @@ CMake Option | Default Value | Note `BUILD_DOTNET` | OFF | Build .Net wrapper and packages `BUILD_JAVA` | OFF | Build Java wrapper and packages `BUILD_PYTHON` | OFF | Build Python wrapper and package - | | +| `BUILD_FLATZINC` | ON\* | Build the flatzinc library
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_GLOP` | OFF\* | Build the standalone Glop library
**Forced** to OFF if `BUILD_CXX=ON`, otherwise default to ON - | | +| **Dependencies** `BUILD_DEPS` | OFF* | Default to ON if `BUILD_JAVA=ON` or `BUILD_PYTHON=ON` or `BUILD_DOTNET=ON` `BUILD_ZLIB` | OFF* | Build the zlib dynamic library
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_BZip2` | OFF* | Build the bzip2 dynamic library
**Forced** to ON if `BUILD_DEPS=ON` @@ -204,44 +204,44 @@ CMake Option | Default Value | Note `BUILD_Protobuf` | OFF* | Build the protobuf dynamic libraries
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_re2` | OFF* | Build the re2 dynamic libraries
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_Eigen3` | OFF* | Build the Eigen3 libraries
**Forced** to ON if `BUILD_DEPS=ON` - | | +| Coin-OR `USE_COINOR` | ON\* | Enable Coin-OR support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_CoinUtils` | OFF\* | Build the CoinUtils dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Osi` | OFF\* | Build the Osi dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Clp` | OFF\* | Build the Clp dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Cgl` | OFF\* | Build the Cgl dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Cbc` | OFF\* | Build the Cbc dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` - | | +| GLPK `USE_GLPK` | OFF\* | Enable GLPK support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_GLPK` | OFF\* | Build the GLPK dynamic libraries
**Forced** to ON if `USE_GLPK=ON` **and** `BUILD_DEPS=ON` - | | +| HiGHS `USE_HIGHS` | ON\* | Enable HIGHS support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_HIGHS` | OFF\* | Build the HiGHS dynamic libraries
**Forced** to ON if `USE_HIGHS=ON` **and** `BUILD_DEPS=ON` - | | +| SCIP `USE_SCIP` | ON\* | Enable SCIP support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_SCIP` | OFF\* | Build the SCIP dynamic libraries
**Forced** to ON if `USE_SCIP=ON` **and** `BUILD_DEPS=ON` - | | +| CPLEX `USE_CPLEX` | OFF | Enable CPLEX support - | | +| **Documentation** `BUILD_DOC` | OFF\* | Build all documentations `BUILD_CXX_DOC` | OFF\* | Build C++ documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_DOTNET_DOC` | OFF\* | Build .Net documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_JAVA_DOC` | OFF\* | Build Java documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_PYTHON_DOC` | OFF\* | Build Python documentation
**Forced** to ON if `BUILD_DOC=ON` `INSTALL_DOC` | OFF\* | Install all documentations
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_DOC=OFF` - | | +| **Samples** `BUILD_SAMPLES` | ON\* | Build all samples
Default to ON if `BUILD_DEPS=ON` `BUILD_CXX_SAMPLES` | ON\* | Build all C++ samples
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_SAMPLE=OFF` `BUILD_DOTNET_SAMPLES` | ON\* | Build all .Net samples
**Forced** to OFF if `BUILD_DOTNET=OFF` or `BUILD_SAMPLE=OFF` `BUILD_JAVA_SAMPLES` | ON\* | Build all Java samples
**Forced** to OFF if `BUILD_JAVA=OFF` or `BUILD_SAMPLE=OFF` `BUILD_PYTHON_SAMPLES` | ON\* | Build all Python samples
**Forced** to OFF if `BUILD_PYTHON=OFF` or `BUILD_SAMPLE=OFF` - | | +| **Examples** `BUILD_EXAMPLES` | ON\* | Build all examples
Default to ON if `BUILD_DEPS=ON` `BUILD_CXX_EXAMPLES` | ON\* | Build all C++ examples
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_SAMPLE=OFF` `BUILD_DOTNET_EXAMPLES` | ON\* | Build all .Net examples
**Forced** to OFF if `BUILD_DOTNET=OFF` or `BUILD_SAMPLE=OFF` `BUILD_JAVA_EXAMPLES` | ON\* | Build all Java examples
**Forced** to OFF if `BUILD_JAVA=OFF` or `BUILD_SAMPLE=OFF` `BUILD_PYTHON_EXAMPLES` | ON\* | Build all Python examples
**Forced** to OFF if `BUILD_PYTHON=OFF` or `BUILD_SAMPLE=OFF` - | | +| **.Net** `USE_DOTNET_46` | OFF | Enable .Net Framework 4.6 support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_461` | OFF | Enable .Net Framework 4.6.1 support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_462` | OFF | Enable .Net Framework 4.6.2 support
Only available if `BUILD_DOTNET=ON` @@ -253,11 +253,11 @@ CMake Option | Default Value | Note `USE_DOTNET_8` | ON | Enable .Net 8 LTS support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_9` | OFF | Enable .Net 9 support
Only available if `BUILD_DOTNET=ON` `UNIVERSAL_DOTNET_PACKAGE` | OFF | Build a multi platform package (i.e. `Google.OrTools` will depends on all runtime packages)
Only available if `BUILD_DOTNET=ON` - | | +| **Java** `SKIP_GPG` | ON | Disable GPG sign
Only available if `BUILD_JAVA=ON` `UNIVERSAL_JAVA_PACKAGE` | OFF | Build a multi platform package (i.e. `ortools-java` will depends on all native packages)
Only available if `BUILD_JAVA=ON` `BUILD_FAT_JAR` | OFF | Build a `ortools-java` .jar that includes all of its own Maven dependencies, including the native package
Only available if `BUILD_JAVA=ON` - | | +| **Python** `BUILD_pybind11` | `BUILD_DEPS` | Static build the pybind11 libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` `BUILD_pybind11_abseil` | `BUILD_DEPS` | Static build the pybind11_abseil libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` `BUILD_pybind11_protobuf` | `BUILD_DEPS` | Static build the pybind11_protobuf libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` @@ -265,7 +265,7 @@ CMake Option | Default Value | Note `BUILD_VENV` | `BUILD_TESTING` | Create python venv in `BINARY_DIR/python/venv`
**Forced** to ON if `BUILD_TESTING=ON`
Only available if `BUILD_PYTHON=ON` `VENV_USE_SYSTEM_SITE_PACKAGES` | OFF | Python venv can use system site package (e.g. `py3-numpy` on Alpine)
Only available if `BUILD_PYTHON=ON` and `BUILD_VENV=ON` `FETCH_PYTHON_DEPS` | `BUILD_DEPS` | Fetch python modules needed to build ortools package
Only available if `BUILD_PYTHON=ON` - | | +| ## Integrating OR-Tools in your CMake Project From 8b82db18497ed3b5144b2bf544968c220535caa4 Mon Sep 17 00:00:00 2001 From: Mizux Date: Wed, 11 Jun 2025 13:24:56 +0200 Subject: [PATCH 071/509] Update README.md cmake: Fix README.md for GFM note: code markdown preview do not have the same behaviour... --- cmake/README.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/cmake/README.md b/cmake/README.md index efb3a66088..816f67d067 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -193,10 +193,10 @@ CMake Option | Default Value | Note `BUILD_DOTNET` | OFF | Build .Net wrapper and packages `BUILD_JAVA` | OFF | Build Java wrapper and packages `BUILD_PYTHON` | OFF | Build Python wrapper and package -| +| | `BUILD_FLATZINC` | ON\* | Build the flatzinc library
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_GLOP` | OFF\* | Build the standalone Glop library
**Forced** to OFF if `BUILD_CXX=ON`, otherwise default to ON -| **Dependencies** +| **Dependencies** | `BUILD_DEPS` | OFF* | Default to ON if `BUILD_JAVA=ON` or `BUILD_PYTHON=ON` or `BUILD_DOTNET=ON` `BUILD_ZLIB` | OFF* | Build the zlib dynamic library
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_BZip2` | OFF* | Build the bzip2 dynamic library
**Forced** to ON if `BUILD_DEPS=ON` @@ -204,44 +204,44 @@ CMake Option | Default Value | Note `BUILD_Protobuf` | OFF* | Build the protobuf dynamic libraries
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_re2` | OFF* | Build the re2 dynamic libraries
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_Eigen3` | OFF* | Build the Eigen3 libraries
**Forced** to ON if `BUILD_DEPS=ON` -| Coin-OR +| Coin-OR | `USE_COINOR` | ON\* | Enable Coin-OR support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_CoinUtils` | OFF\* | Build the CoinUtils dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Osi` | OFF\* | Build the Osi dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Clp` | OFF\* | Build the Clp dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Cgl` | OFF\* | Build the Cgl dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Cbc` | OFF\* | Build the Cbc dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` -| GLPK +| GLPK | `USE_GLPK` | OFF\* | Enable GLPK support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_GLPK` | OFF\* | Build the GLPK dynamic libraries
**Forced** to ON if `USE_GLPK=ON` **and** `BUILD_DEPS=ON` -| HiGHS +| HiGHS | `USE_HIGHS` | ON\* | Enable HIGHS support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_HIGHS` | OFF\* | Build the HiGHS dynamic libraries
**Forced** to ON if `USE_HIGHS=ON` **and** `BUILD_DEPS=ON` -| SCIP +| SCIP | `USE_SCIP` | ON\* | Enable SCIP support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_SCIP` | OFF\* | Build the SCIP dynamic libraries
**Forced** to ON if `USE_SCIP=ON` **and** `BUILD_DEPS=ON` | CPLEX `USE_CPLEX` | OFF | Enable CPLEX support -| **Documentation** +| **Documentation** | `BUILD_DOC` | OFF\* | Build all documentations `BUILD_CXX_DOC` | OFF\* | Build C++ documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_DOTNET_DOC` | OFF\* | Build .Net documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_JAVA_DOC` | OFF\* | Build Java documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_PYTHON_DOC` | OFF\* | Build Python documentation
**Forced** to ON if `BUILD_DOC=ON` `INSTALL_DOC` | OFF\* | Install all documentations
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_DOC=OFF` -| **Samples** +| **Samples** | `BUILD_SAMPLES` | ON\* | Build all samples
Default to ON if `BUILD_DEPS=ON` `BUILD_CXX_SAMPLES` | ON\* | Build all C++ samples
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_SAMPLE=OFF` `BUILD_DOTNET_SAMPLES` | ON\* | Build all .Net samples
**Forced** to OFF if `BUILD_DOTNET=OFF` or `BUILD_SAMPLE=OFF` `BUILD_JAVA_SAMPLES` | ON\* | Build all Java samples
**Forced** to OFF if `BUILD_JAVA=OFF` or `BUILD_SAMPLE=OFF` `BUILD_PYTHON_SAMPLES` | ON\* | Build all Python samples
**Forced** to OFF if `BUILD_PYTHON=OFF` or `BUILD_SAMPLE=OFF` -| **Examples** +| **Examples** | `BUILD_EXAMPLES` | ON\* | Build all examples
Default to ON if `BUILD_DEPS=ON` `BUILD_CXX_EXAMPLES` | ON\* | Build all C++ examples
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_SAMPLE=OFF` `BUILD_DOTNET_EXAMPLES` | ON\* | Build all .Net examples
**Forced** to OFF if `BUILD_DOTNET=OFF` or `BUILD_SAMPLE=OFF` `BUILD_JAVA_EXAMPLES` | ON\* | Build all Java examples
**Forced** to OFF if `BUILD_JAVA=OFF` or `BUILD_SAMPLE=OFF` `BUILD_PYTHON_EXAMPLES` | ON\* | Build all Python examples
**Forced** to OFF if `BUILD_PYTHON=OFF` or `BUILD_SAMPLE=OFF` -| **.Net** +| **.Net** | `USE_DOTNET_46` | OFF | Enable .Net Framework 4.6 support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_461` | OFF | Enable .Net Framework 4.6.1 support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_462` | OFF | Enable .Net Framework 4.6.2 support
Only available if `BUILD_DOTNET=ON` @@ -253,11 +253,11 @@ CMake Option | Default Value | Note `USE_DOTNET_8` | ON | Enable .Net 8 LTS support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_9` | OFF | Enable .Net 9 support
Only available if `BUILD_DOTNET=ON` `UNIVERSAL_DOTNET_PACKAGE` | OFF | Build a multi platform package (i.e. `Google.OrTools` will depends on all runtime packages)
Only available if `BUILD_DOTNET=ON` -| **Java** +| **Java** | `SKIP_GPG` | ON | Disable GPG sign
Only available if `BUILD_JAVA=ON` `UNIVERSAL_JAVA_PACKAGE` | OFF | Build a multi platform package (i.e. `ortools-java` will depends on all native packages)
Only available if `BUILD_JAVA=ON` `BUILD_FAT_JAR` | OFF | Build a `ortools-java` .jar that includes all of its own Maven dependencies, including the native package
Only available if `BUILD_JAVA=ON` -| **Python** +| **Python** | `BUILD_pybind11` | `BUILD_DEPS` | Static build the pybind11 libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` `BUILD_pybind11_abseil` | `BUILD_DEPS` | Static build the pybind11_abseil libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` `BUILD_pybind11_protobuf` | `BUILD_DEPS` | Static build the pybind11_protobuf libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` @@ -265,7 +265,7 @@ CMake Option | Default Value | Note `BUILD_VENV` | `BUILD_TESTING` | Create python venv in `BINARY_DIR/python/venv`
**Forced** to ON if `BUILD_TESTING=ON`
Only available if `BUILD_PYTHON=ON` `VENV_USE_SYSTEM_SITE_PACKAGES` | OFF | Python venv can use system site package (e.g. `py3-numpy` on Alpine)
Only available if `BUILD_PYTHON=ON` and `BUILD_VENV=ON` `FETCH_PYTHON_DEPS` | `BUILD_DEPS` | Fetch python modules needed to build ortools package
Only available if `BUILD_PYTHON=ON` -| +| | ## Integrating OR-Tools in your CMake Project From f8f4651d7682604f9ccd1bb7847bf6e8287999dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Tue, 10 Jun 2025 14:53:23 +0200 Subject: [PATCH 072/509] cmake: Add support for custom protoc executable via OR_TOOLS_PROTOC_EXECUTABLE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow users to specify a custom protoc executable by setting the OR_TOOLS_PROTOC_EXECUTABLE variable, which takes precedence over the default cross-compilation and system protoc detection logic. Signed-off-by: Clément Péron --- cmake/host.cmake | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmake/host.cmake b/cmake/host.cmake index fe303362fe..f95949cb90 100644 --- a/cmake/host.cmake +++ b/cmake/host.cmake @@ -11,6 +11,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +if (OR_TOOLS_PROTOC_EXECUTABLE) + set(PROTOC_PRG ${OR_TOOLS_PROTOC_EXECUTABLE}) + return() +endif() + if(NOT CMAKE_CROSSCOMPILING) set(PROTOC_PRG protobuf::protoc) return() From e25617bac8d40ee78550cdd0002fb1b6e87d39cd Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 11 Jun 2025 15:03:14 +0200 Subject: [PATCH 073/509] reindent; span --- ortools/algorithms/BUILD.bazel | 1 + ortools/algorithms/knapsack_solver_test.cc | 9 +++++---- ortools/algorithms/samples/knapsack.py | 1 + ortools/algorithms/samples/simple_knapsack_program.py | 1 + 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index 4f4def32e7..ed81524193 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -240,6 +240,7 @@ cc_test( "//ortools/base:gmock_main", "//ortools/util:time_limit", "@abseil-cpp//absl/base:core_headers", + "@abseil-cpp//absl/types:span", ], ) diff --git a/ortools/algorithms/knapsack_solver_test.cc 
b/ortools/algorithms/knapsack_solver_test.cc index 1589f20e43..2c7572aead 100644 --- a/ortools/algorithms/knapsack_solver_test.cc +++ b/ortools/algorithms/knapsack_solver_test.cc @@ -18,6 +18,7 @@ #include #include "absl/base/macros.h" +#include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/util/time_limit.h" @@ -26,8 +27,8 @@ namespace { const int kInvalidSolution = -1; -bool IsSolutionValid(const std::vector& profits, - const std::vector >& weights, +bool IsSolutionValid(absl::Span profits, + absl::Span> weights, const std::vector& capacities, const std::vector& best_solution, int64_t optimal_profit) { @@ -59,7 +60,7 @@ int64_t SolveKnapsackProblemUsingSpecificSolverAndReduction( std::vector profits(profit_array, profit_array + number_of_items); std::vector capacities(capacity_array, capacity_array + number_of_dimensions); - std::vector > weights; + std::vector> weights; for (int i = 0; i < number_of_dimensions; ++i) { const int64_t* one_dimension = weight_array + number_of_items * i; std::vector weights_one_dimension(one_dimension, @@ -484,7 +485,7 @@ TEST(KnapsackSolverTest, SolveTwoDimensionsSettingPrimaryPropagator) { std::vector profits(kProfitArray, kProfitArray + kArraySize); std::vector capacities(kCapacityArray, kCapacityArray + kNumberOfDimensions); - std::vector > weights; + std::vector> weights; for (int i = 0; i < kNumberOfDimensions; ++i) { const int64_t* one_dimension = kWeightArray + kArraySize * i; std::vector weights_one_dimension(one_dimension, diff --git a/ortools/algorithms/samples/knapsack.py b/ortools/algorithms/samples/knapsack.py index eb63388c4e..d0e92e2631 100644 --- a/ortools/algorithms/samples/knapsack.py +++ b/ortools/algorithms/samples/knapsack.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.algorithms.python import knapsack_solver + # [END import] diff --git a/ortools/algorithms/samples/simple_knapsack_program.py b/ortools/algorithms/samples/simple_knapsack_program.py index 
03ab5729d3..666a399796 100644 --- a/ortools/algorithms/samples/simple_knapsack_program.py +++ b/ortools/algorithms/samples/simple_knapsack_program.py @@ -16,6 +16,7 @@ """A simple knapsack problem.""" # [START import] from ortools.algorithms.python import knapsack_solver + # [END import] From 9ca57ad2cd8d7a2489af041dbbf984ead4d0715d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 11 Jun 2025 15:03:52 +0200 Subject: [PATCH 074/509] [CP-SAT] more precedence handling; minor python improvements --- ortools/sat/BUILD.bazel | 3 + ortools/sat/constraint_violation.cc | 2 +- ortools/sat/constraint_violation.h | 2 +- ortools/sat/cp_model_presolve.cc | 2 +- ortools/sat/cp_model_solver_helpers.cc | 26 +-- ortools/sat/cp_model_solver_test.cc | 2 +- ortools/sat/disjunctive.cc | 14 +- ortools/sat/flaky_models_test.cc | 2 +- ortools/sat/integer_base.cc | 25 +- ortools/sat/integer_base.h | 19 +- ortools/sat/precedences.cc | 152 +++++++------ ortools/sat/precedences.h | 57 +++-- ortools/sat/precedences_test.cc | 251 +++++++++++++-------- ortools/sat/python/cp_model_helper.cc | 16 +- ortools/sat/python/cp_model_helper_test.py | 3 + ortools/sat/python/cp_model_test.py | 12 + ortools/sat/routing_cuts.cc | 114 ++++++---- ortools/sat/routing_cuts_test.cc | 227 +++++++++++-------- ortools/sat/scheduling_cuts.cc | 15 +- ortools/sat/scheduling_cuts.h | 3 + ortools/sat/scheduling_cuts_test.cc | 2 +- ortools/sat/scheduling_helpers.cc | 79 +++---- ortools/sat/scheduling_helpers.h | 16 +- ortools/sat/util.cc | 1 - ortools/sat/util_test.cc | 2 +- 25 files changed, 630 insertions(+), 417 deletions(-) diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index b7de5d2357..f76d43b09e 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -2081,6 +2081,7 @@ cc_library( "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:vlog_is_on", + "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/types:span", ], ) @@ -2104,6 +2105,7 @@ cc_test( 
"//ortools/base:parse_test_proto", "//ortools/util:sorted_interval_list", "@abseil-cpp//absl/container:flat_hash_map", + "@abseil-cpp//absl/types:span", ], ) @@ -3203,6 +3205,7 @@ cc_test( "@abseil-cpp//absl/container:btree", "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/log:check", + "@abseil-cpp//absl/numeric:bits", "@abseil-cpp//absl/numeric:int128", "@abseil-cpp//absl/random", "@abseil-cpp//absl/strings", diff --git a/ortools/sat/constraint_violation.cc b/ortools/sat/constraint_violation.cc index 0cd5f80d10..4f41c10499 100644 --- a/ortools/sat/constraint_violation.cc +++ b/ortools/sat/constraint_violation.cc @@ -1830,7 +1830,7 @@ void LsEvaluator::CompileOneConstraint(const ConstraintProto& ct) { void LsEvaluator::CompileConstraintsAndObjective( const std::vector& ignored_constraints, - const std::vector& additional_constraints) { + absl::Span additional_constraints) { constraints_.clear(); // The first compiled constraint is always the objective if present. diff --git a/ortools/sat/constraint_violation.h b/ortools/sat/constraint_violation.h index 54880e9f5a..768fde9514 100644 --- a/ortools/sat/constraint_violation.h +++ b/ortools/sat/constraint_violation.h @@ -434,7 +434,7 @@ class LsEvaluator { private: void CompileConstraintsAndObjective( const std::vector& ignored_constraints, - const std::vector& additional_constraints); + absl::Span additional_constraints); void CompileOneConstraint(const ConstraintProto& ct_proto); void BuildVarConstraintGraph(); diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 9cfb4f05be..589cba2d38 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -7887,7 +7887,7 @@ void CpModelPresolver::Probe() { context_->params().probing_deterministic_time_limit()); for (const auto& [expr, ub] : model.GetOrCreate() - ->GetSortedNonTrivialBounds()) { + ->GetSortedNonTrivialUpperBounds()) { if (expr.vars[0] == kNoIntegerVariable || expr.vars[1] == 
kNoIntegerVariable) { continue; diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index b2016a01d5..1be370ab37 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -1095,13 +1095,15 @@ void FillBinaryRelationRepository(const CpModelProto& model_proto, // var1_min <= var1 - delta.var2 <= var1_max, which is equivalent to // the default bounds if var2 = 0, and gives implied_lb <= var1 <= // var1_max + delta otherwise. - repository->Add(enforcement_literal, {var1, 1}, {var2, -delta}, + repository->Add(enforcement_literal, + LinearExpression2(var1, var2, 1, -delta), var1_domain.Min(), var1_domain.Max()); } else if (negated_var2 != kNoIntegerVariable) { // var1_min + delta <= var1 + delta.neg_var2 <= var1_max + delta, // which is equivalent to the default bounds if neg_var2 = 1, and // gives implied_lb <= var1 <= var1_max + delta otherwise. - repository->Add(enforcement_literal, {var1, 1}, {negated_var2, delta}, + repository->Add(enforcement_literal, + LinearExpression2(var1, negated_var2, 1, delta), var1_domain.Min() + delta, var1_domain.Max() + delta); } }; @@ -1137,27 +1139,23 @@ void FillBinaryRelationRepository(const CpModelProto& model_proto, if (ct.enforcement_literal().empty()) { if (vars.size() == 2) { - repository->Add(Literal(kNoLiteralIndex), {vars[0], coeffs[0]}, - {vars[1], coeffs[1]}, rhs_min, rhs_max); - - LinearExpression2 expr; - expr.vars[0] = vars[0]; - expr.vars[1] = vars[1]; - expr.coeffs[0] = coeffs[0]; - expr.coeffs[1] = coeffs[1]; + const LinearExpression2 expr(vars[0], vars[1], coeffs[0], coeffs[1]); root_level_lin2_bounds->Add(expr, rhs_min, rhs_max); } } else { const Literal lit = mapping->Literal(ct.enforcement_literal(0)); if (vars.size() == 1) { - repository->Add(lit, {vars[0], coeffs[0]}, {}, rhs_min, rhs_max); + repository->Add( + lit, LinearExpression2(vars[0], kNoIntegerVariable, coeffs[0], 0), + rhs_min, rhs_max); } else if (vars.size() == 2) { - 
repository->Add(lit, {vars[0], coeffs[0]}, {vars[1], coeffs[1]}, - rhs_min, rhs_max); + repository->Add( + lit, LinearExpression2(vars[0], vars[1], coeffs[0], coeffs[1]), + rhs_min, rhs_max); } } } - repository->Build(); + repository->Build(root_level_lin2_bounds); } } // namespace diff --git a/ortools/sat/cp_model_solver_test.cc b/ortools/sat/cp_model_solver_test.cc index d48d8ef4d0..e3d719b400 100644 --- a/ortools/sat/cp_model_solver_test.cc +++ b/ortools/sat/cp_model_solver_test.cc @@ -109,7 +109,7 @@ TEST(StopAfterFirstSolutionTest, BooleanLinearOptimizationProblem) { Model model; SatParameters params; - params.set_num_search_workers(8); + params.set_num_workers(8); params.set_stop_after_first_solution(true); int num_solutions = 0; diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index 3098a21a98..d92ae5ac59 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -750,6 +750,7 @@ bool DisjunctiveSimplePrecedences::Propagate() { bool DisjunctiveSimplePrecedences::Push(TaskTime before, int t) { const int t_before = before.task_index; + DCHECK_NE(t_before, t); helper_->ClearReason(); helper_->AddPresenceReason(t_before); @@ -758,6 +759,10 @@ bool DisjunctiveSimplePrecedences::Push(TaskTime before, int t) { if (!helper_->IncreaseStartMin(t, before.time)) { return false; } + if (helper_->CurrentDecisionLevel() == 0 && helper_->IsPresent(t_before) && + helper_->IsPresent(t)) { + if (!helper_->NotifyLevelZeroPrecedence(t_before, t)) return false; + } ++stats_.num_propagations; return true; } @@ -969,7 +974,7 @@ bool DisjunctiveDetectablePrecedences::Push(IntegerValue task_set_end_min, // Process detected precedence. 
if (helper_->CurrentDecisionLevel() == 0 && helper_->IsPresent(t)) { for (int i = critical_index; i < sorted_tasks.size(); ++i) { - if (!helper_->PropagatePrecedence(sorted_tasks[i].task, t)) { + if (!helper_->NotifyLevelZeroPrecedence(sorted_tasks[i].task, t)) { return false; } } @@ -1259,8 +1264,9 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // the offset as much as possible. Note that the alternative of storing it // in PrecedenceData is not necessarily better and harder to update as we // dive/backtrack. - const IntegerValue inner_offset = -linear2_bounds_->UpperBound( - LinearExpression2::Difference(end_exp.var, var)); + const IntegerValue inner_offset = + -linear2_bounds_->NonTrivialUpperBoundForGcd1( + LinearExpression2::Difference(end_exp.var, var)); DCHECK_NE(inner_offset, kMinIntegerValue); // We have var >= end_exp.var + inner_offset, so @@ -1788,7 +1794,7 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { for (int i = first_event; i < window_size; ++i) { const int task = window_[i].task_index; if (!is_gray_[task]) { - if (!helper_->PropagatePrecedence(task, gray_task)) { + if (!helper_->NotifyLevelZeroPrecedence(task, gray_task)) { return false; } } diff --git a/ortools/sat/flaky_models_test.cc b/ortools/sat/flaky_models_test.cc index c233ab9070..e00ebee981 100644 --- a/ortools/sat/flaky_models_test.cc +++ b/ortools/sat/flaky_models_test.cc @@ -90,7 +90,7 @@ TEST(FlakyTest, Issue3108) { SatParameters parameters; parameters.set_log_search_progress(true); parameters.set_cp_model_probing_level(0); - parameters.set_num_search_workers(1); + parameters.set_num_workers(1); const CpSolverResponse response = SolveWithParameters(model_proto, parameters); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); diff --git a/ortools/sat/integer_base.cc b/ortools/sat/integer_base.cc index 7c04db31d4..d514001c31 100644 --- a/ortools/sat/integer_base.cc +++ b/ortools/sat/integer_base.cc @@ -16,6 +16,7 @@ #include #include 
#include +#include #include #include @@ -128,6 +129,16 @@ bool LinearExpression2::IsCanonicalized() const { return coeffs[0] > 0 && coeffs[1] > 0; } +void LinearExpression2::MakeVariablesPositive() { + SimpleCanonicalization(); + for (int i = 0; i < 2; ++i) { + if (vars[i] != kNoIntegerVariable && !VariableIsPositive(vars[i])) { + coeffs[i] = -coeffs[i]; + vars[i] = NegationOf(vars[i]); + } + } +} + std::pair BestBinaryRelationBounds::Add(LinearExpression2 expr, IntegerValue lb, @@ -224,7 +235,7 @@ IntegerValue BestBinaryRelationBounds::UpperBoundWhenCanonicalized( } std::vector> -BestBinaryRelationBounds::GetSortedNonTrivialBounds() const { +BestBinaryRelationBounds::GetSortedNonTrivialUpperBounds() const { std::vector> root_relations_sorted; root_relations_sorted.reserve(2 * best_bounds_.size()); for (const auto& [expr, bounds] : best_bounds_) { @@ -241,4 +252,16 @@ BestBinaryRelationBounds::GetSortedNonTrivialBounds() const { return root_relations_sorted; } +std::vector> +BestBinaryRelationBounds::GetSortedNonTrivialBounds() const { + std::vector> + root_relations_sorted; + root_relations_sorted.reserve(best_bounds_.size()); + for (const auto& [expr, bounds] : best_bounds_) { + root_relations_sorted.push_back({expr, bounds.first, bounds.second}); + } + std::sort(root_relations_sorted.begin(), root_relations_sorted.end()); + return root_relations_sorted; +} + } // namespace operations_research::sat diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index c6f3ba4427..a148a91cf6 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -392,6 +392,8 @@ struct LinearExpression2 { // the expression was negated. bool NegateForCanonicalization(); + void MakeVariablesPositive(); + absl::Span non_zero_vars() const { const int first = coeffs[0] == 0 ? 1 : 0; const int last = coeffs[1] == 0 ? 
0 : 1; @@ -416,14 +418,14 @@ struct LinearExpression2 { IntegerValue coeffs[2]; IntegerVariable vars[2]; -}; -inline std::ostream& operator<<(std::ostream& os, - const LinearExpression2& expr) { - os << absl::StrCat(expr.coeffs[0], " X", expr.vars[0], " + ", expr.coeffs[1], - " X", expr.vars[1]); - return os; -} + template + friend void AbslStringify(Sink& sink, const LinearExpression2& expr) { + absl::Format(&sink, "%d X%d + %d X%d", expr.coeffs[0].value(), + expr.vars[0].value(), expr.coeffs[1].value(), + expr.vars[1].value()); + } +}; template H AbslHashValue(H h, const LinearExpression2& e) { @@ -472,6 +474,9 @@ class BestBinaryRelationBounds { int64_t num_bounds() const { return best_bounds_.size(); } std::vector> + GetSortedNonTrivialUpperBounds() const; + + std::vector> GetSortedNonTrivialBounds() const; private: diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 66fd26ee3a..d7ffd981d8 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -304,7 +304,8 @@ void TransitivePrecedencesEvaluator::Build() { is_built_ = true; const std::vector> - root_relations_sorted = root_level_bounds_->GetSortedNonTrivialBounds(); + root_relations_sorted = + root_level_bounds_->GetSortedNonTrivialUpperBounds(); int max_node = 0; for (const auto [expr, _] : root_relations_sorted) { max_node = std::max(max_node, PositiveVariable(expr.vars[0]).value()); @@ -1219,37 +1220,16 @@ bool PrecedencesPropagator::BellmanFordTarjan(Trail* trail) { return true; } -void BinaryRelationRepository::Add(Literal lit, LinearTerm a, LinearTerm b, +void BinaryRelationRepository::Add(Literal lit, LinearExpression2 expr, IntegerValue lhs, IntegerValue rhs) { - if (lit.Index() != kNoLiteralIndex) { - num_enforced_relations_++; - DCHECK(a.coeff == 0 || a.var != kNoIntegerVariable); - DCHECK(b.coeff == 0 || b.var != kNoIntegerVariable); - } else { - DCHECK_NE(a.coeff, 0); - DCHECK_NE(b.coeff, 0); - DCHECK_NE(a.var, kNoIntegerVariable); - DCHECK_NE(b.var, 
kNoIntegerVariable); - } + expr.MakeVariablesPositive(); + CHECK_NE(lit.Index(), kNoLiteralIndex); + num_enforced_relations_++; + DCHECK(expr.coeffs[0] == 0 || expr.vars[0] != kNoIntegerVariable); + DCHECK(expr.coeffs[1] == 0 || expr.vars[1] != kNoIntegerVariable); - Relation r; - r.enforcement = lit; - r.a = a; - r.b = b; - r.lhs = lhs; - r.rhs = rhs; - - // We shall only consider positive variable here. - if (r.a.var != kNoIntegerVariable && !VariableIsPositive(r.a.var)) { - r.a.var = NegationOf(r.a.var); - r.a.coeff = -r.a.coeff; - } - if (r.b.var != kNoIntegerVariable && !VariableIsPositive(r.b.var)) { - r.b.var = NegationOf(r.b.var); - r.b.coeff = -r.b.coeff; - } - - relations_.push_back(std::move(r)); + relations_.push_back( + {.enforcement = lit, .expr = expr, .lhs = lhs, .rhs = rhs}); } void BinaryRelationRepository::AddPartialRelation(Literal lit, @@ -1258,10 +1238,23 @@ void BinaryRelationRepository::AddPartialRelation(Literal lit, DCHECK_NE(a, kNoIntegerVariable); DCHECK_NE(b, kNoIntegerVariable); DCHECK_NE(a, b); - Add(lit, LinearTerm(a, 1), LinearTerm(b, 1), 0, 0); + Add(lit, LinearExpression2(a, b, 1, 1), 0, 0); } -void BinaryRelationRepository::Build() { +void BinaryRelationRepository::Build( + const RootLevelLinear2Bounds* root_level_bounds) { + for (const auto& [expr, lb, ub] : + root_level_bounds->GetSortedNonTrivialBounds()) { + LinearExpression2 positive_expr = expr; + positive_expr.MakeVariablesPositive(); + Relation r; + r.enforcement = Literal(kNoLiteralIndex); + r.expr = positive_expr; + r.rhs = root_level_bounds->LevelZeroUpperBound(positive_expr); + positive_expr.Negate(); + r.lhs = -root_level_bounds->LevelZeroUpperBound(positive_expr); + relations_.push_back(r); + } DCHECK(!is_built_); is_built_ = true; std::vector> literal_key_values; @@ -1272,10 +1265,11 @@ void BinaryRelationRepository::Build() { for (int i = 0; i < num_relations; ++i) { const Relation& r = relations_[i]; if (r.enforcement.Index() == kNoLiteralIndex) { - 
var_key_values.emplace_back(r.a.var, i); - var_key_values.emplace_back(r.b.var, i); - std::pair key(r.a.var, r.b.var); - if (relations_[i].a.var > relations_[i].b.var) { + var_key_values.emplace_back(r.expr.vars[0], i); + var_key_values.emplace_back(r.expr.vars[1], i); + std::pair key(r.expr.vars[0], + r.expr.vars[1]); + if (relations_[i].expr.vars[0] > relations_[i].expr.vars[1]) { std::swap(key.first, key.second); } var_pair_to_relations_[key].push_back(i); @@ -1311,24 +1305,28 @@ bool BinaryRelationRepository::PropagateLocalBounds( auto update_upper_bound_by_var = [&](IntegerVariable var, IntegerValue ub) { update_lower_bound_by_var(NegationOf(var), -ub); }; - auto update_var_bounds = [&](const LinearTerm& a, const LinearTerm& b, - IntegerValue lhs, IntegerValue rhs) { - if (a.coeff == 0) return; + auto update_var_bounds = [&](const LinearExpression2& expr, IntegerValue lhs, + IntegerValue rhs) { + if (expr.coeffs[0] == 0) return; // lb(b.y) <= b.y <= ub(b.y) and lhs <= a.x + b.y <= rhs imply // ceil((lhs - ub(b.y)) / a) <= x <= floor((rhs - lb(b.y)) / a) - if (b.coeff != 0) { - lhs = lhs - b.coeff * get_upper_bound(b.var); - rhs = rhs - b.coeff * get_lower_bound(b.var); + if (expr.coeffs[1] != 0) { + lhs = lhs - expr.coeffs[1] * get_upper_bound(expr.vars[1]); + rhs = rhs - expr.coeffs[1] * get_lower_bound(expr.vars[1]); } - update_lower_bound_by_var(a.var, MathUtil::CeilOfRatio(lhs, a.coeff)); - update_upper_bound_by_var(a.var, MathUtil::FloorOfRatio(rhs, a.coeff)); + update_lower_bound_by_var(expr.vars[0], + MathUtil::CeilOfRatio(lhs, expr.coeffs[0])); + update_upper_bound_by_var(expr.vars[0], + MathUtil::FloorOfRatio(rhs, expr.coeffs[0])); }; auto update_var_bounds_from_relation = [&](Relation r) { - r.a.MakeCoeffPositive(); - r.b.MakeCoeffPositive(); - update_var_bounds(r.a, r.b, r.lhs, r.rhs); - update_var_bounds(r.b, r.a, r.lhs, r.rhs); + r.expr.SimpleCanonicalization(); + + update_var_bounds(r.expr, r.lhs, r.rhs); + std::swap(r.expr.vars[0], 
r.expr.vars[1]); + std::swap(r.expr.coeffs[0], r.expr.coeffs[1]); + update_var_bounds(r.expr, r.lhs, r.rhs); }; if (lit.Index() < lit_to_relations_.size()) { for (const int relation_index : lit_to_relations_[lit]) { @@ -1361,17 +1359,22 @@ bool GreaterThanAtLeastOneOfDetector::AddRelationFromIndices( const IntegerValue var_lb = integer_trail->LevelZeroLowerBound(var); for (const int index : indices) { Relation r = repository_.relation(index); - if (r.a.var != PositiveVariable(var)) std::swap(r.a, r.b); - CHECK_EQ(r.a.var, PositiveVariable(var)); + if (r.expr.vars[0] != PositiveVariable(var)) { + std::swap(r.expr.vars[0], r.expr.vars[1]); + std::swap(r.expr.coeffs[0], r.expr.coeffs[1]); + } + CHECK_EQ(r.expr.vars[0], PositiveVariable(var)); - if ((r.a.coeff == 1) == VariableIsPositive(var)) { + if ((r.expr.coeffs[0] == 1) == VariableIsPositive(var)) { // a + b >= lhs if (r.lhs <= kMinIntegerValue) continue; - exprs.push_back(AffineExpression(r.b.var, -r.b.coeff, r.lhs)); + exprs.push_back( + AffineExpression(r.expr.vars[1], -r.expr.coeffs[1], r.lhs)); } else { // -a + b <= rhs. if (r.rhs >= kMaxIntegerValue) continue; - exprs.push_back(AffineExpression(r.b.var, r.b.coeff, -r.rhs)); + exprs.push_back( + AffineExpression(r.expr.vars[1], r.expr.coeffs[1], -r.rhs)); } // Ignore this entry if it is always true. 
@@ -1419,11 +1422,13 @@ int GreaterThanAtLeastOneOfDetector:: for (const int index : repository_.IndicesOfRelationsEnforcedBy(l.Index())) { const Relation& r = repository_.relation(index); - if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { - infos.push_back({r.a.var, index}); + if (r.expr.vars[0] != kNoIntegerVariable && + IntTypeAbs(r.expr.coeffs[0]) == 1) { + infos.push_back({r.expr.vars[0], index}); } - if (r.b.var != kNoIntegerVariable && IntTypeAbs(r.b.coeff) == 1) { - infos.push_back({r.b.var, index}); + if (r.expr.vars[1] != kNoIntegerVariable && + IntTypeAbs(r.expr.coeffs[1]) == 1) { + infos.push_back({r.expr.vars[1], index}); } } } @@ -1473,17 +1478,19 @@ int GreaterThanAtLeastOneOfDetector:: for (int index = 0; index < repository_.size(); ++index) { const Relation& r = repository_.relation(index); if (r.enforcement.Index() == kNoLiteralIndex) continue; - if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { - if (r.a.var >= var_to_relations.size()) { - var_to_relations.resize(r.a.var + 1); + if (r.expr.vars[0] != kNoIntegerVariable && + IntTypeAbs(r.expr.coeffs[0]) == 1) { + if (r.expr.vars[0] >= var_to_relations.size()) { + var_to_relations.resize(r.expr.vars[0] + 1); } - var_to_relations[r.a.var].push_back(index); + var_to_relations[r.expr.vars[0]].push_back(index); } - if (r.b.var != kNoIntegerVariable && IntTypeAbs(r.b.coeff) == 1) { - if (r.b.var >= var_to_relations.size()) { - var_to_relations.resize(r.b.var + 1); + if (r.expr.vars[1] != kNoIntegerVariable && + IntTypeAbs(r.expr.coeffs[1]) == 1) { + if (r.expr.vars[1] >= var_to_relations.size()) { + var_to_relations.resize(r.expr.vars[1] + 1); } - var_to_relations[r.b.var].push_back(index); + var_to_relations[r.expr.vars[1]].push_back(index); } } @@ -1846,6 +1853,21 @@ IntegerValue Linear2Bounds::UpperBound(LinearExpression2 expr) const { return CapProdI(gcd, ub); } +IntegerValue Linear2Bounds::NonTrivialUpperBoundForGcd1( + LinearExpression2 expr) const { + 
expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0) { + return integer_trail_->UpperBound(expr); + } + DCHECK_NE(expr.coeffs[1], 0); + DCHECK_EQ(1, expr.DivideByGcd()); + IntegerValue ub = kMaxIntegerValue; + ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(expr)); + ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(expr)); + ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(expr)); + return ub; +} + void Linear2Bounds::AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 57e75abf64..cf192933a6 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -27,6 +27,7 @@ #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" +#include "absl/strings/str_format.h" #include "absl/types/span.h" #include "ortools/base/strong_vector.h" #include "ortools/graph/graph.h" @@ -76,6 +77,12 @@ class RootLevelLinear2Bounds { // Return a list of (expr <= ub) sorted by expr. std::vector> + GetSortedNonTrivialUpperBounds() const { + return root_level_relations_.GetSortedNonTrivialUpperBounds(); + } + + // Return a list of (lb <= expr <= ub) sorted by expr. + std::vector> GetSortedNonTrivialBounds() const { return root_level_relations_.GetSortedNonTrivialBounds(); } @@ -296,41 +303,26 @@ class EnforcedLinear2Bounds : public ReversibleInterface { std::vector tmp_precedences_; }; -// Similar to AffineExpression, but with a zero constant. -// If coeff is zero, then this is always zero and var is ignored. 
-struct LinearTerm { - LinearTerm() = default; - LinearTerm(IntegerVariable v, IntegerValue c) : var(v), coeff(c) {} - - void MakeCoeffPositive() { - if (coeff < 0) { - coeff = -coeff; - var = NegationOf(var); - } - } - - bool operator==(const LinearTerm& other) const { - return var == other.var && coeff == other.coeff; - } - - IntegerVariable var = kNoIntegerVariable; - IntegerValue coeff = IntegerValue(0); -}; - -// A relation of the form enforcement => a + b \in [lhs, rhs]. +// A relation of the form enforcement => expr \in [lhs, rhs]. // Note that the [lhs, rhs] interval should always be within [min_activity, -// max_activity] where the activity is the value of a + b. +// max_activity] where the activity is the value of expr. struct Relation { Literal enforcement; - LinearTerm a; - LinearTerm b; + LinearExpression2 expr; IntegerValue lhs; IntegerValue rhs; bool operator==(const Relation& other) const { - return enforcement == other.enforcement && a == other.a && b == other.b && + return enforcement == other.enforcement && expr == other.expr && lhs == other.lhs && rhs == other.rhs; } + + template + friend void AbslStringify(Sink& sink, const Relation& relation) { + absl::Format(&sink, "%s => %v in [%v, %v]", + relation.enforcement.DebugString(), relation.expr, + relation.lhs, relation.rhs); + } }; // A repository of all the enforced linear constraints of size 1 or 2, and of @@ -371,10 +363,9 @@ class BinaryRelationRepository { return it->second; } - // Adds a conditional relation lit => a + b \in [lhs, rhs] (one of the terms - // can be zero), or an always true binary relation a + b \in [lhs, rhs] (both - // terms must be non-zero). - void Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, + // Adds a conditional relation lit => expr \in [lhs, rhs] (one of the coeffs + // can be zero). 
+ void Add(Literal lit, LinearExpression2 expr, IntegerValue lhs, IntegerValue rhs); // Adds a partial conditional relation between two variables, with unspecified @@ -383,7 +374,7 @@ class BinaryRelationRepository { // Builds the literal to relations mapping. This should be called once all the // relations have been added. - void Build(); + void Build(const RootLevelLinear2Bounds* root_level_bounds); // Assuming level-zero bounds + any (var >= value) in the input map, // fills "output" with a "propagated" set of bounds assuming lit is true (by @@ -533,6 +524,10 @@ class Linear2Bounds { std::vector* literal_reason, std::vector* integer_reason) const; + // Like UpperBound(), but optimized for the case of gcd == 1 and when we + // don't want the trivial bounds. + IntegerValue NonTrivialUpperBoundForGcd1(LinearExpression2 expr) const; + std::vector GetAllExpressionsWithPotentialNonTrivialBounds() const; diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index 32fd12c3ee..0579923c9a 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -14,10 +14,12 @@ #include "ortools/sat/precedences.h" #include +#include #include #include #include "absl/container/flat_hash_map.h" +#include "absl/types/span.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/base/parse_test_proto.h" @@ -526,45 +528,90 @@ TEST(EnforcedLinear2BoundsTest, CollectPrecedences) { TEST(BinaryRelationRepositoryTest, Build) { Model model; - const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); - const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); - const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable x = model.Add(NewIntegerVariable(-100, 100)); + const IntegerVariable y = model.Add(NewIntegerVariable(-100, 100)); + const IntegerVariable z = model.Add(NewIntegerVariable(-100, 100)); const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); const Literal 
lit_b = Literal(model.Add(NewBooleanVariable()), true); BinaryRelationRepository repository; - repository.Add(lit_a, {NegationOf(x), 1}, {y, 1}, 2, 8); - repository.Add(Literal(kNoLiteralIndex), {x, 2}, {y, -2}, 0, 10); - repository.Add(lit_a, {x, -3}, {NegationOf(y), 2}, 1, 15); - repository.Add(lit_b, {x, -3}, {kNoIntegerVariable, 0}, 3, 5); - repository.Add(Literal(kNoLiteralIndex), {x, 3}, {y, -1}, 5, 15); - repository.Add(Literal(kNoLiteralIndex), {x, 1}, {z, -1}, 0, 10); + RootLevelLinear2Bounds* root_level_bounds = + model.GetOrCreate(); + repository.Add(lit_a, LinearExpression2(NegationOf(x), y, 1, 1), 2, 8); + root_level_bounds->Add(LinearExpression2(x, y, 2, -2), 0, 10); + repository.Add(lit_a, LinearExpression2(x, NegationOf(y), -3, 2), 1, 15); + repository.Add(lit_b, LinearExpression2(x, kNoIntegerVariable, -3, 0), 3, 5); + root_level_bounds->Add(LinearExpression2(x, y, 3, -1), 5, 15); + root_level_bounds->Add(LinearExpression2::Difference(x, z), 0, 10); repository.AddPartialRelation(lit_b, x, z); - repository.Build(); + repository.Build(root_level_bounds); - EXPECT_EQ(repository.size(), 7); - EXPECT_EQ(repository.relation(0), (Relation{lit_a, {x, -1}, {y, 1}, 2, 8})); - EXPECT_EQ(repository.relation(1), - (Relation{Literal(kNoLiteralIndex), {x, 2}, {y, -2}, 0, 10})); - EXPECT_EQ(repository.relation(2), (Relation{lit_a, {x, -3}, {y, -2}, 1, 15})); - EXPECT_EQ(repository.relation(3), - (Relation{lit_b, {x, -3}, {kNoIntegerVariable, 0}, 3, 5})); - EXPECT_EQ(repository.relation(6), (Relation{lit_b, {x, 1}, {z, 1}, 0, 0})); - EXPECT_THAT(repository.IndicesOfRelationsEnforcedBy(lit_a), - UnorderedElementsAre(0, 2)); - EXPECT_THAT(repository.IndicesOfRelationsEnforcedBy(lit_b), - UnorderedElementsAre(3, 6)); - EXPECT_THAT(repository.IndicesOfRelationsContaining(x), - UnorderedElementsAre(1, 4, 5)); - EXPECT_THAT(repository.IndicesOfRelationsContaining(y), - UnorderedElementsAre(1, 4)); - EXPECT_THAT(repository.IndicesOfRelationsContaining(z), - 
UnorderedElementsAre(5)); - EXPECT_THAT(repository.IndicesOfRelationsBetween(x, y), - UnorderedElementsAre(1, 4)); - EXPECT_THAT(repository.IndicesOfRelationsBetween(y, x), - UnorderedElementsAre(1, 4)); - EXPECT_THAT(repository.IndicesOfRelationsBetween(x, z), - UnorderedElementsAre(5)); + auto get_rel = [&](absl::Span indexes) { + std::vector result; + for (int i : indexes) { + result.push_back(repository.relation(i)); + } + return result; + }; + std::vector all(repository.size()); + std::iota(all.begin(), all.end(), 0); + EXPECT_THAT( + get_rel(all), + UnorderedElementsAre( + Relation{lit_a, LinearExpression2(x, y, -1, 1), 2, 8}, + Relation{Literal(kNoLiteralIndex), LinearExpression2(x, y, 1, -1), 0, + 5}, + Relation{Literal(kNoLiteralIndex), LinearExpression2(x, y, 3, -1), 5, + 15}, + Relation{Literal(kNoLiteralIndex), LinearExpression2(x, z, 1, -1), 0, + 10}, + Relation{lit_a, LinearExpression2(x, y, -3, -2), 1, 15}, + Relation{lit_b, LinearExpression2(kNoIntegerVariable, x, 0, -3), 3, + 5}, + Relation{lit_b, LinearExpression2(x, z, 1, 1), 0, 0})); + EXPECT_THAT(get_rel(repository.IndicesOfRelationsEnforcedBy(lit_a)), + UnorderedElementsAre( + Relation{lit_a, LinearExpression2(x, y, -1, 1), 2, 8}, + Relation{lit_a, LinearExpression2(x, y, -3, -2), 1, 15})); + EXPECT_THAT( + get_rel(repository.IndicesOfRelationsEnforcedBy(lit_b)), + UnorderedElementsAre( + Relation{lit_b, LinearExpression2(kNoIntegerVariable, x, 0, -3), 3, + 5}, + Relation{lit_b, LinearExpression2(x, z, 1, 1), 0, 0})); + EXPECT_THAT( + get_rel(repository.IndicesOfRelationsContaining(x)), + UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 1, -1), 0, 5}, + Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 3, -1), 5, 15}, + Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, z, 1, -1), 0, 10})); + EXPECT_THAT( + get_rel(repository.IndicesOfRelationsContaining(y)), + UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 
1, -1), 0, 5}, + Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 3, -1), 5, 15})); + EXPECT_THAT( + get_rel(repository.IndicesOfRelationsContaining(z)), + UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, z, 1, -1), 0, 10})); + EXPECT_THAT( + get_rel(repository.IndicesOfRelationsBetween(x, y)), + UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 1, -1), 0, 5}, + Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 3, -1), 5, 15})); + EXPECT_THAT( + get_rel(repository.IndicesOfRelationsBetween(y, x)), + UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 1, -1), 0, 5}, + Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, y, 3, -1), 5, 15})); + EXPECT_THAT( + get_rel(repository.IndicesOfRelationsBetween(x, z)), + UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), + LinearExpression2(x, z, 1, -1), 0, 10})); EXPECT_THAT(repository.IndicesOfRelationsBetween(z, y), IsEmpty()); } @@ -574,12 +621,11 @@ std::vector GetRelations(Model& model) { std::vector relations; for (int i = 0; i < repository.size(); ++i) { Relation r = repository.relation(i); - if (r.a.coeff < 0) { + if (r.expr.coeffs[0] < 0) { r = Relation({r.enforcement, - {r.a.var, -r.a.coeff}, - {r.b.var, -r.b.coeff}, - -r.rhs, - -r.lhs}); + LinearExpression2(r.expr.vars[0], r.expr.vars[1], + -r.expr.coeffs[0], -r.expr.coeffs[1]), + -r.rhs, -r.lhs}); } relations.push_back(r); } @@ -638,21 +684,19 @@ TEST(BinaryRelationRepositoryTest, LoadCpModelAddUnaryAndBinaryRelations) { const CpModelMapping& mapping = *model.GetOrCreate(); EXPECT_THAT(GetRelations(model), - UnorderedElementsAre(Relation{mapping.Literal(0), - {mapping.Integer(2), 1}, - {mapping.Integer(3), -1}, - 0, - 10}, - Relation{mapping.Literal(1), - {mapping.Integer(2), 1}, - {kNoIntegerVariable, 0}, - 5, - 10}, - Relation{Literal(kNoLiteralIndex), - {mapping.Integer(2), 3}, - {mapping.Integer(3), -2}, - -10, - 10})); + 
UnorderedElementsAre( + Relation{mapping.Literal(0), + LinearExpression2::Difference(mapping.Integer(2), + mapping.Integer(3)), + 0, 10}, + Relation{mapping.Literal(1), + LinearExpression2(kNoIntegerVariable, + mapping.Integer(2), 0, 1), + 5, 10}, + Relation{Literal(kNoLiteralIndex), + LinearExpression2(mapping.Integer(2), + mapping.Integer(3), 3, -2), + -10, 10})); } TEST(BinaryRelationRepositoryTest, @@ -687,8 +731,10 @@ TEST(BinaryRelationRepositoryTest, // - b => x - 10.a in [10, 90] EXPECT_THAT(GetRelations(model), UnorderedElementsAre( - Relation{mapping.Literal(0), {x, 1}, {b, -10}, 10, 90}, - Relation{mapping.Literal(1), {x, 1}, {a, -10}, 10, 90})); + Relation{mapping.Literal(0), LinearExpression2(b, x, 10, -1), + -90, -10}, + Relation{mapping.Literal(1), LinearExpression2(a, x, 10, -1), + -90, -10})); } TEST(BinaryRelationRepositoryTest, @@ -721,10 +767,12 @@ TEST(BinaryRelationRepositoryTest, // Two binary relations enforced by only one literal should be added: // - a => x + 10.b in [10, 90] // - b => x + 10.a in [10, 90] - EXPECT_THAT(GetRelations(model), - UnorderedElementsAre( - Relation{mapping.Literal(0), {x, 1}, {b, 10}, 10, 90}, - Relation{mapping.Literal(1), {x, 1}, {a, 10}, 10, 90})); + EXPECT_THAT( + GetRelations(model), + UnorderedElementsAre( + Relation{mapping.Literal(0), LinearExpression2(b, x, 10, 1), 10, 90}, + Relation{mapping.Literal(1), LinearExpression2(a, x, 10, 1), 10, + 90})); } TEST(BinaryRelationRepositoryTest, @@ -760,8 +808,9 @@ TEST(BinaryRelationRepositoryTest, EXPECT_THAT( GetRelations(model), UnorderedElementsAre( - Relation{mapping.Literal(0), {x, 1}, {b, 10}, 20, 100}, - Relation{mapping.Literal(1).Negated(), {x, 1}, {a, -10}, 10, 90})); + Relation{mapping.Literal(0), LinearExpression2(b, x, 10, 1), 20, 100}, + Relation{mapping.Literal(1).Negated(), + LinearExpression2(a, x, 10, -1), -90, -10})); } TEST(BinaryRelationRepositoryTest, @@ -797,8 +846,9 @@ TEST(BinaryRelationRepositoryTest, EXPECT_THAT( GetRelations(model), 
UnorderedElementsAre( - Relation{mapping.Literal(0), {x, 1}, {b, -10}, 0, 80}, - Relation{mapping.Literal(1).Negated(), {x, 1}, {a, 10}, 10, 90})); + Relation{mapping.Literal(0), LinearExpression2(b, x, 10, -1), -80, 0}, + Relation{mapping.Literal(1).Negated(), LinearExpression2(a, x, 10, 1), + 10, 90})); } TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_EnforcedRelation) { @@ -807,8 +857,11 @@ TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_EnforcedRelation) { const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); BinaryRelationRepository repository; - repository.Add(lit_a, {x, -1}, {y, 1}, 2, 10); // lit_a => y => x + 2 - repository.Build(); + RootLevelLinear2Bounds* root_level_bounds = + model.GetOrCreate(); + repository.Add(lit_a, LinearExpression2::Difference(y, x), 2, + 10); // lit_a => y => x + 2 + repository.Build(root_level_bounds); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; @@ -823,14 +876,17 @@ TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_EnforcedRelation) { TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_UnenforcedRelation) { Model model; - const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); - const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + RootLevelLinear2Bounds* root_level_bounds = + model.GetOrCreate(); + const IntegerVariable x = model.Add(NewIntegerVariable(-100, 100)); + const IntegerVariable y = model.Add(NewIntegerVariable(-100, 100)); const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); - const Literal kNoLiteral = Literal(kNoLiteralIndex); BinaryRelationRepository repository; - repository.Add(lit_a, {x, -1}, {y, 1}, -5, 10); // lit_a => y => x - 5 - repository.Add(kNoLiteral, {x, -1}, {y, 1}, 2, 10); // y => x + 2 - repository.Build(); + repository.Add(lit_a, LinearExpression2(x, y, -1, 1), -5, + 10); // lit_a => 
y => x - 5 + root_level_bounds->Add(LinearExpression2(x, y, -1, 1), 2, + 10); // y => x + 2 + repository.Build(root_level_bounds); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; @@ -839,21 +895,25 @@ TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_UnenforcedRelation) { repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); EXPECT_TRUE(result); - EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -98), std::make_pair(y, 5))); } TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_EnforcedBoundSmallerThanLevelZeroBound) { Model model; + RootLevelLinear2Bounds* root_level_bounds = + model.GetOrCreate(); const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); const Literal lit_b = Literal(model.Add(NewBooleanVariable()), true); BinaryRelationRepository repository; - repository.Add(lit_a, {x, -1}, {y, 1}, -5, 10); // lit_a => y => x - 5 - repository.Add(lit_b, {x, -1}, {y, 1}, 2, 10); // lit_b => y => x + 2 - repository.Build(); + repository.Add(lit_a, LinearExpression2::Difference(y, x), -5, + 10); // lit_a => y => x - 5 + repository.Add(lit_b, LinearExpression2::Difference(y, x), 2, + 10); // lit_b => y => x + 2 + repository.Build(root_level_bounds); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; @@ -872,8 +932,11 @@ TEST(BinaryRelationRepositoryTest, const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); BinaryRelationRepository repository; - repository.Add(lit_a, {x, -1}, {y, 1}, 2, 10); // lit_a => y => x + 2 - repository.Build(); + RootLevelLinear2Bounds* root_level_bounds = + 
model.GetOrCreate(); + repository.Add(lit_a, LinearExpression2::Difference(y, x), 2, + 10); // lit_a => y => x + 2 + repository.Build(root_level_bounds); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output = {{y, 8}}; @@ -892,8 +955,11 @@ TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_Infeasible) { const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); BinaryRelationRepository repository; - repository.Add(lit_a, {x, -1}, {y, 1}, 8, 10); // lit_a => y => x + 8 - repository.Build(); + RootLevelLinear2Bounds* root_level_bounds = + model.GetOrCreate(); + repository.Add(lit_a, LinearExpression2::Difference(y, x), 8, + 10); // lit_a => y => x + 8 + repository.Build(root_level_bounds); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; @@ -918,10 +984,15 @@ TEST(GreaterThanAtLeastOneOfDetectorTest, AddGreaterThanAtLeastOneOf) { model.Add(ClauseConstraint({lit_a, lit_b, lit_c})); auto* repository = model.GetOrCreate(); - repository->Add(lit_a, {a, -1}, {d, 1}, 2, 1000); // d >= a + 2 - repository->Add(lit_b, {b, -1}, {d, 1}, -1, 1000); // d >= b -1 - repository->Add(lit_c, {c, -1}, {d, 1}, 0, 1000); // d >= c - repository->Build(); + RootLevelLinear2Bounds* root_level_bounds = + model.GetOrCreate(); + repository->Add(lit_a, LinearExpression2::Difference(d, a), 2, + 1000); // d >= a + 2 + repository->Add(lit_b, LinearExpression2::Difference(d, b), -1, + 1000); // d >= b -1 + repository->Add(lit_c, LinearExpression2::Difference(d, c), 0, + 1000); // d >= c + repository->Build(root_level_bounds); auto* detector = model.GetOrCreate(); auto* solver = model.GetOrCreate(); @@ -946,10 +1017,14 @@ TEST(GreaterThanAtLeastOneOfDetectorTest, model.Add(ClauseConstraint({lit_a, lit_b, lit_c})); auto* repository = model.GetOrCreate(); - repository->Add(lit_a, {a, -1}, {d, 
1}, 2, 1000); // d >= a + 2 - repository->Add(lit_b, {b, -1}, {d, 1}, -1, 1000); // d >= b -1 - repository->Add(lit_c, {c, -1}, {d, 1}, 0, 1000); // d >= c - repository->Build(); + RootLevelLinear2Bounds* root_level_bounds = + model.GetOrCreate(); + repository->Add(lit_a, LinearExpression2(a, d, -1, 1), 2, + 1000); // d >= a + 2 + repository->Add(lit_b, LinearExpression2(b, d, -1, 1), -1, + 1000); // d >= b -1 + repository->Add(lit_c, LinearExpression2(c, d, -1, 1), 0, 1000); // d >= c + repository->Build(root_level_bounds); auto* detector = model.GetOrCreate(); auto* solver = model.GetOrCreate(); diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index c2aae7fd4c..ef40d8161e 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -503,8 +503,10 @@ PYBIND11_MODULE(cp_model_helper, m) { py::class_(m, "ResponseWrapper") .def("best_objective_bound", &ResponseWrapper::BestObjectiveBound) - .def("boolean_value", &ResponseWrapper::BooleanValue, py::arg("lit")) - .def("boolean_value", &ResponseWrapper::FixedBooleanValue, py::arg("lit")) + .def("boolean_value", &ResponseWrapper::BooleanValue, + py::arg("lit").none(false)) + .def("boolean_value", &ResponseWrapper::FixedBooleanValue, + py::arg("lit").none(false)) .def("deterministic_time", &ResponseWrapper::DeterministicTime) .def("num_binary_propagations", &ResponseWrapper::NumBinaryPropagations) .def("num_booleans", &ResponseWrapper::NumBooleans) @@ -520,10 +522,12 @@ PYBIND11_MODULE(cp_model_helper, m) { .def("sufficient_assumptions_for_infeasibility", &ResponseWrapper::SufficientAssumptionsForInfeasibility) .def("user_time", &ResponseWrapper::UserTime) - .def("float_value", &ResponseWrapper::FloatValue, py::arg("expr")) - .def("float_value", &ResponseWrapper::FixedFloatValue, py::arg("value")) - .def("value", &ResponseWrapper::Value, py::arg("expr")) - .def("value", &ResponseWrapper::FixedValue, py::arg("value")) + .def("float_value", 
&ResponseWrapper::FloatValue, + py::arg("expr").none(false)) + .def("float_value", &ResponseWrapper::FixedFloatValue, + py::arg("value").none(false)) + .def("value", &ResponseWrapper::Value, py::arg("expr").none(false)) + .def("value", &ResponseWrapper::FixedValue, py::arg("value").none(false)) .def("wall_time", &ResponseWrapper::WallTime); py::class_(m, "SolveWrapper") diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index e8ee7c4695..46cd288225 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -187,6 +187,9 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(cp_model_pb2.OPTIMAL, response_wrapper.status()) self.assertEqual(30.0, response_wrapper.objective_value()) self.assertEqual(30.0, response_wrapper.best_objective_bound()) + self.assertRaises(TypeError, response_wrapper.value, None) + self.assertRaises(TypeError, response_wrapper.float_value, None) + self.assertRaises(TypeError, response_wrapper.boolean_value, None) def test_solution_callback(self): model_string = """ diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 811338522b..bbed30ffa0 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -272,6 +272,18 @@ class CpModelTest(absltest.TestCase): self.assertEqual(10, solver.value(x)) self.assertEqual(-5, solver.value(y)) + def test_none_argument(self) -> None: + model = cp_model.CpModel() + x = model.new_int_var(-10, 10, "x") + y = model.new_int_var(-10, 10, "y") + model.add_linear_constraint(x + 2 * y, 0, 10) + model.minimize(y) + solver = cp_model.CpSolver() + self.assertEqual(cp_model.OPTIMAL, solver.solve(model)) + self.assertRaises(TypeError, solver.value, None) + self.assertRaises(TypeError, solver.float_value, None) + self.assertRaises(TypeError, solver.boolean_value, None) + def test_linear_constraint(self) -> None: model = cp_model.CpModel() 
model.add_linear_constraint(5, 0, 10) diff --git a/ortools/sat/routing_cuts.cc b/ortools/sat/routing_cuts.cc index 13f484a901..cdf96d1edb 100644 --- a/ortools/sat/routing_cuts.cc +++ b/ortools/sat/routing_cuts.cc @@ -1077,12 +1077,24 @@ struct LocalRelation { IntegerVariable UniqueSharedVariable(const sat::Relation& r1, const sat::Relation& r2) { - DCHECK_NE(r1.a.var, r1.b.var); - DCHECK_NE(r2.a.var, r2.b.var); - if (r1.a.var == r2.a.var && r1.b.var != r2.b.var) return r1.a.var; - if (r1.a.var == r2.b.var && r1.b.var != r2.a.var) return r1.a.var; - if (r1.b.var == r2.a.var && r1.a.var != r2.b.var) return r1.b.var; - if (r1.b.var == r2.b.var && r1.a.var != r2.a.var) return r1.b.var; + DCHECK_NE(r1.expr.vars[0], r1.expr.vars[1]); + DCHECK_NE(r2.expr.vars[0], r2.expr.vars[1]); + if (r1.expr.vars[0] == r2.expr.vars[0] && + r1.expr.vars[1] != r2.expr.vars[1]) { + return r1.expr.vars[0]; + } + if (r1.expr.vars[0] == r2.expr.vars[1] && + r1.expr.vars[1] != r2.expr.vars[0]) { + return r1.expr.vars[0]; + } + if (r1.expr.vars[1] == r2.expr.vars[0] && + r1.expr.vars[0] != r2.expr.vars[1]) { + return r1.expr.vars[1]; + } + if (r1.expr.vars[1] == r2.expr.vars[1] && + r1.expr.vars[0] != r2.expr.vars[0]) { + return r1.expr.vars[1]; + } return kNoIntegerVariable; } @@ -1254,10 +1266,11 @@ class RouteRelationsBuilder { binary_relation_repository_.IndicesOfRelationsEnforcedBy( literals_[i])) { const auto& r = binary_relation_repository_.relation(relation_index); - if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + if (r.expr.vars[0] == kNoIntegerVariable || + r.expr.vars[1] == kNoIntegerVariable) { continue; } - cc_finder.AddEdge(r.a.var, r.b.var); + cc_finder.AddEdge(r.expr.vars[0], r.expr.vars[1]); } } const std::vector> connected_components = @@ -1283,10 +1296,11 @@ class RouteRelationsBuilder { binary_relation_repository_.IndicesOfRelationsEnforcedBy( literals_[i])) { const auto& r = binary_relation_repository_.relation(relation_index); - if (r.a.var == 
kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + if (r.expr.vars[0] == kNoIntegerVariable || + r.expr.vars[1] == kNoIntegerVariable) { continue; } - const int dimension = dimension_by_var_[r.a.var]; + const int dimension = dimension_by_var_[r.expr.vars[0]]; adjacent_relation_indices_[dimension][tails_[i]].push_back( relation_index); adjacent_relation_indices_[dimension][heads_[i]].push_back( @@ -1360,24 +1374,25 @@ class RouteRelationsBuilder { binary_relation_repository_.IndicesOfRelationsEnforcedBy( literals_[arc_index])) { const auto& r = binary_relation_repository_.relation(relation_index); - if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + if (r.expr.vars[0] == kNoIntegerVariable || + r.expr.vars[1] == kNoIntegerVariable) { continue; } - if (r.a.var == node_expr.var) { + if (r.expr.vars[0] == node_expr.var) { if (candidate_var != kNoIntegerVariable && - candidate_var != r.b.var) { + candidate_var != r.expr.vars[1]) { candidate_var_is_unique = false; break; } - candidate_var = r.b.var; + candidate_var = r.expr.vars[1]; } - if (r.b.var == node_expr.var) { + if (r.expr.vars[1] == node_expr.var) { if (candidate_var != kNoIntegerVariable && - candidate_var != r.a.var) { + candidate_var != r.expr.vars[0]) { candidate_var_is_unique = false; break; } - candidate_var = r.a.var; + candidate_var = r.expr.vars[0]; } } if (candidate_var != kNoIntegerVariable && candidate_var_is_unique) { @@ -1488,21 +1503,26 @@ class RouteRelationsBuilder { // Try to match the relation variables with the node expression // variables. First swap the relation terms if needed (this does not // change the relation bounds). 
- if ((r.a.var != kNoIntegerVariable && r.a.var == head_expr.var) || - (r.b.var != kNoIntegerVariable && r.b.var == tail_expr.var)) { - std::swap(r.a, r.b); + if ((r.expr.vars[0] != kNoIntegerVariable && + r.expr.vars[0] == head_expr.var) || + (r.expr.vars[1] != kNoIntegerVariable && + r.expr.vars[1] == tail_expr.var)) { + std::swap(r.expr.vars[0], r.expr.vars[1]); + std::swap(r.expr.coeffs[0], r.expr.coeffs[1]); } // If the relation has only one term, try to remove the variable // in the node expression corresponding to the missing term. - if (r.a.var == kNoIntegerVariable) { + if (r.expr.vars[0] == kNoIntegerVariable) { if (!to_constant(tail_expr)) continue; - } else if (r.b.var == kNoIntegerVariable) { + } else if (r.expr.vars[1] == kNoIntegerVariable) { if (!to_constant(head_expr)) continue; } // If the relation and node expression variables do not match, we // cannot use this relation for this arc. - if (!((tail_expr.var == r.a.var && head_expr.var == r.b.var) || - (tail_expr.var == r.b.var && head_expr.var == r.a.var))) { + if (!((tail_expr.var == r.expr.vars[0] && + head_expr.var == r.expr.vars[1]) || + (tail_expr.var == r.expr.vars[1] && + head_expr.var == r.expr.vars[0]))) { continue; } ComputeArcRelation(i, dimension, tail_expr, head_expr, r, @@ -1553,20 +1573,25 @@ class RouteRelationsBuilder { const NodeExpression& tail_expr, const NodeExpression& head_expr, sat::Relation r, const IntegerTrail& integer_trail) { - DCHECK((r.a.var == tail_expr.var && r.b.var == head_expr.var) || - (r.a.var == head_expr.var && r.b.var == tail_expr.var)); - if (r.a.var != tail_expr.var) std::swap(r.a, r.b); - if (r.a.coeff == 0 || tail_expr.coeff == 0) { - LocalRelation result = ComputeArcUnaryRelation(head_expr, tail_expr, - r.b.coeff, r.lhs, r.rhs); + DCHECK( + (r.expr.vars[0] == tail_expr.var && r.expr.vars[1] == head_expr.var) || + (r.expr.vars[0] == head_expr.var && r.expr.vars[1] == tail_expr.var)); + if (r.expr.vars[0] != tail_expr.var) { + std::swap(r.expr.vars[0], 
r.expr.vars[1]); + std::swap(r.expr.coeffs[0], r.expr.coeffs[1]); + } + if (r.expr.coeffs[0] == 0 || tail_expr.coeff == 0) { + LocalRelation result = ComputeArcUnaryRelation( + head_expr, tail_expr, r.expr.coeffs[1], r.lhs, r.rhs); std::swap(result.tail_coeff, result.head_coeff); ProcessNewArcRelation(arc_index, dimension, result); return; } - if (r.b.coeff == 0 || head_expr.coeff == 0) { - ProcessNewArcRelation(arc_index, dimension, - ComputeArcUnaryRelation(tail_expr, head_expr, - r.a.coeff, r.lhs, r.rhs)); + if (r.expr.coeffs[1] == 0 || head_expr.coeff == 0) { + ProcessNewArcRelation( + arc_index, dimension, + ComputeArcUnaryRelation(tail_expr, head_expr, r.expr.coeffs[0], r.lhs, + r.rhs)); return; } const auto [lhs, rhs] = @@ -1680,14 +1705,16 @@ IntegerValue GetDifferenceLowerBound( // TODO(user): overflows could happen if the node expressions are // provided by the user in the model proto. auto lower_bound = [&](IntegerValue k) { - const IntegerValue y_coeff = y_expr.coeff - k * r.b.coeff; - const IntegerValue x_coeff = k * (-r.a.coeff) - x_expr.coeff; + const IntegerValue y_coeff = y_expr.coeff - k * r.expr.coeffs[1]; + const IntegerValue x_coeff = k * (-r.expr.coeffs[0]) - x_expr.coeff; return y_coeff * (y_coeff >= 0 ? y_var_bounds.first : y_var_bounds.second) + x_coeff * (x_coeff >= 0 ? x_var_bounds.first : x_var_bounds.second) + k * (k >= 0 ? 
r.lhs : r.rhs); }; - const IntegerValue k_x = MathUtil::FloorOfRatio(x_expr.coeff, -r.a.coeff); - const IntegerValue k_y = MathUtil::FloorOfRatio(y_expr.coeff, r.b.coeff); + const IntegerValue k_x = + MathUtil::FloorOfRatio(x_expr.coeff, -r.expr.coeffs[0]); + const IntegerValue k_y = + MathUtil::FloorOfRatio(y_expr.coeff, r.expr.coeffs[1]); IntegerValue result = lower_bound(0); result = std::max(result, lower_bound(k_x)); result = std::max(result, lower_bound(k_x + 1)); @@ -1702,14 +1729,14 @@ std::pair GetDifferenceBounds( const sat::Relation& r, const std::pair& x_var_bounds, const std::pair& y_var_bounds) { - DCHECK_EQ(x_expr.var, r.a.var); - DCHECK_EQ(y_expr.var, r.b.var); + DCHECK_EQ(x_expr.var, r.expr.vars[0]); + DCHECK_EQ(y_expr.var, r.expr.vars[1]); DCHECK_NE(x_expr.var, kNoIntegerVariable); DCHECK_NE(y_expr.var, kNoIntegerVariable); DCHECK_NE(x_expr.coeff, 0); DCHECK_NE(y_expr.coeff, 0); - DCHECK_NE(r.a.coeff, 0); - DCHECK_NE(r.b.coeff, 0); + DCHECK_NE(r.expr.coeffs[0], 0); + DCHECK_NE(r.expr.coeffs[1], 0); const IntegerValue lb = GetDifferenceLowerBound(x_expr, y_expr, r, x_var_bounds, y_var_bounds); const IntegerValue ub = -GetDifferenceLowerBound( @@ -1830,7 +1857,8 @@ BinaryRelationRepository ComputePartialBinaryRelationRepository( ToPositiveIntegerVariable(vars[0]), ToPositiveIntegerVariable(vars[1])); } - repository.Build(); + Model empty_model; + repository.Build(empty_model.GetOrCreate()); return repository; } diff --git a/ortools/sat/routing_cuts_test.cc b/ortools/sat/routing_cuts_test.cc index 9b38802d5f..707b9f8b37 100644 --- a/ortools/sat/routing_cuts_test.cc +++ b/ortools/sat/routing_cuts_test.cc @@ -65,7 +65,7 @@ std::pair ExactDifferenceBounds( IntegerValue ub = kMinIntegerValue; for (IntegerValue x = x_bounds.first; x <= x_bounds.second; ++x) { for (IntegerValue y = y_bounds.first; y <= y_bounds.second; ++y) { - const IntegerValue r_value = x * r.a.coeff + y * r.b.coeff; + const IntegerValue r_value = x * r.expr.coeffs[0] + y * 
r.expr.coeffs[1]; if (r_value < r.lhs || r_value > r.rhs) continue; const IntegerValue difference = y_expr.ValueAt(y) - x_expr.ValueAt(x); lb = std::min(lb, difference); @@ -101,8 +101,7 @@ TEST(GetDifferenceBounds, RandomTest) { const NodeExpression y_expr(y, B, absl::Uniform(random, -5, 5)); const Relation r{ .enforcement = lit, - .a = LinearTerm(x, a), - .b = LinearTerm(y, b), + .expr = LinearExpression2(x, y, a, b), .lhs = lhs, .rhs = rhs, }; @@ -162,10 +161,10 @@ TEST(MinOutgoingFlowHelperTest, CapacityConstraints) { // picked up by the vehicle leaving n. const int head_load = head == 0 ? 0 : head + 10; // loads[head] - loads[tail] >= head_load - repository->Add(literal, {loads[head], 1}, {loads[tail], -1}, head_load, - 1000); + repository->Add(literal, LinearExpression2(loads[head], loads[tail], 1, -1), + head_load, 1000); } - repository->Build(); + repository->Build(model.GetOrCreate()); // Subject under test. MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); @@ -230,15 +229,17 @@ TEST_P(DimensionBasedMinOutgoingFlowHelperTest, BasicCapacities) { if (tail == 0 || head == 0) continue; if (pickup) { // loads[head] - loads[tail] >= demand - repository->Add(literal, {loads[head], 1}, {loads[tail], -1}, + repository->Add(literal, + LinearExpression2(loads[head], loads[tail], 1, -1), demands[use_outgoing_load ? head : tail], 1000); } else { // loads[tail] - loads[head] >= demand - repository->Add(literal, {loads[tail], 1}, {loads[head], -1}, + repository->Add(literal, + LinearExpression2(loads[tail], loads[head], 1, -1), demands[use_outgoing_load ? 
head : tail], 1000); } } - repository->Build(); + repository->Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -301,15 +302,17 @@ TEST_P(DimensionBasedMinOutgoingFlowHelperTest, const int tail = tails[i]; if (pickup) { // loads[head] - loads[tail] >= demand - repository->Add(literals[i], {loads[head], 1}, {loads[tail], -1}, + repository->Add(literals[i], + LinearExpression2::Difference(loads[head], loads[tail]), demands[use_outgoing_load ? head : tail], 1000); } else { // loads[tail] - loads[head] >= demand - repository->Add(literals[i], {loads[tail], 1}, {loads[head], -1}, + repository->Add(literals[i], + LinearExpression2::Difference(loads[tail], loads[head]), demands[use_outgoing_load ? head : tail], 1000); } } - repository->Build(); + repository->Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -357,9 +360,9 @@ TEST(MinOutgoingFlowHelperTest, NodeExpressionWithConstant) { auto* repository = model.GetOrCreate(); // Capacity constraint: (offset_load2 + offset) - load1 >= demand1 - repository->Add(literals[0], {offset_load2, 1}, {load1, -1}, demand1 - offset, - 1000); - repository->Build(); + repository->Add(literals[0], LinearExpression2(offset_load2, load1, 1, -1), + demand1 - offset, 1000); + repository->Build(model.GetOrCreate()); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -398,9 +401,10 @@ TEST(MinOutgoingFlowHelperTest, ConstantNodeExpression) { auto* repository = model.GetOrCreate(); // Capacity constraint: load2 - load1 >= demand1 - repository->Add(literals[0], {kNoIntegerVariable, 0}, {load1, -1}, + repository->Add(literals[0], + 
LinearExpression2(kNoIntegerVariable, load1, 0, -1), demand1 - load2, 1000); - repository->Build(); + repository->Build(model.GetOrCreate()); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -451,12 +455,13 @@ TEST(MinOutgoingFlowHelperTest, NodeExpressionUsingArcLiteralAsVariable) { // Capacity constraint: load2 - load1 >= demand1. This expands to // (capacity - demand2 - demand3 * l) - load1 >= demand1, i.e., // -demand3 * l - load1 >= demand1 + demand2 - capacity - repository->Add(literals[0], {arc_2_3_var, -demand3}, {load1, -1}, + repository->Add(literals[0], + LinearExpression2(arc_2_3_var, load1, -demand3, -1), demand1 + demand2 - capacity, 1000); // Capacity constraint: load3 - load2 >= demand2. This expands to // (capacity - demand3) - (capacity - demand2 - demand3 * l) >= demand2 which, // when l is 1, simplifies to 0 >= 0. Hence this constraint is ignored. - repository->Build(); + repository->Build(model.GetOrCreate()); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -508,13 +513,14 @@ TEST(MinOutgoingFlowHelperTest, // Capacity constraint: load2 - load1 >= demand1. This expands to // (capacity - demand2 - demand3 + demand3 * l) - load1 >= demand1, i.e., // demand3 * l - load1 >= demand1 + demand2 + demand3 - capacity - repository->Add(literals[0], {arc_2_3_var, demand3}, {load1, -1}, + repository->Add(literals[0], + LinearExpression2(arc_2_3_var, load1, demand3, -1), demand1 + demand2 + demand3 - capacity, 1000); // Capacity constraint: load3 - load2 >= demand2. This expands to // (capacity - demand3) - (capacity - demand2 - demand3 + demand3 * l) >= // demand2 which, when l is 0, simplifies to 0 >= 0. Hence this constraint is // ignored. 
- repository->Build(); + repository->Build(model.GetOrCreate()); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -566,12 +572,12 @@ TEST(MinOutgoingFlowHelperTest, ArcNodeExpressionsWithSharedVariable) { // Capacity constraint: load2 - load1 >= demand1. This expands to // (capacity - demand2 - demand3) - coeff * x - load1 >= demand1, i.e., // -coeff * x - load1 >= demand1 + demand2 + demand3 - capacity. - repository->Add(literals[0], {x, -coeff}, {load1, -1}, + repository->Add(literals[0], LinearExpression2(x, load1, -coeff, -1), demand1 + demand2 + demand3 - capacity, 1000); // Capacity constraint: load3 - load2 >= demand2. This expands to // (capacity - demand3) - (capacity - demand2 - demand3) >= demand2, which // simplifies to 0 >= 0. Hence this constraint is ignored. - repository->Build(); + repository->Build(model.GetOrCreate()); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create( num_nodes, tails, heads, literals, @@ -629,13 +635,15 @@ TEST(MinOutgoingFlowHelperTest, UnaryRelationForTwoNodeExpressions) { // constraint is enforced by arc_1_2_lit we can assume it is true, which // implies that x = 0. Hence the constraint simplifies to load1 <= capacity - // demand2 - demand1. - repository->Add(literals[0], {load1, 1}, {kNoIntegerVariable, 0}, 0, + repository->Add(literals[0], + LinearExpression2(load1, kNoIntegerVariable, 1, 0), 0, capacity - demand1 - demand2); // Capacity constraint: load3 - load2 >= demand2. This expands to // load3 - ((capacity - demand2) - demand1 * x) >= demand2, i.e. 
to load3 + // demand1 * x >= capacity - repository->Add(literals[1], {load3, 1}, {x, demand1}, capacity, 1000); - repository->Build(); + repository->Add(literals[1], LinearExpression2(load3, x, 1, demand1), + capacity, 1000); + repository->Build(model.GetOrCreate()); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -687,10 +695,12 @@ TEST(MinOutgoingFlowHelperTest, NodeMustBeInnerNode) { auto* repository = model.GetOrCreate(); for (int i = 0; i < num_arcs; ++i) { // loads[head] - loads[tail] >= demand[arc] - repository->Add(literals[i], {loads[heads[i]], 1}, {loads[tails[i]], -1}, - demands[i], 1000); + repository->Add( + literals[i], + LinearExpression2(loads[heads[i]], loads[tails[i]], 1, -1), + demands[i], 1000); } - repository->Build(); + repository->Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); @@ -745,10 +755,12 @@ TEST(MinOutgoingFlowHelperTest, BetterUseOfUpperBound) { auto* repository = model.GetOrCreate(); for (int i = 0; i < num_arcs; ++i) { // loads[head] - loads[tail] >= demand[arc] - repository->Add(literals[i], {loads[heads[i]], 1}, {loads[tails[i]], -1}, - demands[i], 1000); + repository->Add( + literals[i], + LinearExpression2::Difference(loads[heads[i]], loads[tails[i]]), + demands[i], 1000); } - repository->Build(); + repository->Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( loads.size(), tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -783,10 +795,11 @@ TEST(MinOutgoingFlowHelperTest, DimensionBasedMinOutgoingFlow_IsolatedNodes) { literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); variables.push_back(model.Add(NewIntegerVariable(0, 100))); // Dummy relation, used only to associate a variable with each node. 
- repository->Add(literals.back(), {variables[head], 1}, {variables[0], -1}, - 1, 100); + repository->Add(literals.back(), + LinearExpression2(variables[head], variables[0], 1, -1), 1, + 100); } - repository->Build(); + repository->Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -834,10 +847,10 @@ TEST(MinOutgoingFlowHelperTest, TimeWindows) { const auto& [tail, head] = arc; const int travel_time = 10 - tail; // times[head] - times[tail] >= travel_time - repository->Add(literal, {times[head], 1}, {times[tail], -1}, travel_time, - 1000); + repository->Add(literal, LinearExpression2(times[head], times[tail], 1, -1), + travel_time, 1000); } - repository->Build(); + repository->Build(model.GetOrCreate()); // Subject under test. MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); @@ -963,12 +976,16 @@ TEST(MinOutgoingFlowHelperTest, SubsetMightBeServedWithKRoutes) { const auto& [tail, head] = arc; // vars[head] >= vars[tail] + load[head]; - repository->Add(literal, {cumul_vars_1[head], 1}, {cumul_vars_1[tail], -1}, - load1[head], 10000); - repository->Add(literal, {cumul_vars_2[head], 1}, {cumul_vars_2[tail], -1}, - load2[head], 10000); + repository->Add( + literal, + LinearExpression2(cumul_vars_1[head], cumul_vars_1[tail], 1, -1), + load1[head], 10000); + repository->Add( + literal, + LinearExpression2(cumul_vars_2[head], cumul_vars_2[tail], 1, -1), + load2[head], 10000); } - repository->Build(); + repository->Build(model.GetOrCreate()); const int optimal = SolveTwoDimensionBinPacking(capacity, load1, load2); EXPECT_EQ(optimal, 2); @@ -1031,12 +1048,16 @@ TEST(MinOutgoingFlowHelperTest, SubsetMightBeServedWithKRoutesRandom) { const auto& [tail, head] = arc; // vars[head] >= vars[tail] + load[head]; - repository->Add(literal, {cumul_vars_1[head], 1}, {cumul_vars_1[tail], -1}, - load1[head], 10000); - 
repository->Add(literal, {cumul_vars_2[head], 1}, {cumul_vars_2[tail], -1}, - load2[head], 10000); + repository->Add( + literal, + LinearExpression2::Difference(cumul_vars_1[head], cumul_vars_1[tail]), + load1[head], 10000); + repository->Add( + literal, + LinearExpression2::Difference(cumul_vars_2[head], cumul_vars_2[tail]), + load2[head], 10000); } - repository->Build(); + repository->Build(model.GetOrCreate()); // To check our indices mapping, lets remove a random nodes from the subset std::vector subset; @@ -1160,10 +1181,12 @@ TEST(MinOutgoingFlowHelperTest, const Literal literal = literals[arc]; // vars[head] >= vars[tail] + travel_times[arc]; - repository->Add(literal, {cumul_vars[head], 1}, {cumul_vars[tail], -1}, - travel_times[arc], 10000); + repository->Add( + literal, + LinearExpression2::Difference(cumul_vars[head], cumul_vars[tail]), + travel_times[arc], 10000); } - repository->Build(); + repository->Build(model.GetOrCreate()); // Serve everyone but the depot. std::vector subset; @@ -1387,15 +1410,17 @@ TEST(RouteRelationsHelperTest, Basic) { const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); BinaryRelationRepository repository; - repository.Add(literals[0], {a, 1}, {b, -1}, 50, 1000); - repository.Add(literals[1], {a, 1}, {c, -1}, 70, 1000); - repository.Add(literals[2], {c, 1}, {b, -1}, 40, 1000); - repository.Add(literals[0], {NegationOf(u), -1}, {NegationOf(v), 1}, 4, 100); - repository.Add(literals[1], {u, 1}, {w, -1}, 4, 100); - repository.Add(literals[2], {w, -1}, {v, 1}, -100, -3); - repository.Add(literals[3], {x, 1}, {w, -1}, 5, 100); - repository.Add(literals[4], {z, 1}, {y, -1}, 7, 100); - repository.Build(); + repository.Add(literals[0], LinearExpression2::Difference(a, b), 50, 1000); + repository.Add(literals[1], LinearExpression2::Difference(a, c), 70, 1000); + repository.Add(literals[2], LinearExpression2::Difference(c, b), 40, 1000); + 
repository.Add(literals[0], + LinearExpression2(NegationOf(u), NegationOf(v), -1, 1), 4, + 100); + repository.Add(literals[1], LinearExpression2::Difference(u, w), 4, 100); + repository.Add(literals[2], LinearExpression2(w, v, -1, 1), -100, -3); + repository.Add(literals[3], LinearExpression2::Difference(x, w), 5, 100); + repository.Add(literals[4], LinearExpression2::Difference(z, y), 7, 100); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1480,16 +1505,17 @@ TEST(RouteRelationsHelperTest, UnenforcedRelations) { const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); BinaryRelationRepository repository; - repository.Add(literals[0], {b, 1}, {a, -1}, 1, 1); - repository.Add(literals[1], {c, 1}, {b, -1}, 2, 2); - repository.Add(literals[2], {d, 1}, {c, -1}, 3, 3); - repository.Add(literals[3], {a, 1}, {d, -1}, 4, 4); + RootLevelLinear2Bounds* bounds = model.GetOrCreate(); + repository.Add(literals[0], LinearExpression2::Difference(b, a), 1, 1); + repository.Add(literals[1], LinearExpression2::Difference(c, b), 2, 2); + repository.Add(literals[2], LinearExpression2::Difference(d, c), 3, 3); + repository.Add(literals[3], LinearExpression2::Difference(a, d), 4, 4); // Several unenforced relations on the diagonal arc. The one with the +/-1 // coefficients should be preferred. 
- repository.Add(Literal(kNoLiteralIndex), {c, 3}, {a, -2}, 1, 9); - repository.Add(Literal(kNoLiteralIndex), {c, 1}, {a, -1}, 5, 5); - repository.Add(Literal(kNoLiteralIndex), {c, 2}, {a, -3}, 3, 8); - repository.Build(); + bounds->Add(LinearExpression2(c, a, 3, -2), 1, 9); + bounds->Add(LinearExpression2(c, a, 1, -1), 5, 5); + bounds->Add(LinearExpression2(c, a, 2, -3), 3, 8); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1529,14 +1555,14 @@ TEST(RouteRelationsHelperTest, SeveralVariablesPerNode) { const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); BinaryRelationRepository repository; - repository.Add(literals[0], {b, 1}, {a, -1}, 50, 1000); - repository.Add(literals[1], {c, 1}, {b, -1}, 70, 1000); - repository.Add(literals[0], {z, 1}, {y, -1}, 5, 100); - repository.Add(literals[1], {y, 1}, {x, -1}, 7, 100); + repository.Add(literals[0], LinearExpression2::Difference(b, a), 50, 1000); + repository.Add(literals[1], LinearExpression2::Difference(c, b), 70, 1000); + repository.Add(literals[0], LinearExpression2::Difference(z, y), 5, 100); + repository.Add(literals[1], LinearExpression2::Difference(y, x), 7, 100); // Weird relation linking time and load variables, causing all the variables // to be in a single "dimension". - repository.Add(literals[0], {x, 1}, {a, -1}, 0, 100); - repository.Build(); + repository.Add(literals[0], LinearExpression2::Difference(x, a), 0, 100); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1561,8 +1587,8 @@ TEST(RouteRelationsHelperTest, ComplexVariableRelations) { const IntegerVariable b = model.Add(NewIntegerVariable(0, 1)); BinaryRelationRepository repository; // "complex" relation with non +1/-1 coefficients. 
- repository.Add(literals[0], {b, 10}, {a, 1}, 0, 150); - repository.Build(); + repository.Add(literals[0], LinearExpression2(b, a, 10, 1), 0, 150); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = { .num_dimensions = 0, @@ -1595,7 +1621,7 @@ TEST(RouteRelationsHelperTest, TwoUnaryRelationsPerArc) { encoder.AssociateToIntegerEqualValue(literals[0], a, 20); encoder.AssociateToIntegerLiteral(literals[0], {b, 50}); BinaryRelationRepository repository; - repository.Build(); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = { .num_dimensions = 0, @@ -1625,11 +1651,11 @@ TEST(RouteRelationsHelperTest, SeveralRelationsPerArc) { const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); BinaryRelationRepository repository; - repository.Add(literals[0], {b, 1}, {a, -1}, 50, 1000); - repository.Add(literals[1], {c, 1}, {b, -1}, 70, 1000); + repository.Add(literals[0], LinearExpression2::Difference(b, a), 50, 1000); + repository.Add(literals[1], LinearExpression2::Difference(c, b), 70, 1000); // Add a second relation for some arc. 
- repository.Add(literals[1], {c, 2}, {b, -3}, 100, 200); - repository.Build(); + repository.Add(literals[1], LinearExpression2(c, b, 2, -3), 100, 200); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1661,9 +1687,9 @@ TEST(RouteRelationsHelperTest, SeveralArcsPerLiteral) { const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); BinaryRelationRepository repository; - repository.Add(literals[0], {b, 1}, {a, -1}, 50, 1000); - repository.Add(literals[0], {c, 1}, {b, -1}, 40, 1000); - repository.Build(); + repository.Add(literals[0], LinearExpression2::Difference(b, a), 50, 1000); + repository.Add(literals[0], LinearExpression2::Difference(c, b), 40, 1000); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1703,14 +1729,14 @@ TEST(RouteRelationsHelperTest, InconsistentRelationIsSkipped) { const IntegerVariable e = model.Add(NewIntegerVariable(0, 100)); const IntegerVariable f = model.Add(NewIntegerVariable(0, 100)); BinaryRelationRepository repository; - repository.Add(literals[0], {b, 1}, {a, -1}, 0, 0); - repository.Add(literals[1], {c, 1}, {b, -1}, 1, 1); - repository.Add(literals[2], {d, 1}, {c, -1}, 2, 2); - repository.Add(literals[3], {e, 1}, {d, -1}, 3, 3); - repository.Add(literals[4], {f, 1}, {b, -1}, 4, 4); + repository.Add(literals[0], LinearExpression2::Difference(b, a), 0, 0); + repository.Add(literals[1], LinearExpression2::Difference(c, b), 1, 1); + repository.Add(literals[2], LinearExpression2::Difference(d, c), 2, 2); + repository.Add(literals[3], LinearExpression2::Difference(e, d), 3, 3); + repository.Add(literals[4], LinearExpression2::Difference(f, b), 4, 4); // Inconsistent relation for arc 5->3 (should be between f and d). 
- repository.Add(literals[5], {f, 2}, {b, -1}, 5, 5); - repository.Build(); + repository.Add(literals[5], LinearExpression2(f, b, 2, -1), 5, 5); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1763,17 +1789,17 @@ TEST(RouteRelationsHelperTest, InconsistentRelationWithMultipleArcsPerLiteral) { const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); const IntegerVariable e = model.Add(NewIntegerVariable(0, 100)); BinaryRelationRepository repository; - repository.Add(literals[0], {b, 1}, {a, -1}, 0, 0); - repository.Add(literals[1], {c, 1}, {b, -1}, 1, 1); - repository.Add(literals[2], {d, 1}, {c, -1}, 2, 2); - repository.Add(literals[3], {a, 1}, {d, -1}, 3, 3); + repository.Add(literals[0], LinearExpression2::Difference(b, a), 0, 0); + repository.Add(literals[1], LinearExpression2::Difference(c, b), 1, 1); + repository.Add(literals[2], LinearExpression2::Difference(d, c), 2, 2); + repository.Add(literals[3], LinearExpression2::Difference(a, d), 3, 3); // Inconsistent relation for arc 4->1 (should be between e and b). Note that // arcs 4->1 and 4->3 are enforced by the same literal, thus both should // be true at the same time, hence the crossed bounds below. 
- repository.Add(literals[4], {e, 1}, {d, -1}, 4, 4); - repository.Add(literals[5], {e, 1}, {d, -1}, 5, 5); - repository.Build(); + repository.Add(literals[4], LinearExpression2::Difference(e, d), 4, 4); + repository.Add(literals[5], LinearExpression2::Difference(e, d), 5, 5); + repository.Build(model.GetOrCreate()); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -2406,10 +2432,11 @@ TEST(CreateCVRPCutGeneratorTest, InfeasiblePathCuts) { const int head = heads[i]; if (tail == 0 || head == 0) continue; // loads[head] >= loads[tail] + demand[tail] - repository->Add(literals[i], {loads[head], 1}, {loads[tail], -1}, + repository->Add(literals[i], + LinearExpression2(loads[head], loads[tail], 1, -1), demands[tail], 10000); } - repository->Build(); + repository->Build(model.GetOrCreate()); // Enable the cut generator. model.GetOrCreate() ->set_routing_cut_max_infeasible_path_length(10); diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 65da5a23da..9f79aad290 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -1100,16 +1100,25 @@ std::string CompletionTimeEvent::DebugString() const { "]"); } - void CtExhaustiveHelper::Init( const absl::Span events, Model* model) { max_task_index_ = 0; if (events.empty()) return; + // We compute the max_task_index_ from the events early to avoid sorting // the events if there are too many of them. 
for (const auto& event : events) { max_task_index_ = std::max(max_task_index_, event.task_index); } + BuildPredecessors(events, model); + VLOG(2) << "num_tasks:" << max_task_index_ + 1 + << " num_precedences:" << predecessors_.num_entries() + << " predecessors size:" << predecessors_.size(); +} + +void CtExhaustiveHelper::BuildPredecessors( + const absl::Span events, Model* model) { + predecessors_.clear(); if (events.size() > 100) return; ReifiedLinear2Bounds* binary_relations = @@ -1120,6 +1129,7 @@ void CtExhaustiveHelper::Init( [](const CompletionTimeEvent& a, const CompletionTimeEvent& b) { return a.task_index < b.task_index; }); + predecessors_.reserve(max_task_index_ + 1); for (const auto& e1 : sorted_events) { for (const auto& e2 : sorted_events) { @@ -1131,9 +1141,6 @@ void CtExhaustiveHelper::Init( } } } - VLOG(2) << "num_tasks:" << max_task_index_ + 1 - << " num_precedences:" << predecessors_.num_entries() - << " predecessors size:" << predecessors_.size(); } bool CtExhaustiveHelper::PermutationIsCompatibleWithPrecedences( diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index 920f5a23e6..e6c78edd64 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -174,6 +174,9 @@ class CtExhaustiveHelper { absl::Span permutation); private: + void BuildPredecessors(absl::Span events, + Model* model); + CompactVectorVector predecessors_; int max_task_index_ = 0; std::vector visited_; diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc index 543bafd019..38263ad2bb 100644 --- a/ortools/sat/scheduling_cuts_test.cc +++ b/ortools/sat/scheduling_cuts_test.cc @@ -587,7 +587,7 @@ double ExactMakespan(absl::Span sizes, std::vector& demands, } builder.Minimize(obj); const CpSolverResponse response = - SolveWithParameters(builder.Build(), "num_search_workers:8"); + SolveWithParameters(builder.Build(), "num_workers:8"); EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); return 
response.objective_value(); } diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index 149bc24703..fa6fe706a5 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -48,7 +48,7 @@ SchedulingConstraintHelper::SchedulingConstraintHelper( assignment_(sat_solver_->Assignment()), integer_trail_(model->GetOrCreate()), watcher_(model->GetOrCreate()), - precedence_relations_(model->GetOrCreate()), + linear2_bounds_(model->GetOrCreate()), root_level_lin2_bounds_(model->GetOrCreate()), starts_(std::move(starts)), ends_(std::move(ends)), @@ -87,7 +87,7 @@ SchedulingConstraintHelper::SchedulingConstraintHelper(int num_tasks, sat_solver_(model->GetOrCreate()), assignment_(sat_solver_->Assignment()), integer_trail_(model->GetOrCreate()), - precedence_relations_(model->GetOrCreate()), + linear2_bounds_(model->GetOrCreate()), root_level_lin2_bounds_(model->GetOrCreate()), capacity_(num_tasks), cached_size_min_(new IntegerValue[capacity_]), @@ -342,26 +342,22 @@ bool SchedulingConstraintHelper::SynchronizeAndSetTimeDirection( return true; } -// TODO(user): be more precise when we know a and b are in disjunction. -// we really just need start_b > start_a, or even >= if duration is non-zero. IntegerValue SchedulingConstraintHelper::GetCurrentMinDistanceBetweenTasks( int a, int b, bool add_reason_if_after) { const AffineExpression before = ends_[a]; const AffineExpression after = starts_[b]; - LinearExpression2 expr(before.var, after.var, before.coeff, -after.coeff); - - // We take the min of the level zero (end_a - start_b) and the one coming from - // a conditional precedence at true. 
- const IntegerValue conditional_ub = precedence_relations_->UpperBound(expr); - const IntegerValue level_zero_ub = integer_trail_->LevelZeroUpperBound(expr); - const IntegerValue expr_ub = std::min(conditional_ub, level_zero_ub); + const LinearExpression2 expr(before.var, after.var, before.coeff, + -after.coeff); + const IntegerValue expr_ub = linear2_bounds_->UpperBound(expr); const IntegerValue needed_offset = before.constant - after.constant; const IntegerValue ub_of_end_minus_start = expr_ub + needed_offset; const IntegerValue distance = -ub_of_end_minus_start; - if (add_reason_if_after && distance >= 0 && level_zero_ub > conditional_ub) { - precedence_relations_->AddReasonForUpperBoundLowerThan( - expr, conditional_ub, MutableLiteralReason(), MutableIntegerReason()); + if (add_reason_if_after && distance >= 0) { + // TODO(user): be more precise when we know a and b are in disjunction. we + // really just need end_b > start_a. + linear2_bounds_->AddReasonForUpperBoundLowerThan( + expr, expr_ub, MutableLiteralReason(), MutableIntegerReason()); } return distance; } @@ -370,41 +366,46 @@ IntegerValue SchedulingConstraintHelper::GetCurrentMinDistanceBetweenTasks( // associated to task a before task b. However we only call this for task that // are in detectable precedence, which means the normal precedence or linear // propagator should have already propagated that Boolean too. 
-bool SchedulingConstraintHelper::PropagatePrecedence(int a, int b) { +bool SchedulingConstraintHelper::NotifyLevelZeroPrecedence(int a, int b) { CHECK(IsPresent(a)); CHECK(IsPresent(b)); CHECK_EQ(sat_solver_->CurrentDecisionLevel(), 0); - const AffineExpression before = ends_[a]; - const AffineExpression after = starts_[b]; - if (after.coeff != 1) return true; - if (before.coeff != 1) return true; - if (after.var == kNoIntegerVariable) return true; - if (before.var == kNoIntegerVariable) return true; - if (before.var == after.var) { - if (before.constant <= after.constant) { - return true; - } else { + // Convert before <= after to linear2 <= rhs. + LinearExpression2 expr; + IntegerValue rhs; + { + const AffineExpression before = ends_[a]; + const AffineExpression after = starts_[b]; + expr.vars[0] = before.var; + expr.coeffs[0] = before.coeff; + expr.vars[1] = after.var; + expr.coeffs[1] = -after.coeff; + rhs = after.constant - before.constant; + } + + // Canonicalization. + expr.SimpleCanonicalization(); + const IntegerValue gcd = expr.DivideByGcd(); + rhs = FloorRatio(rhs, gcd); + + // Trivial case. + if (expr.coeffs[0] == 0 && expr.coeffs[1] == 0) { + if (rhs < 0) { sat_solver_->NotifyThatModelIsUnsat(); return false; } + return true; } - const IntegerValue offset = before.constant - after.constant; - const LinearExpression2 expr = - LinearExpression2::Difference(before.var, after.var); - if (root_level_lin2_bounds_->AddUpperBound(expr, -offset)) { + + if (root_level_lin2_bounds_->AddUpperBound(expr, rhs)) { VLOG(2) << "new relation " << TaskDebugString(a) << " <= " << TaskDebugString(b); - if (before.var == NegationOf(after.var)) { - AddWeightedSumLowerOrEqual({}, {before.var}, {int64_t{2}}, - -offset.value(), model_); - } else { - // TODO(user): Adding new constraint during propagation might not be the - // best idea as it can create some complication. 
- AddWeightedSumLowerOrEqual({}, {before.var, after.var}, - {int64_t{1}, int64_t{-1}}, -offset.value(), - model_); - } + // TODO(user): Adding new constraint during propagation might not be the + // best idea as it can create some complication. + AddWeightedSumLowerOrEqual({}, {expr.vars[0], expr.vars[1]}, + {expr.coeffs[0].value(), expr.coeffs[1].value()}, + rhs.value(), model_); if (sat_solver_->ModelIsUnsat()) return false; } return true; diff --git a/ortools/sat/scheduling_helpers.h b/ortools/sat/scheduling_helpers.h index 9b524644da..d6b53e9dbd 100644 --- a/ortools/sat/scheduling_helpers.h +++ b/ortools/sat/scheduling_helpers.h @@ -210,13 +210,15 @@ class SchedulingConstraintHelper : public PropagatorInterface { IntegerValue GetCurrentMinDistanceBetweenTasks( int a, int b, bool add_reason_if_after = false); - // We detected a precedence between two tasks. - // If we are at level zero, we might want to add the constraint. - // If we are at positive level, we might want to propagate the associated - // precedence literal if it exists. - bool PropagatePrecedence(int a, int b); + // We detected a precedence between two tasks at level zero. + // This register a new constraint and notify the linear2 root level bounds + // repository. Returns false on conflict. + // + // TODO(user): We could also call this at positive decision level, but it is a + // bit harder to exploit as we will also need to store the reasons. + bool NotifyLevelZeroPrecedence(int a, int b); - // Return the minimum overlap of interval i with the time window [start..end]. + // Return the minimum overlap of task t with the time window [start..end]. // // Note: this is different from the mandatory part of an interval. 
IntegerValue GetMinOverlap(int t, IntegerValue start, IntegerValue end) const; @@ -397,7 +399,7 @@ class SchedulingConstraintHelper : public PropagatorInterface { const VariablesAssignment& assignment_; IntegerTrail* integer_trail_; GenericLiteralWatcher* watcher_; - Linear2Bounds* precedence_relations_; + Linear2Bounds* linear2_bounds_; RootLevelLinear2Bounds* root_level_lin2_bounds_; // The current direction of time, true for forward, false for backward. diff --git a/ortools/sat/util.cc b/ortools/sat/util.cc index da811a56f0..dd90fbbcc1 100644 --- a/ortools/sat/util.cc +++ b/ortools/sat/util.cc @@ -14,7 +14,6 @@ #include "ortools/sat/util.h" #include -#include #include #include #include diff --git a/ortools/sat/util_test.cc b/ortools/sat/util_test.cc index fc6074dca2..9aaef75c60 100644 --- a/ortools/sat/util_test.cc +++ b/ortools/sat/util_test.cc @@ -16,7 +16,6 @@ #include #include -#include #include #include #include @@ -30,6 +29,7 @@ #include "absl/container/btree_set.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" +#include "absl/numeric/bits.h" #include "absl/numeric/int128.h" #include "absl/random/random.h" #include "absl/strings/str_join.h" From afbb56379c32451ce6a1fe86abf1fbd5040ff7d3 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 15:23:46 +0200 Subject: [PATCH 075/509] make: Enable HiGHS support by default --- makefiles/Makefile.cpp.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefiles/Makefile.cpp.mk b/makefiles/Makefile.cpp.mk index 3d3e656ba0..d26ede715f 100644 --- a/makefiles/Makefile.cpp.mk +++ b/makefiles/Makefile.cpp.mk @@ -35,7 +35,7 @@ endif BUILD_TYPE ?= Release USE_COINOR ?= ON USE_GLPK ?= OFF -USE_HIGHS ?= OFF +USE_HIGHS ?= ON USE_PDLP := ON # OFF not supported USE_SCIP ?= ON USE_CPLEX ?= OFF From 4d9ef15b249e2d7bece2c351390996cbb7497cd0 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 10:33:02 +0200 Subject: [PATCH 076/509] tools/release: 
Detect /Users path in libortools.dylib (#4674) --- tools/release/build_delivery_macos.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 31aa8d1d47..c3052fb932 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -94,6 +94,9 @@ function build_dotnet() { echo -n "Build .Net..." | tee -a build.log cmake -S. -Btemp_dotnet -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_DOTNET=ON cmake --build temp_dotnet -j8 -v + echo " Check libortools.dylib..." | tee -a build.log + otool -L temp_dotnet/lib/libortools.dylib | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_dotnet --target test #echo "cmake test: DONE" | tee -a build.log @@ -181,6 +184,9 @@ function build_java() { cmake -S. -Btemp_java -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF \ -DBUILD_JAVA=ON -DSKIP_GPG=OFF ${GPG_EXTRA} cmake --build temp_java -j8 -v + echo " Check libortools.dylib..." | tee -a build.log + otool -L temp_java/lib/libortools.dylib | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_java --target test #echo "cmake test: DONE" | tee -a build.log @@ -272,6 +278,9 @@ function build_python() { echo -n "Build Python ${PY_VERSION}..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" cmake --build "temp_python${PY_VERSION}" -j8 -v + echo " Check libortools.dylib..." | tee -a build.log + otool -L "temp_python${PY_VERSION}/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_python${PY_VERSION} --target test #echo "cmake test_python${PY_VERSION}: DONE" | tee -a build.log @@ -321,14 +330,23 @@ function build_archive() { echo -n "Make cpp archive..." 
| tee -a build.log make archive_cpp + echo " Check libortools.dylib..." | tee -a build.log + otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log echo -n "Make dotnet archive..." | tee -a build.log make archive_dotnet + echo " Check libortools.dylib..." | tee -a build.log + otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log echo -n "Make java archive..." | tee -a build.log make archive_java + echo " Check libortools.dylib..." | tee -a build.log + otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log # move archive to export @@ -372,6 +390,7 @@ function reset() { cd "${ROOT_DIR}" || exit 2 make clean + rm -rf temp_cpp rm -rf temp_dotnet rm -rf temp_java rm -rf temp_python* From c1f2d74de908e4efc13aca3b9bd20ef87ad54918 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 11:16:43 +0200 Subject: [PATCH 077/509] dependencies: Fix MACOSX_RPATH usage (#4674) 1. This is a boolean property which must be set to TRUE or FALSE If TRUE, the default, cmake will use @rpath as directory portion (aka prefix) of the install_name (otool LC_ID_DYLIB) note: CMP0042 set it to TRUE by default 2. To change this prefix you must use INSTALL_NAME_DIR 3. To change the INSTALL_RPATH (otool LC_RPATH) (e.g. to set it to @loader_path) you must use the INSTALL_RPATH property. 
ref: https://cmake.org/cmake/help/latest/variable/CMAKE_MACOSX_RPATH.html https://cmake.org/cmake/help/latest/prop_tgt/MACOSX_RPATH.html https://cmake.org/cmake/help/latest/prop_tgt/INSTALL_RPATH.html https://cmake.org/cmake/help/latest/policy/CMP0042.html --- cmake/dependencies/CMakeLists.txt | 2 + patches/highs-v1.11.0.patch | 276 ++++++++++++++++++++++++++++++ patches/scip-v922.patch | 42 ++++- patches/soplex-v7.1.3.patch | 28 ++- 4 files changed, 338 insertions(+), 10 deletions(-) create mode 100644 patches/highs-v1.11.0.patch diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 2b461eacd3..926e51c047 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -287,6 +287,8 @@ if(BUILD_HIGHS) GIT_REPOSITORY "https://github.com/ERGO-Code/HiGHS.git" GIT_TAG "v1.11.0" GIT_SHALLOW TRUE + PATCH_COMMAND git apply --ignore-whitespace + "${CMAKE_CURRENT_LIST_DIR}/../../patches/highs-v1.11.0.patch" ) FetchContent_MakeAvailable(highs) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/patches/highs-v1.11.0.patch b/patches/highs-v1.11.0.patch new file mode 100644 index 0000000000..ce02101076 --- /dev/null +++ b/patches/highs-v1.11.0.patch @@ -0,0 +1,276 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 661aa078..2606e08d 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -57,7 +57,7 @@ endif() + # message("CMAKE_CXX_COMPILER_ID is ${CMAKE_CXX_COMPILER_ID}") + if (CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM") + message(STATUS "Compiler is IntelLLVM") +- if (CMAKE_HOST_WIN32 AND CMAKE_VERSION VERSION_LESS "3.23.0") ++ if (CMAKE_HOST_WIN32 AND CMAKE_VERSION VERSION_LESS "3.23.0") + message(FATAL_ERROR "Need at least CMake 3.23 for IntelLLVM support of IntelDPCPP package on Windows") + elseif(CMAKE_VERSION VERSION_LESS "3.23.0") + message(FATAL_ERROR "CMake 3.20.5 is the minimum recommended for IntelLLVM on Linux") +@@ -121,9 +121,9 @@ endif() + + option(HIGHS_COVERAGE "Activate the code coverage 
compilation" OFF) + +-# Address | Thread | Leak ++# Address | Thread | Leak + # Linux atm +-# Only Debug is theted atm ++# Only Debug is theted atm + # See below for RelWithDeb info, todo test wip + set(DEBUG_MEMORY "Off" CACHE STRING "Sanitizers") + +@@ -137,7 +137,7 @@ message(STATUS "Build pdlp with GPU: ${CUPDLP_GPU}") + option(CUPDLP_FIND_CUDA "Build pdlp with GPU" OFF) + message(STATUS "Use FindCUDAConf: ${CUPDLP_FIND_CUDA}") + +-if(CUPDLP_GPU AND CMAKE_VERSION VERSION_LESS "3.25.0") ++if(CUPDLP_GPU AND CMAKE_VERSION VERSION_LESS "3.25.0") + message("CUPDLP FindCUDAConf requires CMake version minumum 3.24. Please use a higher version of CMake.") + endif() + +@@ -158,11 +158,11 @@ if (CUPDLP_GPU) + # With FindCUDAConf.cmake + # Need to have the CUDA_HOME environment variable set. + include(FindCUDAConf) +- else() ++ else() + # Without FindCUDAConf.cmake + enable_language(CUDA) + find_package(CUDAToolkit REQUIRED) +- ++ + set(CUDA_LIBRARY-NOTFOUND, OFF) + set(CUDA_LIBRARY CUDA::cudart CUDA::cublas CUDA::cusparse) + endif() +@@ -205,7 +205,7 @@ if (BUILD_CXX) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) +- # for multi-config build system (e.g. xcode) ++ # for multi-config build system (e.g. 
xcode) + foreach(OutputConfig IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER ${OutputConfig} OUTPUTCONFIG) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_LIBDIR}) +@@ -244,14 +244,14 @@ if (BUILD_CXX) + option(STDCALL "Build highs with the __stdcall convention" OFF) + endif() + +- if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR +- CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR +- CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") ++ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR ++ CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR ++ CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +- # elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") ++ # elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # not recognised by cl +- # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++11") +- endif() ++ # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++11") ++ endif() + + # Basic type + include(CMakePushCheckState) +@@ -275,7 +275,7 @@ if (BUILD_CXX) + check_type_size("int *" SIZEOF_INT_P LANGUAGE CXX) + message(STATUS "Found int * size: ${SIZEOF_INT_P}") + cmake_pop_check_state() +- ++ + # Use current CMAKE_C_FLAGS and CMAKE_CXX_FLAGS when checking for IPO support, + # instead of defaults: https://cmake.org/cmake/help/latest/policy/CMP0138.html + if(MSVC AND BUILD_SHARED_LIBS) +@@ -293,7 +293,7 @@ if (BUILD_CXX) + set(ipo_supported NO) + message(STATUS "IPO / LTO not currently supported building HiGHS on MinGW") + else() +- if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") ++ if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") + cmake_policy(SET CMP0138 NEW) + endif() + +@@ -371,19 +371,8 @@ else() + HIGHS_HAVE_BUILTIN_CLZ) + endif() + +-set(CMAKE_MACOSX_RPATH ON) +- +-if (BUILD_DOTNET) +- set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) +-else() +- # use, i.e. 
don't skip the full RPATH for the build tree +- set(CMAKE_SKIP_BUILD_RPATH FALSE) +- +- # when building, don't use the install RPATH already +- # (but later on when installing) +- set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) +- set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) +-endif() ++# set the correct rpath for OS X ++set(CMAKE_MACOSX_RPATH TRUE) + + if(NOT FAST_BUILD) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${HIGHS_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) +@@ -428,7 +417,7 @@ endif() + + # For debug of cuda locally + +-# does not work with older CMake ++# does not work with older CMake + # add_compile_options("$<$,$>:-G>") + + # add_compile_options("$<$:-G>") +@@ -453,7 +442,7 @@ if(MSVC) + add_compile_options("$<$:-D_CRT_SECURE_NO_WARNINGS>") + add_compile_options("$<$:/MP>") + +- # Try to split large pdb files into objects. ++ # Try to split large pdb files into objects. + # https://github.com/tensorflow/tensorflow/issues/31610 + # add_compile_options("/Z7") + # add_link_options("/DEBUG:FASTLINK") +@@ -611,11 +600,11 @@ if(FAST_BUILD AND HIGHS_COVERAGE) + message(STATUS "Building in coverage mode") + + # Enable coverage flags +- add_compile_options(-O0) +- add_compile_options(--coverage) +- add_compile_options(-fprofile-update=atomic) ++ add_compile_options(-O0) ++ add_compile_options(--coverage) ++ add_compile_options(-fprofile-update=atomic) + +- add_link_options(-O0) ++ add_link_options(-O0) + add_link_options(--coverage) # Ensure coverage data is linked correctly + + find_program(GCOV_PATH gcov) +diff --git a/highs/CMakeLists.txt b/highs/CMakeLists.txt +index 50301433..f7b982fb 100644 +--- a/highs/CMakeLists.txt ++++ b/highs/CMakeLists.txt +@@ -1,7 +1,7 @@ + if (NOT BUILD_CXX) + return() + endif() +- ++ + # Define library. 
+ include(sources) + set(sources ${highs_sources} ${cupdlp_sources} ${ipx_sources} ${basiclu_sources}) +@@ -43,7 +43,7 @@ if(NOT FAST_BUILD) + set_target_properties(libhighs PROPERTIES + OUTPUT_NAME "highs" + PDB_NAME "libhighs" +- MACOSX_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ ) + + if(ZLIB AND ZLIB_FOUND) + target_link_libraries(libhighs ZLIB::ZLIB) +@@ -51,8 +51,11 @@ if(NOT FAST_BUILD) + endif() + + # set the install rpath to the installed destination +- set_target_properties(libhighs PROPERTIES INSTALL_RPATH +- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ if(APPLE) ++ set_target_properties(libhighs PROPERTIES INSTALL_RPATH "@loader_path") ++ elseif (UNIX) ++ set_target_properties(libhighs PROPERTIES INSTALL_RPATH "$ORIGIN") ++ endif() + + # install the header files of highs + foreach(file ${headers}) +@@ -84,7 +87,7 @@ if(NOT FAST_BUILD) + # target_compile_options(libipx PRIVATE "-Wno-sign-compare") + # target_compile_options(libipx PRIVATE "-Wno-logical-op-parentheses") + endif() +- ++ + install(TARGETS libhighs EXPORT highs-targets + LIBRARY + ARCHIVE +@@ -150,8 +153,8 @@ else() + + + target_sources(highs PRIVATE ${sources} ${headers} ${win_version_file}) +- +- # Optional Cuda ++ ++ # Optional Cuda + if (CUPDLP_GPU) + + target_include_directories(highs PUBLIC "$") +@@ -164,7 +167,7 @@ else() + else() + target_link_libraries(highs cudalin ${CUDA_LIBRARY} m) + endif() +- ++ + set_target_properties(highs PROPERTIES CUDA_SEPARABLE_COMPILATION ON) + + endif() +@@ -221,7 +224,7 @@ else() + ) + target_link_libraries(highs ZLIB::ZLIB) + endif() +- ++ + # install the header files of highs + foreach(file ${headers}) + get_filename_component(dir ${file} DIRECTORY) +@@ -236,9 +239,9 @@ else() + + # target_compile_options(highs PRIVATE "-Wall") + # target_compile_options(highs PRIVATE "-Wunused") +- ++ + if (UNIX) +- if ( CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") ++ if ( CMAKE_CXX_COMPILER_ID STREQUAL 
"GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + target_compile_options(highs PRIVATE "-Wall") + target_compile_options(highs PRIVATE "-Wreturn-type") + target_compile_options(highs PRIVATE "-Wmissing-declarations") +@@ -248,7 +251,7 @@ else() + target_compile_options(highs PRIVATE "-Wno-comment") + target_compile_options(highs PRIVATE "-Wno-unused-label") + +- if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") ++ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + target_compile_options(highs PRIVATE "-Wno-unused-lambda-capture") + endif() + +@@ -267,7 +270,7 @@ else() + endif() + + if (BUILD_DOTNET) +- ++ + # see: https://docs.microsoft.com/en-us/dotnet/core/rid-catalog + if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)") + set(DOTNET_PLATFORM arm64) +@@ -298,8 +301,8 @@ else() + set(TARGET_FILE_NAME "highs.dll") + endif() + +- add_custom_command(TARGET highs POST_BUILD +- COMMAND "${CMAKE_COMMAND}" -E copy ++ add_custom_command(TARGET highs POST_BUILD ++ COMMAND "${CMAKE_COMMAND}" -E copy + "$" + ${DOTNET_PROJECT_DIR}/runtimes/${DOTNET_RID}/native/${TARGET_FILE_NAME} + COMMENT "Copying to output directory") +@@ -318,7 +321,7 @@ if(FORTRAN_FOUND) + target_link_libraries(FortranHighs PUBLIC highs) + endif() + +- install(TARGETS FortranHighs ++ install(TARGETS FortranHighs + LIBRARY + ARCHIVE + RUNTIME diff --git a/patches/scip-v922.patch b/patches/scip-v922.patch index 7a92254118..b1093c9e34 100644 --- a/patches/scip-v922.patch +++ b/patches/scip-v922.patch @@ -1,5 +1,5 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 8492dc75..4c12a9bf 100644 +index 38ac7845..9b0d4fcb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,9 +38,11 @@ set(CPACK_PACKAGE_VENDOR "Zuse Institute Berlin") @@ -17,6 +17,15 @@ index 8492dc75..4c12a9bf 100644 if(SCIPOptSuite_BINARY_DIR) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${SCIPOptSuite_BINARY_DIR}/bin) +@@ -239,7 +241,7 @@ if(DEBUGSOL) + endif() + + #set the correct rpath for OS X +-set(CMAKE_MACOSX_RPATH ON) ++set(CMAKE_MACOSX_RPATH TRUE) 
+ + #set defines for Windows + if(WIN32) @@ -412,22 +414,11 @@ endif() #search the selected LP solver library message(STATUS "Finding Solver \"${LPS}\"") @@ -96,10 +105,35 @@ index 559552f9..682ac40a 100644 set(SCIP_INCLUDE_DIRS "@CONF_INCLUDE_DIRS@") set(SCIP_FOUND TRUE) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt -index e6fda2d5..2d04b845 100644 +index d6dd3acf..a146ddec 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt -@@ -1149,17 +1149,8 @@ install(TARGETS scip libscip EXPORT scip-targets +@@ -5,8 +5,8 @@ include(GNUInstallDirs) + + function(setLibProperties targetname outputname) + set_target_properties(${targetname} PROPERTIES +- OUTPUT_NAME ${outputname} +- MACOSX_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ OUTPUT_NAME ${outputname} ++ ) + endfunction(setLibProperties) + + set(CMAKE_C_STANDARD 99) +@@ -1112,6 +1112,13 @@ target_link_libraries(scip + add_dependencies(libscip scip_update_githash) + add_dependencies(scip scip_update_githash) + ++if(APPLE) ++ set_target_properties(libscip PROPERTIES ++ INSTALL_RPATH "@loader_path") ++elseif(UNIX) ++ set_target_properties(libscip PROPERTIES ++ INSTALL_RPATH "$ORIGIN") ++endif() + set_target_properties(libscip PROPERTIES + VERSION ${SCIP_VERSION_MAJOR}.${SCIP_VERSION_MINOR}.${SCIP_VERSION_PATCH}.${SCIP_VERSION_SUB} + SOVERSION ${SCIP_VERSION_MAJOR}.${SCIP_VERSION_MINOR} +@@ -1150,17 +1157,8 @@ install(TARGETS scip libscip EXPORT scip-targets INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) # Add all targets to the build-tree export set @@ -119,7 +153,7 @@ index e6fda2d5..2d04b845 100644 # configure the config file for the build tree set(CONF_INCLUDE_DIRS "${PROJECT_SOURCE_DIR}/src" "${PROJECT_BINARY_DIR}") -@@ -1175,18 +1166,16 @@ ${PROJECT_BINARY_DIR}/scip-config-version.cmake +@@ -1176,18 +1174,16 @@ ${PROJECT_BINARY_DIR}/scip-config-version.cmake #configure the config file for the install set(CONF_INCLUDE_DIRS "\${CMAKE_CURRENT_LIST_DIR}/../../../include") diff --git 
a/patches/soplex-v7.1.3.patch b/patches/soplex-v7.1.3.patch index 06b629ec98..2df6a36841 100644 --- a/patches/soplex-v7.1.3.patch +++ b/patches/soplex-v7.1.3.patch @@ -1,5 +1,5 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 0b21f5a..ddf1536 100644 +index 0b21f5a..6f08341 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,6 +27,10 @@ set(CPACK_PACKAGE_VERSION_PATCH "${SOPLEX_VERSION_PATCH}") @@ -34,7 +34,12 @@ index 0b21f5a..ddf1536 100644 # for colorized output if(NOT WIN32) -@@ -69,6 +79,8 @@ set(CMAKE_MACOSX_RPATH ON) +@@ -65,10 +75,12 @@ if(NOT CMAKE_BUILD_TYPE) + endif() + + # set the correct rpath for OS X +-set(CMAKE_MACOSX_RPATH ON) ++set(CMAKE_MACOSX_RPATH TRUE) # use C++14 standard set(CMAKE_CXX_STANDARD 14) @@ -131,9 +136,20 @@ index 0b21f5a..ddf1536 100644 + add_subdirectory(check) +endif() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt -index 84ec5a5..6f5d4ef 100644 +index 84ec5a5..4552300 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt +@@ -3,8 +3,8 @@ + # + function(setLibProperties targetname outputname) + set_target_properties(${targetname} PROPERTIES +- OUTPUT_NAME ${outputname} +- MACOSX_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ OUTPUT_NAME ${outputname} ++ ) + endfunction(setLibProperties) + + include(GNUInstallDirs) @@ -193,24 +193,28 @@ target_link_libraries(libsoplexshared libsoplex ${libs}) set_target_properties(libsoplexshared PROPERTIES CXX_VISIBILITY_PRESET default) @@ -143,11 +159,11 @@ index 84ec5a5..6f5d4ef 100644 +if(SOPLEX_SOPLEX) + add_executable(soplex EXCLUDE_FROM_ALL soplexmain.cpp) + target_link_libraries(soplex PRIVATE libsoplex ${Boost_LIBRARIES}) - --if(EMSCRIPTEN AND EMSCRIPTEN_HTML) ++ + # set the install rpath to the installed destination + set_target_properties(soplex PROPERTIES INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") -+ + +-if(EMSCRIPTEN AND EMSCRIPTEN_HTML) + if(EMSCRIPTEN AND EMSCRIPTEN_HTML) set_target_properties(soplex PROPERTIES LINK_DEPENDS 
${CMAKE_CURRENT_SOURCE_DIR}/soplex_webdemo_shell.html) set(CMAKE_EXECUTABLE_SUFFIX ".html") From c229e9facdc90947f4fd6e7ddb1c588dbccd87a4 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:11:12 +0200 Subject: [PATCH 078/509] [CP-SAT] improve python layer for += and -= operators --- ortools/sat/python/cp_model_helper.cc | 60 +++++++++++++++++++++++++++ ortools/sat/python/cp_model_test.py | 14 +++++++ ortools/sat/python/linear_expr.cc | 5 +-- 3 files changed, 76 insertions(+), 3 deletions(-) diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index ef40d8161e..371a87b7a2 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -960,6 +960,36 @@ PYBIND11_MODULE(cp_model_helper, m) { }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddFloat)) + .def( + "__iadd__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other); + return expr; + }, + py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, Add)) + .def( + "__iadd__", + [](py::object self, int64_t cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddIntInPlace(cst); + return expr; + }, + DOC(operations_research, sat, python, LinearExpr, AddInt)) + .def( + "__iadd__", + [](py::object self, double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(cst); + return expr; + }, + py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, AddFloat)) .def( "__sub__", [](py::object self, @@ -1003,6 +1033,36 @@ PYBIND11_MODULE(cp_model_helper, m) { }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubFloat)) + .def( + "__isub__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other->MulInt(-1)); + return expr; + }, + 
py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, Sub)) + .def( + "__isub__", + [](py::object self, int64_t cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddIntInPlace(-cst); + return expr; + }, + DOC(operations_research, sat, python, LinearExpr, SubInt)) + .def( + "__isub__", + [](py::object self, double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(-cst); + return expr; + }, + py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, SubFloat)) .def_property_readonly("num_exprs", &SumArray::num_exprs) .def_property_readonly("int_offset", &SumArray::int_offset) .def_property_readonly("double_offset", &SumArray::double_offset); diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index bbed30ffa0..2add07de02 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -2447,6 +2447,20 @@ TRFM""" x = [model.new_int_var(0, 10, f"x{i}") for i in range(100000)] model.add(sum(x) == 10) + def test_large_iadd(self): + model = cp_model.CpModel() + s = 0 + for _ in range(300000): + s += model.new_bool_var("") + model.add(s == 10) + + def test_large_isub(self): + model = cp_model.CpModel() + s = 0 + for _ in range(300000): + s -= model.new_bool_var("") + model.add(s == 10) + def test_simplification1(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") diff --git a/ortools/sat/python/linear_expr.cc b/ortools/sat/python/linear_expr.cc index b158fb9343..42077ef46f 100644 --- a/ortools/sat/python/linear_expr.cc +++ b/ortools/sat/python/linear_expr.cc @@ -89,9 +89,8 @@ std::shared_ptr LinearExpr::AddFloat(double cst) { std::shared_ptr LinearExpr::Sub(std::shared_ptr other) { std::vector> exprs; exprs.push_back(shared_from_this()); - exprs.push_back(other); - const std::vector coeffs = {1, -1}; - return std::make_shared(exprs, coeffs, 0); + 
exprs.push_back(other->MulInt(-1)); + return std::make_shared(exprs); } std::shared_ptr LinearExpr::SubInt(int64_t cst) { From d1c3c7f84b4cce37ce3f178bfcffca00a901937c Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:11:26 +0200 Subject: [PATCH 079/509] [CP-SAT] more work on precedences --- ortools/sat/2d_distances_propagator.cc | 11 +- ortools/sat/cp_model_solver_helpers.cc | 2 +- ortools/sat/disjunctive.cc | 13 +- ortools/sat/integer_base.h | 18 ++ ortools/sat/precedences.cc | 239 +++++++++++++++++-------- ortools/sat/precedences.h | 114 ++++++------ ortools/sat/precedences_test.cc | 220 ++++++++++------------- ortools/sat/routing_cuts.cc | 33 ++-- ortools/sat/routing_cuts.h | 1 + ortools/sat/routing_cuts_test.cc | 52 +++--- ortools/sat/scheduling_helpers.cc | 19 +- 11 files changed, 393 insertions(+), 329 deletions(-) diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc index e6d2658635..7cf521400f 100644 --- a/ortools/sat/2d_distances_propagator.cc +++ b/ortools/sat/2d_distances_propagator.cc @@ -180,15 +180,10 @@ bool Precedences2DPropagator::Propagate() { if (j == 1) { std::swap(b1, b2); } - LinearExpression2 expr; - expr.vars[0] = helper->Starts()[b1].var; - expr.vars[1] = helper->Ends()[b2].var; - expr.coeffs[0] = helper->Starts()[b1].coeff; - expr.coeffs[1] = -helper->Ends()[b2].coeff; + const auto [expr, ub] = EncodeDifferenceLowerThan( + helper->Starts()[b1], helper->Ends()[b2], -1); linear2_bounds_->AddReasonForUpperBoundLowerThan( - expr, - -(helper->Starts()[b1].constant - helper->Ends()[b2].constant) - 1, - helper_.x_helper().MutableLiteralReason(), + expr, ub, helper_.x_helper().MutableLiteralReason(), helper_.x_helper().MutableIntegerReason()); } } diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 1be370ab37..083d657587 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -1155,7 +1155,7 @@ 
void FillBinaryRelationRepository(const CpModelProto& model_proto, } } } - repository->Build(root_level_lin2_bounds); + repository->Build(); } } // namespace diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index d92ae5ac59..fd4e6b4e58 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -1254,6 +1254,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { int best_index = -1; const IntegerValue current_var_lb = integer_trail_->LowerBound(var); IntegerValue best_new_lb = current_var_lb; + IntegerValue min_offset_at_best = kMinIntegerValue; IntegerValue min_offset = kMaxIntegerValue; IntegerValue sum_of_duration = 0; for (int i = num_before; --i >= 0;) { @@ -1298,6 +1299,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { const IntegerValue start = task_time.time; const IntegerValue new_lb = start + sum_of_duration + min_offset; if (new_lb > best_new_lb) { + min_offset_at_best = min_offset; best_new_lb = new_lb; best_index = i; } @@ -1315,14 +1317,13 @@ bool DisjunctivePrecedences::PropagateSubwindow() { helper_->AddPresenceReason(ct); helper_->AddEnergyAfterReason(ct, helper_->SizeMin(ct), window_start); - // Fetch the explanation. + // Fetch the explanation of (var - end) >= min_offset // This is okay if a bit slow since we only do that when we push. 
- const AffineExpression& end_exp = helper_->Ends()[ct]; - const LinearExpression2 expr = - LinearExpression2::Difference(end_exp.var, var); + const auto [expr, ub] = EncodeDifferenceLowerThan( + helper_->Ends()[ct], var, -min_offset_at_best); linear2_bounds_->AddReasonForUpperBoundLowerThan( - expr, linear2_bounds_->UpperBound(expr), - helper_->MutableLiteralReason(), helper_->MutableIntegerReason()); + expr, ub, helper_->MutableLiteralReason(), + helper_->MutableIntegerReason()); } ++stats_.num_propagations; if (!helper_->PushIntegerLiteral( diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index a148a91cf6..572f62a906 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -427,6 +427,24 @@ struct LinearExpression2 { } }; +// Encodes (a - b <= ub) in (linear2 <= ub) format. +// Note that the returned expression is canonicalized and divided by its GCD. +inline std::pair EncodeDifferenceLowerThan( + AffineExpression a, AffineExpression b, IntegerValue ub) { + LinearExpression2 expr; + expr.vars[0] = a.var; + expr.coeffs[0] = a.coeff; + expr.vars[1] = b.var; + expr.coeffs[1] = -b.coeff; + IntegerValue rhs = ub + b.constant - a.constant; + + // Canonicalize. 
+ expr.SimpleCanonicalization(); + const IntegerValue gcd = expr.DivideByGcd(); + rhs = FloorRatio(rhs, gcd); + return {std::move(expr), rhs}; +} + template H AbslHashValue(H h, const LinearExpression2& e) { h = H::combine(std::move(h), e.vars[0]); diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index d7ffd981d8..2e2a5bcf47 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -57,6 +57,7 @@ namespace sat { std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) { + using AddResult = BestBinaryRelationBounds::AddResult; const IntegerValue zero_level_lb = integer_trail_->LevelZeroLowerBound(expr); const IntegerValue zero_level_ub = integer_trail_->LevelZeroUpperBound(expr); if (lb <= zero_level_lb && ub >= zero_level_ub) { @@ -73,15 +74,14 @@ std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, const auto [status_lb, status_ub] = root_level_relations_.Add(expr, lb, ub); const bool lb_restricted = - status_lb == BestBinaryRelationBounds::AddResult::ADDED || - status_lb == BestBinaryRelationBounds::AddResult::UPDATED; + status_lb == AddResult::ADDED || status_lb == AddResult::UPDATED; const bool ub_restricted = - status_ub == BestBinaryRelationBounds::AddResult::ADDED || - status_ub == BestBinaryRelationBounds::AddResult::UPDATED; + status_ub == AddResult::ADDED || status_ub == AddResult::UPDATED; if (!lb_restricted && !ub_restricted) return {false, false}; ++num_updates_; + // Update our special coeff=1 lookup table. if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { // +2 to handle possible negation. const int new_size = @@ -89,19 +89,69 @@ std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, if (new_size > coeff_one_var_lookup_.size()) { coeff_one_var_lookup_.resize(new_size); } - if (status_lb == BestBinaryRelationBounds::AddResult::ADDED) { + if (status_lb == AddResult::ADDED) { // First time added to root_level_relations_. 
coeff_one_var_lookup_[NegationOf(expr.vars[0])].push_back( NegationOf(expr.vars[1])); coeff_one_var_lookup_[NegationOf(expr.vars[1])].push_back( NegationOf(expr.vars[0])); } - if (status_ub == BestBinaryRelationBounds::AddResult::ADDED) { + if (status_ub == AddResult::ADDED) { coeff_one_var_lookup_[expr.vars[0]].push_back(expr.vars[1]); coeff_one_var_lookup_[expr.vars[1]].push_back(expr.vars[0]); } } + // Update our per-variable and per-pair lookup tables. + IntegerVariable var1 = PositiveVariable(expr.vars[0]); + IntegerVariable var2 = PositiveVariable(expr.vars[1]); + if (var1 > var2) std::swap(var1, var2); + + auto [it_var, inserted] = var_to_bounds_vector_index_.insert({expr, {0, 0}}); + for (const IntegerVariable var : {var1, var2}) { + auto& var_bounds = var_to_bounds_[var]; + if (inserted) { + if (var == var1) { + it_var->second.first = var_bounds.size(); + } else { + it_var->second.second = var_bounds.size(); + } + var_bounds.push_back({expr, lb, ub}); + } else { + const int index = + (var == var1) ? 
it_var->second.first : it_var->second.second; + DCHECK_LT(index, var_bounds.size()); + std::tuple& var_bound = + var_bounds[index]; + if (status_lb == AddResult::ADDED || status_lb == AddResult::UPDATED) { + std::get<1>(var_bound) = lb; + } + if (status_ub == AddResult::ADDED || status_ub == AddResult::UPDATED) { + std::get<2>(var_bound) = ub; + } + } + } + + auto [it_pair, pair_inserted] = + var_pair_to_bounds_vector_index_.insert({expr, 0}); + DCHECK_EQ(inserted, pair_inserted); + auto& pair_bounds = var_pair_to_bounds_[{var1, var2}]; + if (pair_inserted) { + it_pair->second = pair_bounds.size(); + pair_bounds.push_back({expr, lb, ub}); + } else { + const int index = it_pair->second; + DCHECK_LT(index, pair_bounds.size()); + std::tuple& pair_bound = + pair_bounds[index]; + if (status_lb == AddResult::ADDED || status_lb == AddResult::UPDATED) { + std::get<1>(pair_bound) = lb; + } + if (status_ub == AddResult::ADDED || status_ub == AddResult::UPDATED) { + std::get<2>(pair_bound) = ub; + } + } + return {lb_restricted, ub_restricted}; } @@ -142,6 +192,95 @@ IntegerValue RootLevelLinear2Bounds::GetUpperBoundNoTrail( return root_level_relations_.UpperBoundWhenCanonicalized(expr); } +std::vector> +RootLevelLinear2Bounds::GetSortedNonTrivialUpperBounds() const { + std::vector> result = + root_level_relations_.GetSortedNonTrivialUpperBounds(); + int new_size = 0; + for (int i = 0; i < result.size(); ++i) { + const auto& [expr, ub] = result[i]; + if (ub < integer_trail_->LevelZeroUpperBound(expr)) { + result[new_size] = {expr, ub}; + ++new_size; + } + } + result.resize(new_size); + return result; +} + +// Return a list of (lb <= expr <= ub), with expr.vars[0] = var, where at +// least one of the bounds is non-trivial and the potential other non-trivial +// bound is tight. 
+std::vector> +RootLevelLinear2Bounds::GetAllBoundsContainingVariable( + IntegerVariable var) const { + std::vector> result; + auto it = var_to_bounds_.find(PositiveVariable(var)); + if (it == var_to_bounds_.end()) return {}; + for (const auto& [expr, lb, ub] : it->second) { + const IntegerValue trail_lb = integer_trail_->LevelZeroLowerBound(expr); + const IntegerValue trail_ub = integer_trail_->LevelZeroUpperBound(expr); + if (lb <= trail_lb && ub >= trail_ub) continue; + LinearExpression2 explicit_vars_expr = expr; + if (explicit_vars_expr.vars[0] == NegationOf(var)) { + explicit_vars_expr.vars[0] = NegationOf(explicit_vars_expr.vars[0]); + explicit_vars_expr.coeffs[0] = -explicit_vars_expr.coeffs[0]; + } + if (explicit_vars_expr.vars[1] == NegationOf(var)) { + explicit_vars_expr.vars[1] = NegationOf(explicit_vars_expr.vars[1]); + explicit_vars_expr.coeffs[1] = -explicit_vars_expr.coeffs[1]; + } + if (explicit_vars_expr.vars[1] == var) { + std::swap(explicit_vars_expr.vars[0], explicit_vars_expr.vars[1]); + std::swap(explicit_vars_expr.coeffs[0], explicit_vars_expr.coeffs[1]); + } + DCHECK(explicit_vars_expr.vars[0] == var); + result.push_back( + {explicit_vars_expr, std::max(lb, trail_lb), std::min(ub, trail_ub)}); + } + return result; +} + +// Return a list of (lb <= expr <= ub), with expr.vars = {var1, var2}, where +// at least one of the bounds is non-trivial and the potential other +// non-trivial bound is tight. 
+std::vector> +RootLevelLinear2Bounds::GetAllBoundsContainingVariables( + IntegerVariable var1, IntegerVariable var2) const { + std::vector> result; + std::pair key = {PositiveVariable(var1), + PositiveVariable(var2)}; + if (key.first > key.second) std::swap(key.first, key.second); + auto it = var_pair_to_bounds_.find(key); + if (it == var_pair_to_bounds_.end()) return {}; + for (const auto& [expr, lb, ub] : it->second) { + const IntegerValue trail_lb = integer_trail_->LevelZeroLowerBound(expr); + const IntegerValue trail_ub = integer_trail_->LevelZeroUpperBound(expr); + if (lb <= trail_lb && ub >= trail_ub) continue; + + LinearExpression2 explicit_vars_expr = expr; + if (explicit_vars_expr.vars[0] == NegationOf(var1) || + explicit_vars_expr.vars[0] == NegationOf(var2)) { + explicit_vars_expr.vars[0] = NegationOf(explicit_vars_expr.vars[0]); + explicit_vars_expr.coeffs[0] = -explicit_vars_expr.coeffs[0]; + } + if (explicit_vars_expr.vars[1] == NegationOf(var1) || + explicit_vars_expr.vars[1] == NegationOf(var2)) { + explicit_vars_expr.vars[1] = NegationOf(explicit_vars_expr.vars[1]); + explicit_vars_expr.coeffs[1] = -explicit_vars_expr.coeffs[1]; + } + if (explicit_vars_expr.vars[1] == var1) { + std::swap(explicit_vars_expr.vars[0], explicit_vars_expr.vars[1]); + std::swap(explicit_vars_expr.coeffs[0], explicit_vars_expr.coeffs[1]); + } + DCHECK(explicit_vars_expr.vars[0] == var1 && + explicit_vars_expr.vars[1] == var2); + result.push_back( + {explicit_vars_expr, std::max(lb, trail_lb), std::min(ub, trail_ub)}); + } + return result; +} + EnforcedLinear2Bounds::~EnforcedLinear2Bounds() { if (!VLOG_IS_ON(1)) return; std::vector> stats; @@ -244,7 +383,7 @@ void EnforcedLinear2Bounds::AddReasonForUpperBoundLowerThan( std::vector* literal_reason, std::vector* /*unused*/) const { expr.SimpleCanonicalization(); - if (ub >= LevelZeroUpperBound(expr)) return; + if (ub >= root_level_bounds_->LevelZeroUpperBound(expr)) return; const IntegerValue gcd = expr.DivideByGcd(); 
const auto it = conditional_relations_.find(expr); DCHECK(it != conditional_relations_.end()); @@ -255,31 +394,12 @@ void EnforcedLinear2Bounds::AddReasonForUpperBoundLowerThan( CHECK(trail_->Assignment().LiteralIsTrue(l)); } } - DCHECK_EQ(CapProdI(gcd, entry.rhs), UpperBound(expr)); DCHECK_LE(CapProdI(gcd, entry.rhs), ub); for (const Literal l : entry.enforcements) { literal_reason->push_back(l.Negated()); } } -IntegerValue EnforcedLinear2Bounds::UpperBound(LinearExpression2 expr) const { - expr.SimpleCanonicalization(); - const IntegerValue gcd = expr.DivideByGcd(); - - const auto it = conditional_relations_.find(expr); - if (it != conditional_relations_.end()) { - const ConditionalEntry& entry = conditional_stack_[it->second]; - if (DEBUG_MODE) { - for (const Literal l : entry.enforcements) { - CHECK(trail_->Assignment().LiteralIsTrue(l)); - } - } - DCHECK_LT(entry.rhs, root_level_bounds_->LevelZeroUpperBound(expr)); - return CapProdI(gcd, entry.rhs); - } - return CapProdI(gcd, root_level_bounds_->LevelZeroUpperBound(expr)); -} - IntegerValue EnforcedLinear2Bounds::GetUpperBoundFromEnforced( LinearExpression2 expr) const { DCHECK_EQ(expr.DivideByGcd(), 1); @@ -1241,48 +1361,22 @@ void BinaryRelationRepository::AddPartialRelation(Literal lit, Add(lit, LinearExpression2(a, b, 1, 1), 0, 0); } -void BinaryRelationRepository::Build( - const RootLevelLinear2Bounds* root_level_bounds) { - for (const auto& [expr, lb, ub] : - root_level_bounds->GetSortedNonTrivialBounds()) { - LinearExpression2 positive_expr = expr; - positive_expr.MakeVariablesPositive(); - Relation r; - r.enforcement = Literal(kNoLiteralIndex); - r.expr = positive_expr; - r.rhs = root_level_bounds->LevelZeroUpperBound(positive_expr); - positive_expr.Negate(); - r.lhs = -root_level_bounds->LevelZeroUpperBound(positive_expr); - relations_.push_back(r); - } +void BinaryRelationRepository::Build() { DCHECK(!is_built_); is_built_ = true; std::vector> literal_key_values; - std::vector> var_key_values; const 
int num_relations = relations_.size(); literal_key_values.reserve(num_enforced_relations_); - var_key_values.reserve(num_relations - num_enforced_relations_); for (int i = 0; i < num_relations; ++i) { const Relation& r = relations_[i]; - if (r.enforcement.Index() == kNoLiteralIndex) { - var_key_values.emplace_back(r.expr.vars[0], i); - var_key_values.emplace_back(r.expr.vars[1], i); - std::pair key(r.expr.vars[0], - r.expr.vars[1]); - if (relations_[i].expr.vars[0] > relations_[i].expr.vars[1]) { - std::swap(key.first, key.second); - } - var_pair_to_relations_[key].push_back(i); - } else { - literal_key_values.emplace_back(r.enforcement.Index(), i); - } + literal_key_values.emplace_back(r.enforcement.Index(), i); } lit_to_relations_.ResetFromPairs(literal_key_values); - var_to_relations_.ResetFromPairs(var_key_values); } bool BinaryRelationRepository::PropagateLocalBounds( - const IntegerTrail& integer_trail, Literal lit, + const IntegerTrail& integer_trail, + const RootLevelLinear2Bounds& root_level_bounds, Literal lit, const absl::flat_hash_map& input, absl::flat_hash_map* output) const { DCHECK_NE(lit.Index(), kNoLiteralIndex); @@ -1334,9 +1428,10 @@ bool BinaryRelationRepository::PropagateLocalBounds( } } for (const auto& [var, _] : input) { - if (var >= var_to_relations_.size()) continue; - for (const int relation_index : var_to_relations_[var]) { - update_var_bounds_from_relation(relations_[relation_index]); + for (const auto& [expr, lb, ub] : + root_level_bounds.GetAllBoundsContainingVariable(var)) { + update_var_bounds_from_relation( + Relation{Literal(kNoLiteralIndex), expr, lb, ub}); } } @@ -1648,29 +1743,16 @@ Linear2BoundsFromLinear3::~Linear2BoundsFromLinear3() { shared_stats_->AddStats(stats); } -std::pair ReifiedLinear2Bounds::FromDifference( - const AffineExpression& a, const AffineExpression& b) const { - LinearExpression2 expr; - expr.vars[0] = a.var; - expr.vars[1] = b.var; - expr.coeffs[0] = a.coeff; - expr.coeffs[1] = -b.coeff; - IntegerValue 
lb = kMinIntegerValue; // unused. - IntegerValue ub = b.constant - a.constant; - expr.CanonicalizeAndUpdateBounds(lb, ub, /*allow_negation=*/false); - return {std::move(expr), ub}; -} - RelationStatus ReifiedLinear2Bounds::GetLevelZeroPrecedenceStatus( AffineExpression a, AffineExpression b) const { - const auto [expr, ub] = FromDifference(a, b); + const auto [expr, ub] = EncodeDifferenceLowerThan(a, b, 0); return best_root_level_bounds_->GetLevelZeroStatus(expr, kMinIntegerValue, ub); } void ReifiedLinear2Bounds::AddReifiedPrecedenceIfNonTrivial( Literal l, AffineExpression a, AffineExpression b) { - const auto [expr, ub] = FromDifference(a, b); + const auto [expr, ub] = EncodeDifferenceLowerThan(a, b, 0); const RelationStatus status = best_root_level_bounds_->GetLevelZeroStatus(expr, kMinIntegerValue, ub); if (status != RelationStatus::IS_UNKNOWN) return; @@ -1683,7 +1765,7 @@ void ReifiedLinear2Bounds::AddReifiedPrecedenceIfNonTrivial( LiteralIndex ReifiedLinear2Bounds::GetReifiedPrecedence(AffineExpression a, AffineExpression b) { - const auto [expr, ub] = FromDifference(a, b); + const auto [expr, ub] = EncodeDifferenceLowerThan(a, b, 0); const RelationStatus status = best_root_level_bounds_->GetLevelZeroStatus(expr, kMinIntegerValue, ub); if (status == RelationStatus::IS_TRUE) { @@ -1872,10 +1954,15 @@ void Linear2Bounds::AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, std::vector* integer_reason) const { + expr.SimpleCanonicalization(); + const IntegerValue gcd = expr.DivideByGcd(); + ub = FloorRatio(ub, gcd); + DCHECK_LE(UpperBound(expr), ub); + if (root_level_bounds_->LevelZeroUpperBound(expr) <= ub) { return; } - if (enforced_bounds_->UpperBound(expr) <= ub) { + if (enforced_bounds_->GetUpperBoundFromEnforced(expr) <= ub) { enforced_bounds_->AddReasonForUpperBoundLowerThan(expr, ub, literal_reason, integer_reason); } else { diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 
cf192933a6..6b75d0b9d9 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -45,12 +45,8 @@ namespace operations_research { namespace sat { -struct FullIntegerPrecedence { - IntegerVariable var; - std::vector indices; - std::vector offsets; -}; - +// This holds all the relation lhs <= linear2 <= rhs that are true at level +// zero. It is the source of truth across all the solver for such bounds. class RootLevelLinear2Bounds { public: explicit RootLevelLinear2Bounds(Model* model) @@ -75,17 +71,27 @@ class RootLevelLinear2Bounds { int64_t num_bounds() const { return root_level_relations_.num_bounds(); } - // Return a list of (expr <= ub) sorted by expr. + // Return a list of (expr <= ub) sorted by expr. They are guaranteed to be + // better than the trivial upper bound. std::vector> - GetSortedNonTrivialUpperBounds() const { - return root_level_relations_.GetSortedNonTrivialUpperBounds(); - } + GetSortedNonTrivialUpperBounds() const; - // Return a list of (lb <= expr <= ub) sorted by expr. + // Return a list of (lb <= expr <= ub), with expr.vars[0] = var, where at + // least one of the bounds is non-trivial and the potential other non-trivial + // bound is tight. + // + // As the class name indicates, all bounds are level zero ones. std::vector> - GetSortedNonTrivialBounds() const { - return root_level_relations_.GetSortedNonTrivialBounds(); - } + GetAllBoundsContainingVariable(IntegerVariable var) const; + + // Return a list of (lb <= expr <= ub), with expr.vars = {var1, var2}, where + // at least one of the bounds is non-trivial and the potential other + // non-trivial bound is tight. + // + // As the class name indicates, all bounds are level zero ones. + std::vector> + GetAllBoundsContainingVariables(IntegerVariable var1, + IntegerVariable var2) const; // For a given variable `var`, return all variables `other` so that // LinearExpression2(var, other, 1, 1) has a non trivial upper bound. 
@@ -115,12 +121,39 @@ class RootLevelLinear2Bounds { util_intops::StrongVector> coeff_one_var_lookup_; + // TODO(user): use data structures that consume less memory. A single + // std::vector and hash maps having the index as value + // could be enough. + absl::flat_hash_map< + IntegerVariable, + absl::InlinedVector< + std::tuple, 2>> + var_to_bounds_; + // Map to implement GetAllBoundsContainingVariables(). + absl::flat_hash_map< + std::pair, + absl::InlinedVector< + std::tuple, 1>> + var_pair_to_bounds_; + // Data structure to quickly update var_to_bounds_. Return the index where + // this linear expression appear in the vector for the first and second + // variable. + absl::flat_hash_map> + var_to_bounds_vector_index_; + absl::flat_hash_map var_pair_to_bounds_vector_index_; + // TODO(user): Also push them to a global shared repository after // remapping IntegerVariable to proto indices. BestBinaryRelationBounds root_level_relations_; int64_t num_updates_ = 0; }; +struct FullIntegerPrecedence { + IntegerVariable var; + std::vector indices; + std::vector offsets; +}; + // This class is used to compute the transitive closure of the level-zero // precedence relations. // @@ -223,6 +256,9 @@ class EnforcedLinear2Bounds : public ReversibleInterface { // // This method currently ignores all linear2 expressions with any coefficient // different from 1. + // + // TODO(user): Ideally this should be moved to a new class and maybe augmented + // with other kind of precedences. struct PrecedenceData { IntegerVariable var; int index; @@ -230,21 +266,6 @@ class EnforcedLinear2Bounds : public ReversibleInterface { void CollectPrecedences(absl::Span vars, std::vector* output); - IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const { - return root_level_bounds_->LevelZeroUpperBound(expr); - } - - // Returns the maximum value for expr, and the reason for it (all - // true). 
Note that we always check LevelZeroUpperBound() so if it is better, - // the returned literal reason will be empty. - // - // We separate the two because usually the reason is only needed when we push, - // which happen less often, so we don't mind doing two hash lookups, and we - // really want to optimize the UpperBound() instead. - // - // NOTE: most users will want to call Linear2Bounds::UpperBound() instead. - IntegerValue UpperBound(LinearExpression2 expr) const; - // Low-level function that returns the upper bound if there is some enforced // relations only. Otherwise always returns kMaxIntegerValue. // `expr` must be canonicalized and gcd-reduced. @@ -325,8 +346,7 @@ struct Relation { } }; -// A repository of all the enforced linear constraints of size 1 or 2, and of -// all the non-enforced linear constraints of size 2. +// A repository of all the enforced linear constraints of size 1 or 2. // // TODO(user): This is not always needed, find a way to clean this once we // don't need it. @@ -344,25 +364,6 @@ class BinaryRelationRepository { return lit_to_relations_[lit]; } - // Returns the indices of the non-enforced relations that contain the given - // (positive) variable. - absl::Span IndicesOfRelationsContaining( - IntegerVariable var) const { - if (var >= var_to_relations_.size()) return {}; - return var_to_relations_[var]; - } - - // Returns the indices of the non-enforced relations that contain the given - // (positive) variables. - absl::Span IndicesOfRelationsBetween(IntegerVariable var1, - IntegerVariable var2) const { - if (var1 > var2) std::swap(var1, var2); - const std::pair key(var1, var2); - const auto it = var_pair_to_relations_.find(key); - if (it == var_pair_to_relations_.end()) return {}; - return it->second; - } - // Adds a conditional relation lit => expr \in [lhs, rhs] (one of the coeffs // can be zero). 
void Add(Literal lit, LinearExpression2 expr, IntegerValue lhs, @@ -374,7 +375,7 @@ class BinaryRelationRepository { // Builds the literal to relations mapping. This should be called once all the // relations have been added. - void Build(const RootLevelLinear2Bounds* root_level_bounds); + void Build(); // Assuming level-zero bounds + any (var >= value) in the input map, // fills "output" with a "propagated" set of bounds assuming lit is true (by @@ -386,7 +387,8 @@ class BinaryRelationRepository { // Important: by default this does not call output->clear() so we can take // the max with already inferred bounds. bool PropagateLocalBounds( - const IntegerTrail& integer_trail, Literal lit, + const IntegerTrail& integer_trail, + const RootLevelLinear2Bounds& root_level_bounds, Literal lit, const absl::flat_hash_map& input, absl::flat_hash_map* output) const; @@ -395,10 +397,6 @@ class BinaryRelationRepository { int num_enforced_relations_ = 0; std::vector relations_; CompactVectorVector lit_to_relations_; - CompactVectorVector var_to_relations_; - absl::flat_hash_map, - std::vector> - var_pair_to_relations_; }; // Class that keeps the best upper bound for a*x + b*y by using all the linear3 @@ -485,10 +483,6 @@ class ReifiedLinear2Bounds { LiteralIndex GetReifiedPrecedence(AffineExpression a, AffineExpression b); private: - // Return the pair (a - b) <= rhs. 
- std::pair FromDifference( - const AffineExpression& a, const AffineExpression& b) const; - IntegerEncoder* integer_encoder_; RootLevelLinear2Bounds* best_root_level_bounds_; diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index 0579923c9a..c9c9dbf993 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -40,6 +40,7 @@ namespace { using ::google::protobuf::contrib::parse_proto::ParseTestProto; using ::testing::ElementsAre; +using ::testing::FieldsAre; using ::testing::IsEmpty; using ::testing::UnorderedElementsAre; @@ -64,7 +65,7 @@ std::vector AddVariables(IntegerTrail* integer_trail) { TEST(EnforcedLinear2BoundsTest, BasicAPI) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); - auto* lin2_bounds = model.GetOrCreate(); + auto* root_bounds = model.GetOrCreate(); auto* precedence_builder = model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); @@ -72,41 +73,40 @@ TEST(EnforcedLinear2BoundsTest, BasicAPI) { // Note that odd indices are for the negation. 
IntegerVariable a(0), b(2), c(4), d(6); - EnforcedLinear2Bounds precedences(&model); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -10); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(d, c), -7); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(b, d), -5); + root_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -10); + root_bounds->AddUpperBound(LinearExpression2::Difference(d, c), -7); + root_bounds->AddUpperBound(LinearExpression2::Difference(b, d), -5); precedence_builder->Build(); EXPECT_EQ( - precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, b)), + root_bounds->LevelZeroUpperBound(LinearExpression2::Difference(a, b)), -10); - EXPECT_EQ(precedences.LevelZeroUpperBound( + EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), NegationOf(a))), -10); EXPECT_EQ( - precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, c)), + root_bounds->LevelZeroUpperBound(LinearExpression2::Difference(a, c)), -22); - EXPECT_EQ(precedences.LevelZeroUpperBound( + EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(c), NegationOf(a))), -22); EXPECT_EQ( - precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, d)), + root_bounds->LevelZeroUpperBound(LinearExpression2::Difference(a, d)), -15); - EXPECT_EQ(precedences.LevelZeroUpperBound( + EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(d), NegationOf(a))), -15); EXPECT_EQ( - precedences.LevelZeroUpperBound(LinearExpression2::Difference(d, a)), + root_bounds->LevelZeroUpperBound(LinearExpression2::Difference(d, a)), 100); // Once built, we can update the offsets. // Note however that this would not propagate through the precedence graphs. 
- lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -15); + root_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -15); EXPECT_EQ( - precedences.LevelZeroUpperBound(LinearExpression2::Difference(a, b)), + root_bounds->LevelZeroUpperBound(LinearExpression2::Difference(a, b)), -15); - EXPECT_EQ(precedences.LevelZeroUpperBound( + EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), NegationOf(a))), -15); } @@ -114,7 +114,7 @@ TEST(EnforcedLinear2BoundsTest, BasicAPI) { TEST(EnforcedLinear2BoundsTest, CornerCase1) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); - auto* lin2_bounds = model.GetOrCreate(); + auto* root_bounds = model.GetOrCreate(); auto* precedence_builder = model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); @@ -122,21 +122,20 @@ TEST(EnforcedLinear2BoundsTest, CornerCase1) { // Note that odd indices are for the negation. IntegerVariable a(0), b(2), c(4), d(6); - EnforcedLinear2Bounds precedences(&model); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -10); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(b, c), -7); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(b, d), -5); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(NegationOf(b), a), + root_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -10); + root_bounds->AddUpperBound(LinearExpression2::Difference(b, c), -7); + root_bounds->AddUpperBound(LinearExpression2::Difference(b, d), -5); + root_bounds->AddUpperBound(LinearExpression2::Difference(NegationOf(b), a), -5); precedence_builder->Build(); - EXPECT_EQ(precedences.LevelZeroUpperBound( + EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), a)), -5); - EXPECT_EQ(precedences.LevelZeroUpperBound( + EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), c)), -22); - EXPECT_EQ(precedences.LevelZeroUpperBound( + 
EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), d)), -20); } @@ -144,7 +143,7 @@ TEST(EnforcedLinear2BoundsTest, CornerCase1) { TEST(EnforcedLinear2BoundsTest, CornerCase2) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); - auto* lin2_bounds = model.GetOrCreate(); + auto* root_bounds = model.GetOrCreate(); auto* precedence_builder = model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); @@ -152,13 +151,12 @@ TEST(EnforcedLinear2BoundsTest, CornerCase2) { // Note that odd indices are for the negation. IntegerVariable a(0), b(2), c(4), d(6); - EnforcedLinear2Bounds precedences(&model); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(NegationOf(a), a), + root_bounds->AddUpperBound(LinearExpression2::Difference(NegationOf(a), a), -10); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -7); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, c), -5); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(a, d), -2); - EXPECT_EQ(precedences.LevelZeroUpperBound( + root_bounds->AddUpperBound(LinearExpression2::Difference(a, b), -7); + root_bounds->AddUpperBound(LinearExpression2::Difference(a, c), -5); + root_bounds->AddUpperBound(LinearExpression2::Difference(a, d), -2); + EXPECT_EQ(root_bounds->LevelZeroUpperBound( LinearExpression2::Difference(NegationOf(b), NegationOf(a))), -7); @@ -168,7 +166,7 @@ TEST(EnforcedLinear2BoundsTest, CornerCase2) { TEST(EnforcedLinear2BoundsTest, CoefficientGreaterThanOne) { Model model; IntegerTrail* integer_trail = model.GetOrCreate(); - auto* lin2_bounds = model.GetOrCreate(); + auto* root_bounds = model.GetOrCreate(); auto* precedence_builder = model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); @@ -177,10 +175,10 @@ TEST(EnforcedLinear2BoundsTest, CoefficientGreaterThanOne) { IntegerVariable a(0), b(2), c(4); EnforcedLinear2Bounds precedences(&model); - 
lin2_bounds->AddUpperBound(LinearExpression2(a, b, 3, -4), 7); - lin2_bounds->AddUpperBound(LinearExpression2(a, c, 2, -3), -5); - lin2_bounds->AddUpperBound(LinearExpression2(a, b, 6, -8), 5); - EXPECT_EQ(precedences.LevelZeroUpperBound(LinearExpression2(a, b, 9, -12)), + root_bounds->AddUpperBound(LinearExpression2(a, b, 3, -4), 7); + root_bounds->AddUpperBound(LinearExpression2(a, c, 2, -3), -5); + root_bounds->AddUpperBound(LinearExpression2(a, b, 6, -8), 5); + EXPECT_EQ(root_bounds->LevelZeroUpperBound(LinearExpression2(a, b, 9, -12)), 6); precedence_builder->Build(); @@ -189,7 +187,9 @@ TEST(EnforcedLinear2BoundsTest, CoefficientGreaterThanOne) { TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { Model model; auto* sat_solver = model.GetOrCreate(); + auto* lin2_bounds = model.GetOrCreate(); auto* integer_trail = model.GetOrCreate(); + auto* precedences = model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); const Literal l(model.Add(NewBooleanVariable()), true); @@ -197,17 +197,16 @@ TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { // Note that odd indices are for the negation. IntegerVariable a(0), b(2); - EnforcedLinear2Bounds precedences(&model); - precedences.PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 15); - precedences.PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 20); + precedences->PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 15); + precedences->PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 20); // We only keep the best one. 
EXPECT_EQ( - precedences.UpperBound(LinearExpression2::Difference(a, NegationOf(b))), + lin2_bounds->UpperBound(LinearExpression2::Difference(a, NegationOf(b))), 15); std::vector literal_reason; std::vector integer_reason; - precedences.AddReasonForUpperBoundLowerThan( + precedences->AddReasonForUpperBoundLowerThan( LinearExpression2::Difference(a, NegationOf(b)), 15, &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, ElementsAre(l.Negated())); @@ -215,11 +214,11 @@ TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { // Backtrack works. EXPECT_TRUE(sat_solver->ResetToLevelZero()); EXPECT_EQ( - precedences.UpperBound(LinearExpression2::Difference(a, NegationOf(b))), + lin2_bounds->UpperBound(LinearExpression2::Difference(a, NegationOf(b))), 200); literal_reason.clear(); integer_reason.clear(); - precedences.AddReasonForUpperBoundLowerThan( + precedences->AddReasonForUpperBoundLowerThan( LinearExpression2::Difference(a, NegationOf(b)), kMaxIntegerValue, &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, IsEmpty()); @@ -492,20 +491,20 @@ TEST(EnforcedLinear2BoundsTest, CollectPrecedences) { Model model; auto* integer_trail = model.GetOrCreate(); auto* relations = model.GetOrCreate(); - auto* lin2_bounds = model.GetOrCreate(); + auto* root_bounds = model.GetOrCreate(); std::vector vars = AddVariables(integer_trail); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[0], vars[2]), + root_bounds->AddUpperBound(LinearExpression2::Difference(vars[0], vars[2]), IntegerValue(-1)); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[0], vars[5]), + root_bounds->AddUpperBound(LinearExpression2::Difference(vars[0], vars[5]), IntegerValue(-1)); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), + root_bounds->AddUpperBound(LinearExpression2::Difference(vars[1], vars[2]), IntegerValue(-1)); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[2], vars[4]), + 
root_bounds->AddUpperBound(LinearExpression2::Difference(vars[2], vars[4]), IntegerValue(-1)); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[3], vars[4]), + root_bounds->AddUpperBound(LinearExpression2::Difference(vars[3], vars[4]), IntegerValue(-1)); - lin2_bounds->AddUpperBound(LinearExpression2::Difference(vars[4], vars[5]), + root_bounds->AddUpperBound(LinearExpression2::Difference(vars[4], vars[5]), IntegerValue(-1)); std::vector p; @@ -543,7 +542,7 @@ TEST(BinaryRelationRepositoryTest, Build) { root_level_bounds->Add(LinearExpression2(x, y, 3, -1), 5, 15); root_level_bounds->Add(LinearExpression2::Difference(x, z), 0, 10); repository.AddPartialRelation(lit_b, x, z); - repository.Build(root_level_bounds); + repository.Build(); auto get_rel = [&](absl::Span indexes) { std::vector result; @@ -558,12 +557,6 @@ TEST(BinaryRelationRepositoryTest, Build) { get_rel(all), UnorderedElementsAre( Relation{lit_a, LinearExpression2(x, y, -1, 1), 2, 8}, - Relation{Literal(kNoLiteralIndex), LinearExpression2(x, y, 1, -1), 0, - 5}, - Relation{Literal(kNoLiteralIndex), LinearExpression2(x, y, 3, -1), 5, - 15}, - Relation{Literal(kNoLiteralIndex), LinearExpression2(x, z, 1, -1), 0, - 10}, Relation{lit_a, LinearExpression2(x, y, -3, -2), 1, 15}, Relation{lit_b, LinearExpression2(kNoIntegerVariable, x, 0, -3), 3, 5}, @@ -578,41 +571,32 @@ TEST(BinaryRelationRepositoryTest, Build) { Relation{lit_b, LinearExpression2(kNoIntegerVariable, x, 0, -3), 3, 5}, Relation{lit_b, LinearExpression2(x, z, 1, 1), 0, 0})); + EXPECT_THAT(root_level_bounds->GetAllBoundsContainingVariable(x), + UnorderedElementsAre( + FieldsAre(LinearExpression2(x, NegationOf(y), 1, 1), 0, 5), + + FieldsAre(LinearExpression2(x, NegationOf(y), 3, 1), 5, 15), + FieldsAre(LinearExpression2(x, NegationOf(z), 1, 1), 0, 10))); EXPECT_THAT( - get_rel(repository.IndicesOfRelationsContaining(x)), - UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 1, -1), 0, 5}, - 
Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 3, -1), 5, 15}, - Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, z, 1, -1), 0, 10})); + root_level_bounds->GetAllBoundsContainingVariable(y), + UnorderedElementsAre(FieldsAre(LinearExpression2(y, x, -1, 1), 0, 5), + FieldsAre(LinearExpression2(y, x, -1, 3), 5, 15))); EXPECT_THAT( - get_rel(repository.IndicesOfRelationsContaining(y)), - UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 1, -1), 0, 5}, - Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 3, -1), 5, 15})); + root_level_bounds->GetAllBoundsContainingVariable(z), + UnorderedElementsAre(FieldsAre(LinearExpression2(z, x, -1, 1), 0, 10))); EXPECT_THAT( - get_rel(repository.IndicesOfRelationsContaining(z)), - UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, z, 1, -1), 0, 10})); + root_level_bounds->GetAllBoundsContainingVariables(x, y), + UnorderedElementsAre(FieldsAre(LinearExpression2(x, y, 1, -1), 0, 5), + FieldsAre(LinearExpression2(x, y, 3, -1), 5, 15))); EXPECT_THAT( - get_rel(repository.IndicesOfRelationsBetween(x, y)), - UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 1, -1), 0, 5}, - Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 3, -1), 5, 15})); + root_level_bounds->GetAllBoundsContainingVariables(y, x), + UnorderedElementsAre(FieldsAre(LinearExpression2(y, x, -1, 1), 0, 5), + FieldsAre(LinearExpression2(y, x, -1, 3), 5, 15))); EXPECT_THAT( - get_rel(repository.IndicesOfRelationsBetween(y, x)), - UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 1, -1), 0, 5}, - Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, y, 3, -1), 5, 15})); - EXPECT_THAT( - get_rel(repository.IndicesOfRelationsBetween(x, z)), - UnorderedElementsAre(Relation{Literal(kNoLiteralIndex), - LinearExpression2(x, z, 1, -1), 0, 10})); - EXPECT_THAT(repository.IndicesOfRelationsBetween(z, y), IsEmpty()); 
+ root_level_bounds->GetAllBoundsContainingVariables(x, z), + UnorderedElementsAre(FieldsAre(LinearExpression2(x, z, 1, -1), 0, 10))); + EXPECT_THAT(root_level_bounds->GetAllBoundsContainingVariables(z, y), + IsEmpty()); } std::vector GetRelations(Model& model) { @@ -683,20 +667,16 @@ TEST(BinaryRelationRepositoryTest, LoadCpModelAddUnaryAndBinaryRelations) { LoadCpModel(model_proto, &model); const CpModelMapping& mapping = *model.GetOrCreate(); - EXPECT_THAT(GetRelations(model), - UnorderedElementsAre( - Relation{mapping.Literal(0), - LinearExpression2::Difference(mapping.Integer(2), - mapping.Integer(3)), - 0, 10}, - Relation{mapping.Literal(1), - LinearExpression2(kNoIntegerVariable, - mapping.Integer(2), 0, 1), - 5, 10}, - Relation{Literal(kNoLiteralIndex), - LinearExpression2(mapping.Integer(2), - mapping.Integer(3), 3, -2), - -10, 10})); + EXPECT_THAT( + GetRelations(model), + UnorderedElementsAre(Relation{mapping.Literal(0), + LinearExpression2::Difference( + mapping.Integer(2), mapping.Integer(3)), + 0, 10}, + Relation{mapping.Literal(1), + LinearExpression2(kNoIntegerVariable, + mapping.Integer(2), 0, 1), + 5, 10})); } TEST(BinaryRelationRepositoryTest, @@ -861,13 +841,13 @@ TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_EnforcedRelation) { model.GetOrCreate(); repository.Add(lit_a, LinearExpression2::Difference(y, x), 2, 10); // lit_a => y => x + 2 - repository.Build(root_level_bounds); + repository.Build(); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; - const bool result = - repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + const bool result = repository.PropagateLocalBounds( + *integer_trail, *root_level_bounds, lit_a, input, &output); EXPECT_TRUE(result); EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), @@ -886,13 +866,13 @@ TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_UnenforcedRelation) { 10); // lit_a => y => 
x - 5 root_level_bounds->Add(LinearExpression2(x, y, -1, 1), 2, 10); // y => x + 2 - repository.Build(root_level_bounds); + repository.Build(); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; - const bool result = - repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + const bool result = repository.PropagateLocalBounds( + *integer_trail, *root_level_bounds, lit_a, input, &output); EXPECT_TRUE(result); EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -98), @@ -913,13 +893,13 @@ TEST(BinaryRelationRepositoryTest, 10); // lit_a => y => x - 5 repository.Add(lit_b, LinearExpression2::Difference(y, x), 2, 10); // lit_b => y => x + 2 - repository.Build(root_level_bounds); + repository.Build(); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; - const bool result = - repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + const bool result = repository.PropagateLocalBounds( + *integer_trail, *root_level_bounds, lit_a, input, &output); EXPECT_TRUE(result); EXPECT_THAT(output, IsEmpty()); @@ -936,13 +916,13 @@ TEST(BinaryRelationRepositoryTest, model.GetOrCreate(); repository.Add(lit_a, LinearExpression2::Difference(y, x), 2, 10); // lit_a => y => x + 2 - repository.Build(root_level_bounds); + repository.Build(); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output = {{y, 8}}; - const bool result = - repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + const bool result = repository.PropagateLocalBounds( + *integer_trail, *root_level_bounds, lit_a, input, &output); EXPECT_TRUE(result); EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), @@ -959,13 +939,13 @@ TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_Infeasible) { model.GetOrCreate(); repository.Add(lit_a, 
LinearExpression2::Difference(y, x), 8, 10); // lit_a => y => x + 8 - repository.Build(root_level_bounds); + repository.Build(); IntegerTrail* integer_trail = model.GetOrCreate(); absl::flat_hash_map input = {{x, 3}}; absl::flat_hash_map output; - const bool result = - repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + const bool result = repository.PropagateLocalBounds( + *integer_trail, *root_level_bounds, lit_a, input, &output); EXPECT_FALSE(result); EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -2), @@ -984,15 +964,13 @@ TEST(GreaterThanAtLeastOneOfDetectorTest, AddGreaterThanAtLeastOneOf) { model.Add(ClauseConstraint({lit_a, lit_b, lit_c})); auto* repository = model.GetOrCreate(); - RootLevelLinear2Bounds* root_level_bounds = - model.GetOrCreate(); repository->Add(lit_a, LinearExpression2::Difference(d, a), 2, 1000); // d >= a + 2 repository->Add(lit_b, LinearExpression2::Difference(d, b), -1, 1000); // d >= b -1 repository->Add(lit_c, LinearExpression2::Difference(d, c), 0, 1000); // d >= c - repository->Build(root_level_bounds); + repository->Build(); auto* detector = model.GetOrCreate(); auto* solver = model.GetOrCreate(); @@ -1017,14 +995,12 @@ TEST(GreaterThanAtLeastOneOfDetectorTest, model.Add(ClauseConstraint({lit_a, lit_b, lit_c})); auto* repository = model.GetOrCreate(); - RootLevelLinear2Bounds* root_level_bounds = - model.GetOrCreate(); repository->Add(lit_a, LinearExpression2(a, d, -1, 1), 2, 1000); // d >= a + 2 repository->Add(lit_b, LinearExpression2(b, d, -1, 1), -1, 1000); // d >= b -1 repository->Add(lit_c, LinearExpression2(c, d, -1, 1), 0, 1000); // d >= c - repository->Build(root_level_bounds); + repository->Build(); auto* detector = model.GetOrCreate(); auto* solver = model.GetOrCreate(); diff --git a/ortools/sat/routing_cuts.cc b/ortools/sat/routing_cuts.cc index cdf96d1edb..1da0ac49e1 100644 --- a/ortools/sat/routing_cuts.cc +++ b/ortools/sat/routing_cuts.cc @@ -124,6 +124,7 @@ 
MinOutgoingFlowHelper::MinOutgoingFlowHelper( trail_(*model->GetOrCreate()), integer_trail_(*model->GetOrCreate()), integer_encoder_(*model->GetOrCreate()), + root_level_bounds_(*model->GetOrCreate()), shared_stats_(model->GetOrCreate()), in_subset_(num_nodes, false), index_in_subset_(num_nodes, -1), @@ -629,7 +630,8 @@ int MinOutgoingFlowHelper::ComputeMinOutgoingFlow( // If this arc cannot be taken skip. tmp_lbs.clear(); if (!binary_relation_repository_.PropagateLocalBounds( - integer_trail_, lit, node_var_lower_bounds_[tail], &tmp_lbs)) { + integer_trail_, root_level_bounds_, lit, + node_var_lower_bounds_[tail], &tmp_lbs)) { continue; } @@ -755,8 +757,8 @@ int MinOutgoingFlowHelper::ComputeTightMinOutgoingFlow( // If this arc cannot be taken skip. tmp_lbs.clear(); if (!binary_relation_repository_.PropagateLocalBounds( - integer_trail_, literals_[outgoing_arc_index], path_bounds, - &tmp_lbs)) { + integer_trail_, root_level_bounds_, + literals_[outgoing_arc_index], path_bounds, &tmp_lbs)) { continue; } @@ -916,7 +918,7 @@ bool MinOutgoingFlowHelper::SubsetMightBeServedWithKRoutes( absl::flat_hash_map copy = state.lbs; return binary_relation_repository_.PropagateLocalBounds( - integer_trail_, unique_lit, copy, &state.lbs); + integer_trail_, root_level_bounds_, unique_lit, copy, &state.lbs); }; // We always start with the first node in this case. 
@@ -1011,7 +1013,8 @@ bool MinOutgoingFlowHelper::SubsetMightBeServedWithKRoutes( } } else { if (!binary_relation_repository_.PropagateLocalBounds( - integer_trail_, literal, from_state.lbs, &to_state.lbs)) { + integer_trail_, root_level_bounds_, literal, from_state.lbs, + &to_state.lbs)) { continue; } } @@ -1409,6 +1412,8 @@ class RouteRelationsBuilder { const auto& integer_encoder = *model->GetOrCreate(); const auto& trail = *model->GetOrCreate(); const auto& integer_trail = *model->GetOrCreate(); + const auto& root_level_bounds = + *model->GetOrCreate(); DCHECK_EQ(trail.CurrentDecisionLevel(), 0); flat_arc_dim_relations_ = std::vector( @@ -1532,13 +1537,12 @@ class RouteRelationsBuilder { // Check if we can use non-enforced relations to improve the relations. if (!tail_expr.IsEmpty() && !head_expr.IsEmpty()) { - for (const int relation_index : - binary_relation_repository_.IndicesOfRelationsBetween( + for (const auto& [expr, lb, ub] : + root_level_bounds.GetAllBoundsContainingVariables( tail_expr.var, head_expr.var)) { - ComputeArcRelation( - i, dimension, tail_expr, head_expr, - binary_relation_repository_.relation(relation_index), - integer_trail); + ComputeArcRelation(i, dimension, tail_expr, head_expr, + Relation{Literal(kNoLiteralIndex), expr, lb, ub}, + integer_trail); } } @@ -1858,7 +1862,7 @@ BinaryRelationRepository ComputePartialBinaryRelationRepository( ToPositiveIntegerVariable(vars[1])); } Model empty_model; - repository.Build(empty_model.GetOrCreate()); + repository.Build(); return repository; } @@ -1945,6 +1949,7 @@ class RoutingCutHelper { *model->GetOrCreate()), random_(model->GetOrCreate()), encoder_(model->GetOrCreate()), + root_level_bounds_(*model->GetOrCreate()), in_subset_(num_nodes, false), self_arc_literal_(num_nodes_), self_arc_lp_value_(num_nodes_), @@ -2078,6 +2083,7 @@ class RoutingCutHelper { const BinaryRelationRepository& binary_relation_repository_; ModelRandomGenerator* random_; IntegerEncoder* encoder_; + const 
RootLevelLinear2Bounds& root_level_bounds_; std::vector in_subset_; @@ -2783,7 +2789,8 @@ void RoutingCutHelper::GenerateCutsForInfeasiblePaths( const Literal next_literal = literals_[arc_index]; next_state.bounds = state.bounds; if (binary_relation_repository_.PropagateLocalBounds( - integer_trail_, next_literal, state.bounds, &next_state.bounds)) { + integer_trail_, root_level_bounds_, next_literal, state.bounds, + &next_state.bounds)) { // Do not explore "long" paths to keep the search time bounded. if (path_length < max_path_length) { path_nodes[next_state.last_node] = true; diff --git a/ortools/sat/routing_cuts.h b/ortools/sat/routing_cuts.h index 584e0cca19..0713f01bc4 100644 --- a/ortools/sat/routing_cuts.h +++ b/ortools/sat/routing_cuts.h @@ -545,6 +545,7 @@ class MinOutgoingFlowHelper { const Trail& trail_; const IntegerTrail& integer_trail_; const IntegerEncoder& integer_encoder_; + const RootLevelLinear2Bounds& root_level_bounds_; SharedStatistics* shared_stats_; // Temporary data used by ComputeMinOutgoingFlow(). Always contain default diff --git a/ortools/sat/routing_cuts_test.cc b/ortools/sat/routing_cuts_test.cc index 707b9f8b37..39bb0469ee 100644 --- a/ortools/sat/routing_cuts_test.cc +++ b/ortools/sat/routing_cuts_test.cc @@ -164,7 +164,7 @@ TEST(MinOutgoingFlowHelperTest, CapacityConstraints) { repository->Add(literal, LinearExpression2(loads[head], loads[tail], 1, -1), head_load, 1000); } - repository->Build(model.GetOrCreate()); + repository->Build(); // Subject under test. MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); @@ -239,7 +239,7 @@ TEST_P(DimensionBasedMinOutgoingFlowHelperTest, BasicCapacities) { demands[use_outgoing_load ? 
head : tail], 1000); } } - repository->Build(model.GetOrCreate()); + repository->Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -312,7 +312,7 @@ TEST_P(DimensionBasedMinOutgoingFlowHelperTest, demands[use_outgoing_load ? head : tail], 1000); } } - repository->Build(model.GetOrCreate()); + repository->Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -362,7 +362,7 @@ TEST(MinOutgoingFlowHelperTest, NodeExpressionWithConstant) { // Capacity constraint: (offset_load2 + offset) - load1 >= demand1 repository->Add(literals[0], LinearExpression2(offset_load2, load1, 1, -1), demand1 - offset, 1000); - repository->Build(model.GetOrCreate()); + repository->Build(); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -404,7 +404,7 @@ TEST(MinOutgoingFlowHelperTest, ConstantNodeExpression) { repository->Add(literals[0], LinearExpression2(kNoIntegerVariable, load1, 0, -1), demand1 - load2, 1000); - repository->Build(model.GetOrCreate()); + repository->Build(); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -461,7 +461,7 @@ TEST(MinOutgoingFlowHelperTest, NodeExpressionUsingArcLiteralAsVariable) { // Capacity constraint: load3 - load2 >= demand2. This expands to // (capacity - demand3) - (capacity - demand2 - demand3 * l) >= demand2 which, // when l is 1, simplifies to 0 >= 0. Hence this constraint is ignored. 
- repository->Build(model.GetOrCreate()); + repository->Build(); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -520,7 +520,7 @@ TEST(MinOutgoingFlowHelperTest, // (capacity - demand3) - (capacity - demand2 - demand3 + demand3 * l) >= // demand2 which, when l is 0, simplifies to 0 >= 0. Hence this constraint is // ignored. - repository->Build(model.GetOrCreate()); + repository->Build(); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -577,7 +577,7 @@ TEST(MinOutgoingFlowHelperTest, ArcNodeExpressionsWithSharedVariable) { // Capacity constraint: load3 - load2 >= demand2. This expands to // (capacity - demand3) - (capacity - demand2 - demand3) >= demand2, which // simplifies to 0 >= 0. Hence this constraint is ignored. - repository->Build(model.GetOrCreate()); + repository->Build(); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create( num_nodes, tails, heads, literals, @@ -643,7 +643,7 @@ TEST(MinOutgoingFlowHelperTest, UnaryRelationForTwoNodeExpressions) { // demand1 * x >= capacity repository->Add(literals[1], LinearExpression2(load3, x, 1, demand1), capacity, 1000); - repository->Build(model.GetOrCreate()); + repository->Build(); std::unique_ptr route_relations_helper = RouteRelationsHelper::Create(num_nodes, tails, heads, literals, {AffineExpression(), AffineExpression(load1), @@ -700,7 +700,7 @@ TEST(MinOutgoingFlowHelperTest, NodeMustBeInnerNode) { LinearExpression2(loads[heads[i]], loads[tails[i]], 1, -1), demands[i], 1000); } - repository->Build(model.GetOrCreate()); + repository->Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); @@ -760,7 +760,7 @@ TEST(MinOutgoingFlowHelperTest, BetterUseOfUpperBound) { 
LinearExpression2::Difference(loads[heads[i]], loads[tails[i]]), demands[i], 1000); } - repository->Build(model.GetOrCreate()); + repository->Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( loads.size(), tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -799,7 +799,7 @@ TEST(MinOutgoingFlowHelperTest, DimensionBasedMinOutgoingFlow_IsolatedNodes) { LinearExpression2(variables[head], variables[0], 1, -1), 1, 100); } - repository->Build(model.GetOrCreate()); + repository->Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, *repository); std::unique_ptr route_relations_helper = @@ -850,7 +850,7 @@ TEST(MinOutgoingFlowHelperTest, TimeWindows) { repository->Add(literal, LinearExpression2(times[head], times[tail], 1, -1), travel_time, 1000); } - repository->Build(model.GetOrCreate()); + repository->Build(); // Subject under test. MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); @@ -985,7 +985,7 @@ TEST(MinOutgoingFlowHelperTest, SubsetMightBeServedWithKRoutes) { LinearExpression2(cumul_vars_2[head], cumul_vars_2[tail], 1, -1), load2[head], 10000); } - repository->Build(model.GetOrCreate()); + repository->Build(); const int optimal = SolveTwoDimensionBinPacking(capacity, load1, load2); EXPECT_EQ(optimal, 2); @@ -1057,7 +1057,7 @@ TEST(MinOutgoingFlowHelperTest, SubsetMightBeServedWithKRoutesRandom) { LinearExpression2::Difference(cumul_vars_2[head], cumul_vars_2[tail]), load2[head], 10000); } - repository->Build(model.GetOrCreate()); + repository->Build(); // To check our indices mapping, lets remove a random nodes from the subset std::vector subset; @@ -1186,7 +1186,7 @@ TEST(MinOutgoingFlowHelperTest, LinearExpression2::Difference(cumul_vars[head], cumul_vars[tail]), travel_times[arc], 10000); } - repository->Build(model.GetOrCreate()); + repository->Build(); // Serve everyone but the depot. 
std::vector subset; @@ -1420,7 +1420,7 @@ TEST(RouteRelationsHelperTest, Basic) { repository.Add(literals[2], LinearExpression2(w, v, -1, 1), -100, -3); repository.Add(literals[3], LinearExpression2::Difference(x, w), 5, 100); repository.Add(literals[4], LinearExpression2::Difference(z, y), 7, 100); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1515,7 +1515,7 @@ TEST(RouteRelationsHelperTest, UnenforcedRelations) { bounds->Add(LinearExpression2(c, a, 3, -2), 1, 9); bounds->Add(LinearExpression2(c, a, 1, -1), 5, 5); bounds->Add(LinearExpression2(c, a, 2, -3), 3, 8); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1562,7 +1562,7 @@ TEST(RouteRelationsHelperTest, SeveralVariablesPerNode) { // Weird relation linking time and load variables, causing all the variables // to be in a single "dimension". repository.Add(literals[0], LinearExpression2::Difference(x, a), 0, 100); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1588,7 +1588,7 @@ TEST(RouteRelationsHelperTest, ComplexVariableRelations) { BinaryRelationRepository repository; // "complex" relation with non +1/-1 coefficients. 
repository.Add(literals[0], LinearExpression2(b, a, 10, 1), 0, 150); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = { .num_dimensions = 0, @@ -1621,7 +1621,7 @@ TEST(RouteRelationsHelperTest, TwoUnaryRelationsPerArc) { encoder.AssociateToIntegerEqualValue(literals[0], a, 20); encoder.AssociateToIntegerLiteral(literals[0], {b, 50}); BinaryRelationRepository repository; - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = { .num_dimensions = 0, @@ -1655,7 +1655,7 @@ TEST(RouteRelationsHelperTest, SeveralRelationsPerArc) { repository.Add(literals[1], LinearExpression2::Difference(c, b), 70, 1000); // Add a second relation for some arc. repository.Add(literals[1], LinearExpression2(c, b, 2, -3), 100, 200); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1689,7 +1689,7 @@ TEST(RouteRelationsHelperTest, SeveralArcsPerLiteral) { BinaryRelationRepository repository; repository.Add(literals[0], LinearExpression2::Difference(b, a), 50, 1000); repository.Add(literals[0], LinearExpression2::Difference(c, b), 40, 1000); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1736,7 +1736,7 @@ TEST(RouteRelationsHelperTest, InconsistentRelationIsSkipped) { repository.Add(literals[4], LinearExpression2::Difference(f, b), 4, 4); // Inconsistent relation for arc 5->3 (should be between f and d). 
repository.Add(literals[5], LinearExpression2(f, b, 2, -1), 5, 5); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -1799,7 +1799,7 @@ TEST(RouteRelationsHelperTest, InconsistentRelationWithMultipleArcsPerLiteral) { // be true at the same time, hence the crossed bounds below. repository.Add(literals[4], LinearExpression2::Difference(e, d), 4, 4); repository.Add(literals[5], LinearExpression2::Difference(e, d), 5, 5); - repository.Build(model.GetOrCreate()); + repository.Build(); const RoutingCumulExpressions cumuls = DetectDimensionsAndCumulExpressions( num_nodes, tails, heads, literals, repository); @@ -2436,7 +2436,7 @@ TEST(CreateCVRPCutGeneratorTest, InfeasiblePathCuts) { LinearExpression2(loads[head], loads[tail], 1, -1), demands[tail], 10000); } - repository->Build(model.GetOrCreate()); + repository->Build(); // Enable the cut generator. model.GetOrCreate() ->set_routing_cut_max_infeasible_path_length(10); diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index fa6fe706a5..6dd090fae4 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -371,23 +371,8 @@ bool SchedulingConstraintHelper::NotifyLevelZeroPrecedence(int a, int b) { CHECK(IsPresent(b)); CHECK_EQ(sat_solver_->CurrentDecisionLevel(), 0); - // Convert before <= after to linear2 <= rhs. - LinearExpression2 expr; - IntegerValue rhs; - { - const AffineExpression before = ends_[a]; - const AffineExpression after = starts_[b]; - expr.vars[0] = before.var; - expr.coeffs[0] = before.coeff; - expr.vars[1] = after.var; - expr.coeffs[1] = -after.coeff; - rhs = after.constant - before.constant; - } - - // Canonicalization. - expr.SimpleCanonicalization(); - const IntegerValue gcd = expr.DivideByGcd(); - rhs = FloorRatio(rhs, gcd); + // Convert ends_[a] <= starts[b] to linear2 <= rhs and canonicalize. 
+ const auto [expr, rhs] = EncodeDifferenceLowerThan(ends_[a], starts_[b], 0); // Trivial case. if (expr.coeffs[0] == 0 && expr.coeffs[1] == 0) { From 627085032647df42ad71fa7aeb9a8cba868096fe Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:11:48 +0200 Subject: [PATCH 080/509] bump minizinc challenge tag --- ortools/flatzinc/challenge/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/flatzinc/challenge/Makefile b/ortools/flatzinc/challenge/Makefile index de38f0d3e6..68a33b5fd2 100644 --- a/ortools/flatzinc/challenge/Makefile +++ b/ortools/flatzinc/challenge/Makefile @@ -18,7 +18,7 @@ DOCKER_BUILD_CMD := docker build endif DOCKER_RUN_CMD := docker run --rm --init -MZN_SUFFIX=2025v1 +MZN_SUFFIX=2025v2 DOCKER_NAME=cp-sat-minizinc-challenge MZN_TAG=${DOCKER_NAME}:${MZN_SUFFIX} MZN_LS_TAG=${DOCKER_NAME}-ls:${MZN_SUFFIX} From 43c874a733965e7ce09bf48c80caac498980e3c1 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:18:26 +0200 Subject: [PATCH 081/509] fix --- ortools/sat/python/cp_model_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 2add07de02..47f32d28dd 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -252,7 +252,7 @@ class CpModelTest(absltest.TestCase): y = model.NewIntVar(0, 2, "y") z = model.NewIntVar(0, 3, "z") expr = x - y - 2 * z - self.assertEqual(str(expr), "(-(2 * z) + (x - y))") + self.assertEqual(str(expr), '(x + (-y) + (-(2 * z)))') def test_equality_overload(self) -> None: model = cp_model.CpModel() From 7d58c118f6a53158970e7f074d050ada3dbabc46 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:52:00 +0200 Subject: [PATCH 082/509] improve python exprs --- .../python/model_builder_helper.cc | 39 +++++++++++++++++++ .../python/model_builder_test.py | 14 +++++++ .../wrappers/model_builder_helper.cc | 5 +-- 
ortools/sat/python/cp_model_helper_test.py | 6 +-- ortools/sat/python/cp_model_test.py | 2 +- 5 files changed, 59 insertions(+), 7 deletions(-) diff --git a/ortools/linear_solver/python/model_builder_helper.cc b/ortools/linear_solver/python/model_builder_helper.cc index f085b0fe6a..48f9df1dd0 100644 --- a/ortools/linear_solver/python/model_builder_helper.cc +++ b/ortools/linear_solver/python/model_builder_helper.cc @@ -460,6 +460,26 @@ PYBIND11_MODULE(model_builder_helper, m) { return expr->AddFloat(cst); }, py::arg("cst"), "Returns `self` + `cst`.") + .def( + "__iadd__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other); + return expr; + }, + py::arg("other").none(false), + "Returns the sum of `self` and `other`.") + .def( + "__iadd__", + [](py::object self, double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(cst); + return expr; + }, + py::arg("cst"), "Returns `self` + `cst`.") .def("__radd__", &LinearExpr::Add, py::arg("other").none(false), "Returns `self` + `other`.") .def( @@ -502,6 +522,25 @@ PYBIND11_MODULE(model_builder_helper, m) { return expr->SubFloat(cst); }, py::arg("cst"), "Returns `self` - `cst`.") + .def( + "__isub__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other->Neg()); + return expr; + }, + py::arg("other").none(false), "Returns `self` - `other`.") + .def( + "__isub__", + [](py::object self, double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(-cst); + return expr; + }, + py::arg("cst"), "Returns `self` - `cst`.") .def_property_readonly( "num_exprs", &SumArray::num_exprs, "Returns the number of linear expressions in the sum.") diff --git a/ortools/linear_solver/python/model_builder_test.py b/ortools/linear_solver/python/model_builder_test.py index 475289c4a2..78a3406755 100644 --- 
a/ortools/linear_solver/python/model_builder_test.py +++ b/ortools/linear_solver/python/model_builder_test.py @@ -364,6 +364,20 @@ ENDATA c5 = x - y == 3 self.assertEqual(str(c5), "(x - y) == 3") + def test_large_iadd(self): + model = mb.Model() + s = 0 + for _ in range(300000): + s += model.new_bool_var("") + model.add(s == 10) + + def test_large_isub(self): + model = mb.Model() + s = 0 + for _ in range(300000): + s -= model.new_bool_var("") + model.add(s == 10) + def test_variables(self): model = mb.Model() x = model.new_int_var(0.0, 4.0, "x") diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index def8d92952..611b07058c 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -816,9 +816,8 @@ std::shared_ptr LinearExpr::AddFloat(double cst) { std::shared_ptr LinearExpr::Sub(std::shared_ptr expr) { std::vector> exprs; exprs.push_back(shared_from_this()); - exprs.push_back(expr); - std::vector coeffs = {1.0, -1.0}; - return std::make_shared(exprs, coeffs, 0.0); + exprs.push_back(expr->MulFloat(-1.0)); + return std::make_shared(exprs, 0.0); } std::shared_ptr LinearExpr::SubFloat(double cst) { diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index 46cd288225..b0dd988597 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -303,7 +303,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e5), "(x - 1)") e6 = x - 2 * y self.assertTrue(e6.is_integer()) - self.assertEqual(str(e6), "(x - (2 * y))") + self.assertEqual(str(e6), "(x + (-(2 * y)))") z = TestIntVar(2, "z", True) e7 = -z self.assertTrue(e7.is_integer()) @@ -323,7 +323,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e11), "(x + 2 * y + 3 * z - 5)") e12 = x - y - 2 * z - self.assertEqual(str(e12), "(-(2 * z) + (x - y))") + 
self.assertEqual(str(e12), "(x + (-y) + (-(2 * z)))") def test_float_lin_expr(self): x = TestIntVar(0, "x") @@ -351,7 +351,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e6), "(x + (2.4 * y))") e7 = x - 2.4 * y self.assertFalse(e7.is_integer()) - self.assertEqual(str(e7), "(x - (2.4 * y))") + self.assertEqual(str(e7), "(x + (-(2.4 * y)))") z = TestIntVar(2, "z") e8 = cmh.LinearExpr.sum([x, y, z, -2]) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 47f32d28dd..ce36281958 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -252,7 +252,7 @@ class CpModelTest(absltest.TestCase): y = model.NewIntVar(0, 2, "y") z = model.NewIntVar(0, 3, "z") expr = x - y - 2 * z - self.assertEqual(str(expr), '(x + (-y) + (-(2 * z)))') + self.assertEqual(str(expr), "(x + (-y) + (-(2 * z)))") def test_equality_overload(self) -> None: model = cp_model.CpModel() From b1d5100c3bee0f664754ef257f3941a79e2fd6a9 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 14:04:51 +0200 Subject: [PATCH 083/509] improve python exprs --- .../python/model_builder_helper.cc | 19 ----------- .../wrappers/model_builder_helper.h | 20 +++++------ ortools/sat/python/cp_model_helper.cc | 34 ------------------- ortools/sat/python/cp_model_helper_test.py | 4 +-- ortools/sat/python/cp_model_test.py | 2 +- ortools/sat/python/linear_expr.h | 20 +++++------ 6 files changed, 23 insertions(+), 76 deletions(-) diff --git a/ortools/linear_solver/python/model_builder_helper.cc b/ortools/linear_solver/python/model_builder_helper.cc index 48f9df1dd0..9a84fbc736 100644 --- a/ortools/linear_solver/python/model_builder_helper.cc +++ b/ortools/linear_solver/python/model_builder_helper.cc @@ -550,25 +550,6 @@ PYBIND11_MODULE(model_builder_helper, m) { py::class_, LinearExpr>(m, "AffineExpr") .def(py::init, double, double>()) - .def("__add__", &AffineExpr::Add, py::arg("other").none(false), - "Returns `self` 
+ `other`.") - .def("__add__", &AffineExpr::AddFloat, py::arg("cst"), - "Returns `self` + `cst`.") - .def("__radd__", &AffineExpr::Add, py::arg("other").none(false), - "Returns `self` + `other`.") - .def("__radd__", &AffineExpr::AddFloat, py::arg("cst"), - "Returns `self` + `cst`.") - .def("__sub__", &AffineExpr::Sub, py::arg("other").none(false), - "Returns `self` - `other`.") - .def("__sub__", &AffineExpr::SubFloat, py::arg("cst"), - "Returns `self` - `cst`.") - .def("__rsub__", &AffineExpr::RSubFloat, py::arg("cst"), - "Returns `cst` - `self`.") - .def("__mul__", &AffineExpr::MulFloat, py::arg("cst"), - "Returns `self` * `cst`.") - .def("__rmul__", &AffineExpr::MulFloat, py::arg("cst"), - "Returns `self` * `cst`.") - .def("__neg__", &AffineExpr::Neg, "Returns -`self`.") .def_property_readonly("expression", &AffineExpr ::expression) .def_property_readonly("coefficient", &AffineExpr::coefficient) .def_property_readonly("offset", &AffineExpr::offset); diff --git a/ortools/linear_solver/wrappers/model_builder_helper.h b/ortools/linear_solver/wrappers/model_builder_helper.h index 7c6e4f026f..cfd3de5e0e 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.h +++ b/ortools/linear_solver/wrappers/model_builder_helper.h @@ -63,12 +63,12 @@ class LinearExpr : public std::enable_shared_from_this { static std::shared_ptr Constant(double value); std::shared_ptr Add(std::shared_ptr expr); - std::shared_ptr AddFloat(double cst); + virtual std::shared_ptr AddFloat(double cst); std::shared_ptr Sub(std::shared_ptr expr); - std::shared_ptr SubFloat(double cst); - std::shared_ptr RSubFloat(double cst); - std::shared_ptr MulFloat(double cst); - std::shared_ptr Neg(); + virtual std::shared_ptr SubFloat(double cst); + virtual std::shared_ptr RSubFloat(double cst); + virtual std::shared_ptr MulFloat(double cst); + virtual std::shared_ptr Neg(); std::shared_ptr Eq(std::shared_ptr rhs); std::shared_ptr EqCst(double rhs); @@ -243,11 +243,11 @@ class AffineExpr : public 
LinearExpr { double coefficient() const { return coeff_; } double offset() const { return offset_; } - std::shared_ptr AddFloat(double cst); - std::shared_ptr SubFloat(double cst); - std::shared_ptr RSubFloat(double cst); - std::shared_ptr MulFloat(double cst); - std::shared_ptr Neg(); + std::shared_ptr AddFloat(double cst) override; + std::shared_ptr SubFloat(double cst) override; + std::shared_ptr RSubFloat(double cst) override; + std::shared_ptr MulFloat(double cst) override; + std::shared_ptr Neg() override; private: std::shared_ptr expr_; diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index 371a87b7a2..87ef120b8f 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -1079,40 +1079,6 @@ PYBIND11_MODULE(cp_model_helper, m) { py::class_, LinearExpr>( m, "IntAffine", DOC(operations_research, sat, python, IntAffine)) .def(py::init, int64_t, int64_t>()) - .def("__add__", &LinearExpr::Add, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, Add)) - .def("__add__", &IntAffine::AddInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddInt)) - .def("__add__", &LinearExpr::AddFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddFloat)) - .def("__radd__", &LinearExpr::Add, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, Add)) - .def("__radd__", &IntAffine::AddInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddInt)) - .def("__radd__", &LinearExpr::AddFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddFloat)) - .def("__sub__", &LinearExpr::Sub, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, Sub)) - .def("__sub__", &IntAffine::SubInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, SubInt)) - .def("__sub__", &LinearExpr::SubFloat, py::arg("cst"), - DOC(operations_research, 
sat, python, LinearExpr, SubFloat)) - .def("__rsub__", &LinearExpr::RSub, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, RSub)) - .def("__rsub__", &IntAffine::RSubInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, RSubInt)) - .def("__rsub__", &LinearExpr::SubFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, RSubFloat)) - .def("__mul__", &IntAffine::MulInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, MulInt)) - .def("__mul__", &LinearExpr::MulFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, MulFloat)) - .def("__rmul__", &IntAffine::MulInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, MulInt)) - .def("__rmul__", &LinearExpr::MulFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, MulFloat)) - .def("__neg__", &IntAffine::Neg, - DOC(operations_research, sat, python, LinearExpr, Neg)) .def_property_readonly("expression", &IntAffine::expression, "Returns the linear expression.") .def_property_readonly("coefficient", &IntAffine::coefficient, diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index b0dd988597..d5901787a7 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -303,7 +303,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e5), "(x - 1)") e6 = x - 2 * y self.assertTrue(e6.is_integer()) - self.assertEqual(str(e6), "(x + (-(2 * y)))") + self.assertEqual(str(e6), "(x + (-2 * y))") z = TestIntVar(2, "z", True) e7 = -z self.assertTrue(e7.is_integer()) @@ -323,7 +323,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e11), "(x + 2 * y + 3 * z - 5)") e12 = x - y - 2 * z - self.assertEqual(str(e12), "(x + (-y) + (-(2 * z)))") + self.assertEqual(str(e12), "(x + (-y) + (-2 * z))") def test_float_lin_expr(self): x = TestIntVar(0, "x") diff --git 
a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index ce36281958..aa06c59b2e 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -252,7 +252,7 @@ class CpModelTest(absltest.TestCase): y = model.NewIntVar(0, 2, "y") z = model.NewIntVar(0, 3, "z") expr = x - y - 2 * z - self.assertEqual(str(expr), "(x + (-y) + (-(2 * z)))") + self.assertEqual(str(expr), "(x + (-y) + (-2 * z))") def test_equality_overload(self) -> None: model = cp_model.CpModel() diff --git a/ortools/sat/python/linear_expr.h b/ortools/sat/python/linear_expr.h index 631f17f05f..ae92d1c676 100644 --- a/ortools/sat/python/linear_expr.h +++ b/ortools/sat/python/linear_expr.h @@ -103,27 +103,27 @@ class LinearExpr : public std::enable_shared_from_this { /// Returns (this) + (expr). std::shared_ptr Add(std::shared_ptr other); /// Returns (this) + (cst). - std::shared_ptr AddInt(int64_t cst); + virtual std::shared_ptr AddInt(int64_t cst); /// Returns (this) + (cst). std::shared_ptr AddFloat(double cst); /// Returns (this) - (expr). std::shared_ptr Sub(std::shared_ptr other); /// Returns (this) - (cst). - std::shared_ptr SubInt(int64_t cst); + virtual std::shared_ptr SubInt(int64_t cst); /// Returns (this) - (cst). std::shared_ptr SubFloat(double cst); /// Returns (expr) - (this). std::shared_ptr RSub(std::shared_ptr other); /// Returns (cst) - (this). - std::shared_ptr RSubInt(int64_t cst); + virtual std::shared_ptr RSubInt(int64_t cst); /// Returns (cst) - (this). std::shared_ptr RSubFloat(double cst); /// Returns (this) * (cst). - std::shared_ptr MulInt(int64_t cst); + virtual std::shared_ptr MulInt(int64_t cst); /// Returns (this) * (cst). std::shared_ptr MulFloat(double cst); /// Returns -(this). - std::shared_ptr Neg(); + virtual std::shared_ptr Neg(); /// Returns (this) == (rhs). std::shared_ptr Eq(std::shared_ptr rhs); @@ -381,11 +381,11 @@ class IntAffine : public LinearExpr { /// Returns the offset. 
int64_t offset() const { return offset_; } - std::shared_ptr AddInt(int64_t cst); - std::shared_ptr SubInt(int64_t cst); - std::shared_ptr RSubInt(int64_t cst); - std::shared_ptr MulInt(int64_t cst); - std::shared_ptr Neg(); + std::shared_ptr AddInt(int64_t cst) override; + std::shared_ptr SubInt(int64_t cst) override; + std::shared_ptr RSubInt(int64_t cst) override; + std::shared_ptr MulInt(int64_t cst) override; + std::shared_ptr Neg() override; private: std::shared_ptr expr_; From 4953c79d18004250ccad6b0a0a857ec130e5d1d9 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 14:05:23 +0200 Subject: [PATCH 084/509] [CP-SAT] more work on precedences --- ortools/sat/disjunctive.cc | 47 +++++++++++++++++------------ ortools/sat/integer_search.cc | 17 +++++------ ortools/sat/no_overlap_2d_helper.cc | 8 ++--- ortools/sat/scheduling_helpers.cc | 30 +++++++++++------- ortools/sat/scheduling_helpers.h | 29 +++++++++++++----- 5 files changed, 81 insertions(+), 50 deletions(-) diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index fd4e6b4e58..f5bfad4950 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -276,8 +276,8 @@ bool DisjunctiveWithTwoItems::Propagate() { helper_->ClearReason(); helper_->AddPresenceReason(task_before); helper_->AddPresenceReason(task_after); - helper_->AddReasonForBeingBefore(task_before, task_after); - helper_->AddReasonForBeingBefore(task_after, task_before); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(task_before, task_after); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(task_after, task_before); return helper_->ReportConflict(); } @@ -295,7 +295,8 @@ bool DisjunctiveWithTwoItems::Propagate() { if (helper_->StartMin(task_after) < end_min_before) { // Reason for precedences if both present. 
helper_->ClearReason(); - helper_->AddReasonForBeingBefore(task_before, task_after); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(task_before, + task_after); // Reason for the bound push. helper_->AddPresenceReason(task_before); @@ -311,7 +312,8 @@ bool DisjunctiveWithTwoItems::Propagate() { if (helper_->EndMax(task_before) > start_max_after) { // Reason for precedences if both present. helper_->ClearReason(); - helper_->AddReasonForBeingBefore(task_before, task_after); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(task_before, + task_after); // Reason for the bound push. helper_->AddPresenceReason(task_after); @@ -527,7 +529,7 @@ bool DisjunctiveOverloadChecker::Propagate() { const int to_push = task_with_max_end_min.task_index; helper_->ClearReason(); helper_->AddPresenceReason(task); - helper_->AddReasonForBeingBefore(task, to_push); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(task, to_push); helper_->AddEndMinReason(task, end_min); if (!helper_->IncreaseStartMin(to_push, end_min)) { @@ -754,7 +756,7 @@ bool DisjunctiveSimplePrecedences::Push(TaskTime before, int t) { DCHECK_NE(t_before, t); helper_->ClearReason(); helper_->AddPresenceReason(t_before); - helper_->AddReasonForBeingBefore(t_before, t); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(t_before, t); helper_->AddEndMinReason(t_before, before.time); if (!helper_->IncreaseStartMin(t, before.time)) { return false; @@ -823,8 +825,8 @@ bool DisjunctiveSimplePrecedences::PropagateOneDirection() { helper_->ClearReason(); helper_->AddPresenceReason(blocking_task); helper_->AddPresenceReason(t); - helper_->AddReasonForBeingBefore(blocking_task, t); - helper_->AddReasonForBeingBefore(t, blocking_task); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(blocking_task, t); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(t, blocking_task); return helper_->ReportConflict(); } else if (end_min > best_task_before.time) { best_task_before = {t, end_min}; @@ -932,9 +934,13 @@ bool 
DisjunctiveDetectablePrecedences::Push(IntegerValue task_set_end_min, // Heuristic, if some tasks are known to be after the first one, // we just add the min-size as a reason. + // + // TODO(user): ideally we don't want to do that if we don't have a level + // zero precedence... if (i > critical_index && helper_->GetCurrentMinDistanceBetweenTasks( - sorted_tasks[critical_index].task, ct, - /*add_reason_if_after=*/true) >= 0) { + sorted_tasks[critical_index].task, ct) >= 0) { + helper_->AddReasonForBeingBeforeAssumingNoOverlap( + sorted_tasks[critical_index].task, ct); helper_->AddSizeMinReason(ct); } else { helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, window_start); @@ -942,9 +948,9 @@ bool DisjunctiveDetectablePrecedences::Push(IntegerValue task_set_end_min, // We only need the reason for being before if we don't already have // a static precedence between the tasks. - const IntegerValue dist = helper_->GetCurrentMinDistanceBetweenTasks( - ct, t, /*add_reason_if_after=*/true); + const IntegerValue dist = helper_->GetCurrentMinDistanceBetweenTasks(ct, t); if (dist >= 0) { + helper_->AddReasonForBeingBeforeAssumingNoOverlap(ct, t); energy_of_task_before += sorted_tasks[i].size_min; min_slack = std::min(min_slack, dist); } else { @@ -1052,8 +1058,8 @@ bool DisjunctiveDetectablePrecedences::PropagateWithRanks() { helper_->ClearReason(); helper_->AddPresenceReason(blocking_task); helper_->AddPresenceReason(t); - helper_->AddReasonForBeingBefore(blocking_task, t); - helper_->AddReasonForBeingBefore(t, blocking_task); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(blocking_task, t); + helper_->AddReasonForBeingBeforeAssumingNoOverlap(t, blocking_task); return helper_->ReportConflict(); } else { if (!some_propagation && rank > highest_rank) { @@ -1523,8 +1529,9 @@ bool DisjunctiveNotLast::PropagateSubwindow() { helper_->AddPresenceReason(ct); helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min, window_start); - if 
(helper_->GetCurrentMinDistanceBetweenTasks( - ct, t, /*add_reason_if_after=*/true) < 0) { + if (helper_->GetCurrentMinDistanceBetweenTasks(ct, t) >= 0) { + helper_->AddReasonForBeingBeforeAssumingNoOverlap(ct, t); + } else { helper_->AddStartMaxReason(ct, largest_ct_start_max); } } @@ -1770,9 +1777,11 @@ bool DisjunctiveEdgeFinding::PropagateSubwindow(IntegerValue window_end_min) { task, event_size_[event], event >= second_event ? second_start : first_start); - const IntegerValue dist = helper_->GetCurrentMinDistanceBetweenTasks( - task, gray_task, /*add_reason_if_after=*/true); - if (dist < 0) { + const IntegerValue dist = + helper_->GetCurrentMinDistanceBetweenTasks(task, gray_task); + if (dist >= 0) { + helper_->AddReasonForBeingBeforeAssumingNoOverlap(task, gray_task); + } else { all_before = false; helper_->AddEndMaxReason(task, window_end); } diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index 5af0f3cc82..f98b2935b6 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -872,7 +872,6 @@ std::function CumulativePrecedenceSearchHeuristic( // TODO(user): Add heuristic ordering for creating interesting precedence // first. bool found_precedence_to_add = false; - std::vector conflict; helper->ClearReason(); for (const int s : open_tasks) { for (const int t : open_tasks) { @@ -897,13 +896,13 @@ std::function CumulativePrecedenceSearchHeuristic( // fixed all literal, but if it is not, we can just return this // decision. if (trail->Assignment().LiteralIsFalse(Literal(existing))) { - conflict.push_back(Literal(existing)); + helper->MutableLiteralReason()->push_back(Literal(existing)); continue; } } else { // Make sure s could be before t. 
if (helper->EndMin(s) > helper->StartMax(t)) { - helper->AddReasonForBeingBefore(t, s); + helper->AddReasonForBeingBeforeAssumingNoOverlap(t, s); continue; } @@ -929,24 +928,24 @@ std::function CumulativePrecedenceSearchHeuristic( // // TODO(user): We need to add the reason for demand_min and capacity_max. // TODO(user): unfortunately we can't report it from here. - std::vector integer_reason = - *helper->MutableIntegerReason(); if (!h.capacity.IsConstant()) { - integer_reason.push_back( + helper->MutableIntegerReason()->push_back( integer_trail->UpperBoundAsLiteral(h.capacity)); } const auto& demands = h.demand_helper->Demands(); for (const int t : open_tasks) { if (helper->IsOptional(t)) { CHECK(trail->Assignment().LiteralIsTrue(helper->PresenceLiteral(t))); - conflict.push_back(helper->PresenceLiteral(t).Negated()); + helper->MutableLiteralReason()->push_back( + helper->PresenceLiteral(t).Negated()); } const AffineExpression d = demands[t]; if (!d.IsConstant()) { - integer_reason.push_back(integer_trail->LowerBoundAsLiteral(d)); + helper->MutableIntegerReason()->push_back( + integer_trail->LowerBoundAsLiteral(d)); } } - integer_trail->ReportConflict(conflict, integer_reason); + (void)helper->ReportConflict(); search_helper->NotifyThatConflictWasFoundDuringGetDecision(); if (VLOG_IS_ON(2)) { LOG(INFO) << "Conflict between precedences !"; diff --git a/ortools/sat/no_overlap_2d_helper.cc b/ortools/sat/no_overlap_2d_helper.cc index 9fee042fff..94484b160e 100644 --- a/ortools/sat/no_overlap_2d_helper.cc +++ b/ortools/sat/no_overlap_2d_helper.cc @@ -97,8 +97,8 @@ void ClearAndAddMandatoryOverlapReason(int box1, int box2, y->ClearReason(); y->AddPresenceReason(box1); y->AddPresenceReason(box2); - y->AddReasonForBeingBefore(box1, box2); - y->AddReasonForBeingBefore(box2, box1); + y->AddReasonForBeingBeforeAssumingNoOverlap(box1, box2); + y->AddReasonForBeingBeforeAssumingNoOverlap(box2, box1); } } // namespace @@ -162,7 +162,7 @@ bool 
LeftBoxBeforeRightBoxOnFirstDimension(int left, int right, x->ClearReason(); x->AddPresenceReason(left); x->AddPresenceReason(right); - x->AddReasonForBeingBefore(left, right); + x->AddReasonForBeingBeforeAssumingNoOverlap(left, right); x->AddEndMinReason(left, left_end_min); // left and right must overlap on y. ClearAndAddMandatoryOverlapReason(left, right, y); @@ -177,7 +177,7 @@ bool LeftBoxBeforeRightBoxOnFirstDimension(int left, int right, x->ClearReason(); x->AddPresenceReason(left); x->AddPresenceReason(right); - x->AddReasonForBeingBefore(left, right); + x->AddReasonForBeingBeforeAssumingNoOverlap(left, right); x->AddStartMaxReason(right, right_start_max); // left and right must overlap on y. ClearAndAddMandatoryOverlapReason(left, right, y); diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index 6dd090fae4..9d25b17a7f 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -343,22 +343,15 @@ bool SchedulingConstraintHelper::SynchronizeAndSetTimeDirection( } IntegerValue SchedulingConstraintHelper::GetCurrentMinDistanceBetweenTasks( - int a, int b, bool add_reason_if_after) { + int a, int b) { const AffineExpression before = ends_[a]; const AffineExpression after = starts_[b]; const LinearExpression2 expr(before.var, after.var, before.coeff, -after.coeff); - const IntegerValue expr_ub = linear2_bounds_->UpperBound(expr); const IntegerValue needed_offset = before.constant - after.constant; const IntegerValue ub_of_end_minus_start = expr_ub + needed_offset; const IntegerValue distance = -ub_of_end_minus_start; - if (add_reason_if_after && distance >= 0) { - // TODO(user): be more precise when we know a and b are in disjunction. we - // really just need end_b > start_a. 
- linear2_bounds_->AddReasonForUpperBoundLowerThan( - expr, expr_ub, MutableLiteralReason(), MutableIntegerReason()); - } return distance; } @@ -484,12 +477,27 @@ SchedulingConstraintHelper::GetEnergyProfile() { return energy_profile_; } -// Produces a relaxed reason for StartMax(before) < EndMin(after). -void SchedulingConstraintHelper::AddReasonForBeingBefore(int before, - int after) { +void SchedulingConstraintHelper::AddReasonForBeingBeforeAssumingNoOverlap( + int before, int after) { AddOtherReason(before); AddOtherReason(after); + // Prefer the linear2 explanation as it is more likely this comes from + // level zero or a single enforcement literal. + // We need Start(after) >= End(before) - SizeMin(before). + // we rewrite as "End(before) - Start(after) <= SizeMin(before). + const auto [expr, ub] = + EncodeDifferenceLowerThan(ends_[before], starts_[after], SizeMin(before)); + if (linear2_bounds_->UpperBound(expr) <= ub) { + AddSizeMinReason(before); + linear2_bounds_->AddReasonForUpperBoundLowerThan(expr, ub, &literal_reason_, + &integer_reason_); + return; + } + + // We will explain StartMax(before) < EndMin(after); + DCHECK_LT(StartMax(before), EndMin(after)); + // The reason will be a linear expression greater than a value. Note that all // coeff must be positive, and we will use the variable lower bound. std::vector vars; diff --git a/ortools/sat/scheduling_helpers.h b/ortools/sat/scheduling_helpers.h index d6b53e9dbd..2d1daa3876 100644 --- a/ortools/sat/scheduling_helpers.h +++ b/ortools/sat/scheduling_helpers.h @@ -205,10 +205,12 @@ class SchedulingConstraintHelper : public PropagatorInterface { bool IsPresent(LiteralIndex lit) const; bool IsAbsent(LiteralIndex lit) const; - // Return a value so that End(a) + dist <= Start(b). - // Returns kMinInterValue if we don't have any such relation. - IntegerValue GetCurrentMinDistanceBetweenTasks( - int a, int b, bool add_reason_if_after = false); + // Returns a value so that End(a) + dist <= Start(b). 
+ // + // TODO(user): we use this to optimize some reason, but ideally we only want + // to use linear2 bounds here, not bounds coming from trivial bounds. Make + // sure we have the best possible reason. + IntegerValue GetCurrentMinDistanceBetweenTasks(int a, int b); // We detected a precedence between two tasks at level zero. // This register a new constraint and notify the linear2 root level bounds @@ -275,9 +277,22 @@ class SchedulingConstraintHelper : public PropagatorInterface { void AddEnergyAfterReason(int t, IntegerValue energy_min, IntegerValue time); void AddEnergyMinInIntervalReason(int t, IntegerValue min, IntegerValue max); - // Adds the reason why task "before" must be before task "after". - // That is StartMax(before) < EndMin(after). - void AddReasonForBeingBefore(int before, int after); + // Adds the reason why the task "before" must be before task "after", in + // the sense that "after" can only start at the same time or later than the + // task "before" ends. + // + // Important: this assumes that the two task cannot overlap. So we can have + // a more relaxed reason than Start(after) >= Ends(before). + // + // There are actually many possibilities to explain such relation: + // - StartMax(before) < EndMin(after). + // - We have a linear2: Start(after) >= End(before) - SizeMin(before); + // - etc... + // We try to pick the best one. + // + // TODO(user): Refine the heuritic. Also consider other reason for the + // complex cases where Start() and End() do not use the same integer variable. + void AddReasonForBeingBeforeAssumingNoOverlap(int before, int after); // It is also possible to directly manipulates the underlying reason vectors // that will be used when pushing something. 
From 8abc6da4ddd72d720321f048d308c8a5013ac6f5 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 13 Jun 2025 13:13:26 +0200 Subject: [PATCH 085/509] cleanup python expression code for cp-sat and model-builder --- .../python/model_builder_helper.cc | 81 ++++++-------- .../wrappers/model_builder_helper.cc | 66 +++++++++++ .../wrappers/model_builder_helper.h | 62 ++--------- ortools/sat/python/cp_model_helper.cc | 103 ++++++------------ ortools/sat/python/cp_model_test.py | 8 ++ ortools/sat/python/linear_expr.cc | 14 ++- ortools/sat/python/linear_expr.h | 6 +- 7 files changed, 161 insertions(+), 179 deletions(-) diff --git a/ortools/linear_solver/python/model_builder_helper.cc b/ortools/linear_solver/python/model_builder_helper.cc index 9a84fbc736..8036828159 100644 --- a/ortools/linear_solver/python/model_builder_helper.cc +++ b/ortools/linear_solver/python/model_builder_helper.cc @@ -439,11 +439,7 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other); - return expr; - } - return expr->Add(other); + return (num_uses == 4) ? expr->AddInPlace(other) : expr->Add(other); }, py::arg("other").none(false), "Returns the sum of `self` and `other`.") @@ -453,46 +449,43 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? 
expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); }, py::arg("cst"), "Returns `self` + `cst`.") .def( - "__iadd__", + "__radd__", [](py::object self, std::shared_ptr other) -> std::shared_ptr { + const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - expr->AddInPlace(other); - return expr; - }, - py::arg("other").none(false), - "Returns the sum of `self` and `other`.") - .def( - "__iadd__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(cst); - return expr; + return (num_uses == 4) ? expr->AddInPlace(other) : expr->Add(other); }, py::arg("cst"), "Returns `self` + `cst`.") - .def("__radd__", &LinearExpr::Add, py::arg("other").none(false), - "Returns `self` + `other`.") .def( "__radd__", [](py::object self, double cst) -> std::shared_ptr { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); + }, + py::arg("cst"), "Returns `self` + `cst`.") + .def( + "__iadd__", + [](std::shared_ptr expr, + std::shared_ptr other) -> std::shared_ptr { + return expr->AddInPlace(other); + }, + py::arg("other").none(false), + "Returns the sum of `self` and `other`.") + .def( + "__iadd__", + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(cst); }, py::arg("cst"), "Returns `self` + `cst`.") .def( @@ -502,11 +495,8 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other->Neg()); - return expr; - } - return expr->Sub(other); + return (num_uses == 4) ? 
expr->AddInPlace(other->Neg()) + : expr->Sub(other); }, py::arg("other").none(false), "Returns `self` - `other`.") .def( @@ -515,30 +505,23 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(-cst); - return expr; - } - return expr->SubFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(-cst) + : expr->SubFloat(cst); }, py::arg("cst"), "Returns `self` - `cst`.") .def( "__isub__", - [](py::object self, + [](std::shared_ptr expr, std::shared_ptr other) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); expr->AddInPlace(other->Neg()); - return expr; + return expr->AddInPlace(other->Neg()); }, py::arg("other").none(false), "Returns `self` - `other`.") .def( "__isub__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(-cst); - return expr; + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(-cst); }, py::arg("cst"), "Returns `self` - `cst`.") .def_property_readonly( diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index 611b07058c..bc5bcdf5eb 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -988,6 +988,72 @@ std::string FlatExpr::DebugString() const { return s; } +SumArray::SumArray(std::vector> exprs, + double offset) + : exprs_(std::move(exprs)), offset_(offset) {} + +void SumArray::Visit(ExprVisitor& lin, double c) { + for (int i = 0; i < exprs_.size(); ++i) { + lin.AddToProcess(exprs_[i], c); + } + if (offset_ != 0.0) { + lin.AddConstant(offset_ * c); + } +} + +std::string SumArray::ToString() const { + if (exprs_.empty()) { + if (offset_ != 0.0) { + return absl::StrCat(offset_); + } + } + std::string s = "("; + for (int i = 0; i < exprs_.size(); ++i) { + if (i 
> 0) { + absl::StrAppend(&s, " + "); + } + absl::StrAppend(&s, exprs_[i]->ToString()); + } + if (offset_ != 0.0) { + if (offset_ > 0.0) { + absl::StrAppend(&s, " + ", offset_); + } else { + absl::StrAppend(&s, " - ", -offset_); + } + } + absl::StrAppend(&s, ")"); + return s; +} + +std::string SumArray::DebugString() const { + std::string s = absl::StrCat( + "SumArray(", + absl::StrJoin(exprs_, ", ", + [](std::string* out, std::shared_ptr expr) { + absl::StrAppend(out, expr->DebugString()); + })); + if (offset_ != 0.0) { + absl::StrAppend(&s, ", offset=", offset_); + } + absl::StrAppend(&s, ")"); + return s; +} + +std::shared_ptr SumArray::AddInPlace( + std::shared_ptr expr) { + exprs_.push_back(std::move(expr)); + return shared_from_this(); +} + +std::shared_ptr SumArray::AddFloatInPlace(double cst) { + offset_ += cst; + return shared_from_this(); +} + +int SumArray::num_exprs() const { return exprs_.size(); } + +double SumArray::offset() const { return offset_; } + void FixedValue::Visit(ExprVisitor& lin, double c) { lin.AddConstant(value_ * c); } diff --git a/ortools/linear_solver/wrappers/model_builder_helper.h b/ortools/linear_solver/wrappers/model_builder_helper.h index cfd3de5e0e..4f2371da2c 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.h +++ b/ortools/linear_solver/wrappers/model_builder_helper.h @@ -26,8 +26,6 @@ #include "absl/container/btree_map.h" #include "absl/container/fixed_array.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_join.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/model_exporter.h" #include "ortools/util/solve_interrupter.h" @@ -150,61 +148,17 @@ class FlatExpr : public LinearExpr { class SumArray : public LinearExpr { public: explicit SumArray(std::vector> exprs, - double offset) - : exprs_(std::move(exprs)), offset_(offset) {} + double offset); ~SumArray() override = default; - void Visit(ExprVisitor& lin, double c) override { - for (int i = 0; i < 
exprs_.size(); ++i) { - lin.AddToProcess(exprs_[i], c); - } - if (offset_ != 0.0) { - lin.AddConstant(offset_ * c); - } - } + void Visit(ExprVisitor& lin, double c) override; - std::string ToString() const override { - if (exprs_.empty()) { - if (offset_ != 0.0) { - return absl::StrCat(offset_); - } - } - std::string s = "("; - for (int i = 0; i < exprs_.size(); ++i) { - if (i > 0) { - absl::StrAppend(&s, " + "); - } - absl::StrAppend(&s, exprs_[i]->ToString()); - } - if (offset_ != 0.0) { - if (offset_ > 0.0) { - absl::StrAppend(&s, " + ", offset_); - } else { - absl::StrAppend(&s, " - ", -offset_); - } - } - absl::StrAppend(&s, ")"); - return s; - } - - std::string DebugString() const override { - std::string s = absl::StrCat( - "SumArray(", - absl::StrJoin(exprs_, ", ", - [](std::string* out, std::shared_ptr expr) { - absl::StrAppend(out, expr->DebugString()); - })); - if (offset_ != 0.0) { - absl::StrAppend(&s, ", offset=", offset_); - } - absl::StrAppend(&s, ")"); - return s; - } - - void AddInPlace(std::shared_ptr expr) { exprs_.push_back(expr); } - void AddFloatInPlace(double cst) { offset_ += cst; } - int num_exprs() const { return exprs_.size(); } - double offset() const { return offset_; } + std::string ToString() const override; + std::string DebugString() const override; + std::shared_ptr AddInPlace(std::shared_ptr expr); + std::shared_ptr AddFloatInPlace(double cst); + int num_exprs() const; + double offset() const; private: std::vector> exprs_; diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index 87ef120b8f..10e57c7657 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -897,11 +897,7 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other); - return expr; - } - return expr->Add(other); + return (num_uses == 4) ? 
expr->AddInPlace(other) : expr->Add(other); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Add)) @@ -911,11 +907,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddIntInPlace(cst); - return expr; - } - return expr->AddInt(cst); + return (num_uses == 4) ? expr->AddIntInPlace(cst) + : expr->AddInt(cst); }, DOC(operations_research, sat, python, LinearExpr, AddInt)) .def( @@ -924,11 +917,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, AddFloat)) @@ -938,11 +928,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddIntInPlace(cst); - return expr; - } - return expr->AddInt(cst); + return (num_uses == 4) ? expr->AddIntInPlace(cst) + : expr->AddInt(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddInt)) @@ -952,41 +939,31 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? 
expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddFloat)) .def( "__iadd__", - [](py::object self, + [](std::shared_ptr expr, std::shared_ptr other) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddInPlace(other); - return expr; + return expr->AddInPlace(other); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Add)) .def( "__iadd__", - [](py::object self, int64_t cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddIntInPlace(cst); - return expr; + [](std::shared_ptr expr, + int64_t cst) -> std::shared_ptr { + return expr->AddIntInPlace(cst); }, DOC(operations_research, sat, python, LinearExpr, AddInt)) .def( "__iadd__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(cst); - return expr; + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(cst); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, AddFloat)) @@ -997,11 +974,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other->Neg()); - return expr; - } - return expr->Sub(other); + return (num_uses == 4) ? expr->AddInPlace(other->Neg()) + : expr->Sub(other); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Sub)) @@ -1011,11 +985,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddIntInPlace(-cst); - return expr; - } - return expr->SubInt(cst); + return (num_uses == 4) ? 
expr->AddIntInPlace(-cst) + : expr->SubInt(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubInt)) @@ -1025,41 +996,31 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(-cst); - return expr; - } - return expr->SubFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(-cst) + : expr->SubFloat(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubFloat)) .def( "__isub__", - [](py::object self, + [](std::shared_ptr expr, std::shared_ptr other) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddInPlace(other->MulInt(-1)); - return expr; + return expr->AddInPlace(other->Neg()); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Sub)) .def( "__isub__", - [](py::object self, int64_t cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddIntInPlace(-cst); - return expr; + [](std::shared_ptr expr, + int64_t cst) -> std::shared_ptr { + return expr->AddIntInPlace(-cst); }, DOC(operations_research, sat, python, LinearExpr, SubInt)) .def( "__isub__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(-cst); - return expr; + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(-cst); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, SubFloat)) @@ -1074,8 +1035,6 @@ PYBIND11_MODULE(cp_model_helper, m) { .def_property_readonly("coefficient", &FloatAffine::coefficient) .def_property_readonly("offset", &FloatAffine::offset); - // We adding an operator like __add__(int), we need to add all overloads, - // otherwise they are not found. 
py::class_, LinearExpr>( m, "IntAffine", DOC(operations_research, sat, python, IntAffine)) .def(py::init, int64_t, int64_t>()) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index aa06c59b2e..9bbaee5513 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -2461,6 +2461,14 @@ TRFM""" s -= model.new_bool_var("") model.add(s == 10) + def test_radd(self): + model = cp_model.CpModel() + x = [model.new_int_var(0, 10, f"x{i}") for i in range(10)] + expr = 1 + sum(x) + self.assertEqual( + str(expr), "(x0 + x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + 1)" + ) + def test_simplification1(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") diff --git a/ortools/sat/python/linear_expr.cc b/ortools/sat/python/linear_expr.cc index 42077ef46f..f8c2954f62 100644 --- a/ortools/sat/python/linear_expr.cc +++ b/ortools/sat/python/linear_expr.cc @@ -340,8 +340,20 @@ SumArray::SumArray(std::vector> exprs, DCHECK_GE(exprs_.size(), 2); } -void SumArray::AddInPlace(std::shared_ptr expr) { +std::shared_ptr SumArray::AddInPlace( + std::shared_ptr expr) { exprs_.push_back(std::move(expr)); + return shared_from_this(); +} + +std::shared_ptr SumArray::AddIntInPlace(int64_t cst) { + int_offset_ += cst; + return shared_from_this(); +} + +std::shared_ptr SumArray::AddFloatInPlace(double cst) { + double_offset_ += cst; + return shared_from_this(); } bool SumArray::VisitAsInt(IntExprVisitor& lin, int64_t c) { diff --git a/ortools/sat/python/linear_expr.h b/ortools/sat/python/linear_expr.h index ae92d1c676..06d973f9ea 100644 --- a/ortools/sat/python/linear_expr.h +++ b/ortools/sat/python/linear_expr.h @@ -286,9 +286,9 @@ class SumArray : public LinearExpr { std::string ToString() const override; std::string DebugString() const override; - void AddInPlace(std::shared_ptr expr); - void AddIntInPlace(int64_t cst) { int_offset_ += cst; } - void AddFloatInPlace(double cst) { double_offset_ += cst; } + 
std::shared_ptr AddInPlace(std::shared_ptr expr); + std::shared_ptr AddIntInPlace(int64_t cst); + std::shared_ptr AddFloatInPlace(double cst); int num_exprs() const { return exprs_.size(); } int64_t int_offset() const { return int_offset_; } double double_offset() const { return double_offset_; } From 8ff5dbee6a694c0cc49932d0e2353926a8c87212 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 13 Jun 2025 13:13:51 +0200 Subject: [PATCH 086/509] [CP-SAT] split intervals in disjoint sets for scheduling cuts --- ortools/sat/scheduling_cuts.cc | 125 ++++++++++++++++++++++----------- 1 file changed, 85 insertions(+), 40 deletions(-) diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 9f79aad290..bcbc07ddd0 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -124,6 +124,24 @@ bool DecomposedEnergyIsPropagated(const VariablesAssignment& assignment, int t, return true; } +template +std::vector> SplitEventsInIndendentSets(std::vector& events) { + std::sort(events.begin(), events.end(), [](const E& a, const E& b) { + return std::tie(a.start_min, a.end_max) < std::tie(b.start_min, b.end_max); + }); + std::vector> result; + IntegerValue max_end_max = kMinIntegerValue; + for (const E& event : events) { + if (event.start_min >= max_end_max) { + result.push_back({event}); + } else { + result.back().push_back(event); + } + max_end_max = std::max(max_end_max, event.end_max); + } + return result; +} + } // namespace struct EnergyEvent { @@ -664,15 +682,20 @@ CutGenerator CreateCumulativeEnergyCutGenerator( events.push_back(e); } - if (makespan.has_value() && integer_trail->IsFixed(capacity)) { - GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( - "CumulativeEnergyM", lp_values, events, - integer_trail->FixedValue(capacity), makespan.value(), time_limit, - model, manager); + std::vector> disjoint_events = + SplitEventsInIndendentSets(events); + for (auto& cluster : disjoint_events) { + if 
(makespan.has_value() && integer_trail->IsFixed(capacity)) { + GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( + "CumulativeEnergyM", lp_values, std::move(cluster), + integer_trail->FixedValue(capacity), makespan.value(), time_limit, + model, manager); - } else { - GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, events, - capacity, time_limit, model, manager); + } else { + GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, + std::move(cluster), capacity, + time_limit, model, manager); + } } return true; }; @@ -716,15 +739,19 @@ CutGenerator CreateNoOverlapEnergyCutGenerator( events.push_back(e); } - if (makespan.has_value()) { - GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( - "NoOverlapEnergyM", lp_values, events, - /*capacity=*/IntegerValue(1), makespan.value(), time_limit, model, - manager); - } else { - GenerateCumulativeEnergeticCuts("NoOverlapEnergy", lp_values, events, - /*capacity=*/IntegerValue(1), time_limit, - model, manager); + std::vector> disjoint_events = + SplitEventsInIndendentSets(events); + for (auto& cluster : disjoint_events) { + if (makespan.has_value()) { + GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( + "NoOverlapEnergyM", lp_values, std::move(cluster), + /*capacity=*/IntegerValue(1), makespan.value(), time_limit, model, + manager); + } else { + GenerateCumulativeEnergeticCuts( + "NoOverlapEnergy", lp_values, std::move(cluster), + /*capacity=*/IntegerValue(1), time_limit, model, manager); + } } return true; }; @@ -1014,9 +1041,15 @@ CutGenerator CreateCumulativePrecedenceCutGenerator( } const IntegerValue capacity_max = integer_trail->UpperBound(capacity); - GenerateCutsBetweenPairOfNonOverlappingTasks( - "Cumulative", /* ignore_zero_size_intervals= */ true, - manager->LpValues(), std::move(events), capacity_max, model, manager); + + std::vector> disjoint_events = + SplitEventsInIndendentSets(events); + for (auto& cluster : disjoint_events) { + 
GenerateCutsBetweenPairOfNonOverlappingTasks( + "Cumulative", /* ignore_zero_size_intervals= */ true, + manager->LpValues(), std::move(cluster), capacity_max, model, + manager); + } return true; }; return result; @@ -1042,10 +1075,14 @@ CutGenerator CreateNoOverlapPrecedenceCutGenerator( events.push_back(event); } - GenerateCutsBetweenPairOfNonOverlappingTasks( - "NoOverlap", /* ignore_zero_size_intervals= */ false, - manager->LpValues(), std::move(events), IntegerValue(1), model, - manager); + std::vector> disjoint_events = + SplitEventsInIndendentSets(events); + for (auto& cluster : disjoint_events) { + GenerateCutsBetweenPairOfNonOverlappingTasks( + "NoOverlap", /* ignore_zero_size_intervals= */ false, + manager->LpValues(), std::move(cluster), IntegerValue(1), model, + manager); + } return true; }; @@ -1825,15 +1862,19 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( CtExhaustiveHelper helper; helper.Init(events, model); - if (!GenerateShortCompletionTimeCutsWithExactBound( - "NoOverlapCompletionTimeExhaustive", events, - /*capacity_max=*/IntegerValue(1), helper, model, manager)) { - return false; - } + std::vector> disjoint_events = + SplitEventsInIndendentSets(events); + for (auto& cluster : disjoint_events) { + if (!GenerateShortCompletionTimeCutsWithExactBound( + "NoOverlapCompletionTimeExhaustive", cluster, + /*capacity_max=*/IntegerValue(1), helper, model, manager)) { + return false; + } - GenerateCompletionTimeCutsWithEnergy( - "NoOverlapCompletionTimeQueyrane", std::move(events), - /*capacity_max=*/IntegerValue(1), model, manager); + GenerateCompletionTimeCutsWithEnergy( + "NoOverlapCompletionTimeQueyrane", std::move(cluster), + /*capacity_max=*/IntegerValue(1), model, manager); + } return true; }; if (!generate_cuts(/*time_is_forward=*/true)) return false; @@ -1889,15 +1930,19 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( helper.Init(events, model); const IntegerValue capacity_max = integer_trail->UpperBound(capacity); - if 
(!GenerateShortCompletionTimeCutsWithExactBound( - "CumulativeCompletionTimeExhaustive", events, capacity_max, - helper, model, manager)) { - return false; - } + std::vector> disjoint_events = + SplitEventsInIndendentSets(events); + for (auto& cluster : disjoint_events) { + if (!GenerateShortCompletionTimeCutsWithExactBound( + "CumulativeCompletionTimeExhaustive", cluster, capacity_max, + helper, model, manager)) { + return false; + } - GenerateCompletionTimeCutsWithEnergy("CumulativeCompletionTimeQueyrane", - std::move(events), capacity_max, - model, manager); + GenerateCompletionTimeCutsWithEnergy("CumulativeCompletionTimeQueyrane", + std::move(cluster), capacity_max, + model, manager); + } return true; }; From 63b9ecdfd7bdaafa975b2bd50028c4130c52f5eb Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 13 Jun 2025 13:14:13 +0200 Subject: [PATCH 087/509] [CP-SAT] tweak and improve code --- ortools/sat/integer.cc | 30 ++++++++++++------------------ ortools/sat/integer.h | 34 ++++++++++++++++++++-------------- ortools/sat/integer_base.cc | 20 -------------------- ortools/sat/integer_base.h | 29 ++++++++++++++++++++++++++--- ortools/sat/precedences.cc | 3 +-- 5 files changed, 59 insertions(+), 57 deletions(-) diff --git a/ortools/sat/integer.cc b/ortools/sat/integer.cc index 3adcb4d8ba..5ecc0455a8 100644 --- a/ortools/sat/integer.cc +++ b/ortools/sat/integer.cc @@ -983,7 +983,8 @@ int IntegerTrail::FindTrailIndexOfVarBefore(IntegerVariable var, int IntegerTrail::FindLowestTrailIndexThatExplainBound( IntegerLiteral i_lit) const { DCHECK_LE(i_lit.bound, var_lbs_[i_lit.var]); - if (i_lit.bound <= LevelZeroLowerBound(i_lit.var)) return -1; + DCHECK(!IsTrueAtLevelZero(i_lit)); + int trail_index = var_trail_index_[i_lit.var]; // Check the validity of the cached index and use it if possible. 
This caching @@ -1003,6 +1004,7 @@ int IntegerTrail::FindLowestTrailIndexThatExplainBound( int prev_trail_index = trail_index; while (true) { + ++work_done_in_explain_lower_than_; if (trail_index >= var_trail_index_cache_threshold_) { var_trail_index_cache_[i_lit.var] = trail_index; } @@ -1171,10 +1173,9 @@ std::vector* IntegerTrail::InitializeConflict( lazy_reasons_.back().Explain(conflict, &tmp_queue_); } else { conflict->assign(literals_reason.begin(), literals_reason.end()); - const int num_vars = var_lbs_.size(); for (const IntegerLiteral& literal : bounds_reason) { - const int trail_index = FindLowestTrailIndexThatExplainBound(literal); - if (trail_index >= num_vars) tmp_queue_.push_back(trail_index); + if (IsTrueAtLevelZero(literal)) continue; + tmp_queue_.push_back(FindLowestTrailIndexThatExplainBound(literal)); } } return conflict; @@ -1553,9 +1554,8 @@ bool IntegerTrail::EnqueueInternal( // efficiency and a potential smaller reason. auto* conflict = InitializeConflict(i_lit, use_lazy_reason, literal_reason, integer_reason); - { - const int trail_index = FindLowestTrailIndexThatExplainBound(ub_reason); - if (trail_index >= 0) tmp_queue_.push_back(trail_index); + if (!IsTrueAtLevelZero(ub_reason)) { + tmp_queue_.push_back(FindLowestTrailIndexThatExplainBound(ub_reason)); } MergeReasonIntoInternal(conflict, NextConflictId()); return false; @@ -1771,12 +1771,10 @@ absl::Span IntegerTrail::Dependencies(int reason_index) const { int new_size = 0; int* data = trail_index_reason_buffer_.data() + start; - const int num_vars = var_lbs_.size(); for (int i = start; i < end; ++i) { - const int dep = - FindLowestTrailIndexThatExplainBound(bounds_reason_buffer_[i]); - if (dep >= num_vars) { - data[new_size++] = dep; + const IntegerLiteral to_explain = bounds_reason_buffer_[i]; + if (!IsTrueAtLevelZero(to_explain)) { + data[new_size++] = FindLowestTrailIndexThatExplainBound(to_explain); } } cached_sizes_[reason_index] = new_size; @@ -1818,14 +1816,10 @@ std::vector 
IntegerTrail::ReasonFor(IntegerLiteral literal) const { void IntegerTrail::MergeReasonInto(absl::Span literals, std::vector* output) const { DCHECK(tmp_queue_.empty()); - const int num_vars = var_lbs_.size(); for (const IntegerLiteral& literal : literals) { if (literal.IsAlwaysTrue()) continue; - const int trail_index = FindLowestTrailIndexThatExplainBound(literal); - - // Any indices lower than that means that there is no reason needed. - // Note that it is important for size to be signed because of -1 indices. - if (trail_index >= num_vars) tmp_queue_.push_back(trail_index); + if (IsTrueAtLevelZero(literal)) continue; + tmp_queue_.push_back(FindLowestTrailIndexThatExplainBound(literal)); } return MergeReasonIntoInternal(output, -1); } diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 9802f74a75..14e485fdad 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -523,6 +523,7 @@ class IntegerTrail final : public SatPropagator { // Returns the current value (if known) of an IntegerLiteral. bool IntegerLiteralIsTrue(IntegerLiteral l) const; bool IntegerLiteralIsFalse(IntegerLiteral l) const; + bool IsTrueAtLevelZero(IntegerLiteral l) const; // Returns globally valid lower/upper bound on the given integer variable. IntegerValue LevelZeroLowerBound(IntegerVariable var) const; @@ -796,39 +797,38 @@ class IntegerTrail final : public SatPropagator { void AddAllGreaterThanConstantReason(absl::Span exprs, IntegerValue target_min, std::vector* indices) const { - int64_t num_processed = 0; + constexpr int64_t check_period = 1e6; + int64_t limit_check = work_done_in_explain_lower_than_ + check_period; for (const AffineExpression& expr : exprs) { if (expr.IsConstant()) { DCHECK_GE(expr.constant, target_min); continue; } DCHECK_NE(expr.var, kNoIntegerVariable); + const IntegerLiteral to_explain = expr.GreaterOrEqual(target_min); + if (IsTrueAtLevelZero(to_explain)) continue; // On large routing problems, we can spend a lot of time in this loop. 
- // We check the time limit every 5 processed expressions. - if (++num_processed % 5 == 0 && time_limit_->LimitReached()) return; + if (work_done_in_explain_lower_than_ > limit_check) { + limit_check = work_done_in_explain_lower_than_ + check_period; + if (time_limit_->LimitReached()) return; + } // Skip if we already have an explanation for expr >= target_min. Note // that we already do that while processing the returned indices, so this // mainly save a FindLowestTrailIndexThatExplainBound() call per skipped // indices, which can still be costly. { - const int index = tmp_var_to_trail_index_in_queue_[expr.var]; + const int index = tmp_var_to_trail_index_in_queue_[to_explain.var]; if (index == std::numeric_limits::max()) continue; - if (index > 0 && - expr.ValueAt(integer_trail_[index].bound) >= target_min) { + if (index > 0 && integer_trail_[index].bound >= to_explain.bound) { has_dependency_ = true; continue; } } // We need to find the index that explain the bound. - // Note that this will skip if the condition is true at level zero. - const int index = - FindLowestTrailIndexThatExplainBound(expr.GreaterOrEqual(target_min)); - if (index >= 0) { - indices->push_back(index); - } + indices->push_back(FindLowestTrailIndexThatExplainBound(to_explain)); } } @@ -885,8 +885,8 @@ class IntegerTrail final : public SatPropagator { int64_t conflict_id) const; // Returns the lowest trail index of a TrailEntry that can be used to explain - // the given IntegerLiteral. The literal must be currently true (CHECKed). - // Returns -1 if the explanation is trivial. + // the given IntegerLiteral. The literal must be currently true but not true + // at level zero (DCHECKed). int FindLowestTrailIndexThatExplainBound(IntegerLiteral i_lit) const; // This must be called before Dependencies() or AppendLiteralsReason(). 
@@ -1033,6 +1033,8 @@ class IntegerTrail final : public SatPropagator { std::vector*> watchers_; std::vector reversible_classes_; + mutable int64_t work_done_in_explain_lower_than_ = 0; + mutable Domain temp_domain_; DelayedRootLevelDeduction* delayed_to_fix_; IntegerDomains* domains_; @@ -1417,6 +1419,10 @@ inline bool IntegerTrail::IntegerLiteralIsFalse(IntegerLiteral l) const { return l.bound > UpperBound(l.var); } +inline bool IntegerTrail::IsTrueAtLevelZero(IntegerLiteral l) const { + return l.bound <= LevelZeroLowerBound(l.var); +} + // The level zero bounds are stored at the beginning of the trail and they also // serves as sentinels. Their index match the variables index. inline IntegerValue IntegerTrail::LevelZeroLowerBound( diff --git a/ortools/sat/integer_base.cc b/ortools/sat/integer_base.cc index d514001c31..f39463353f 100644 --- a/ortools/sat/integer_base.cc +++ b/ortools/sat/integer_base.cc @@ -214,26 +214,6 @@ IntegerValue BestBinaryRelationBounds::GetUpperBound( return kMaxIntegerValue; } -// TODO(user): Maybe introduce a CanonicalizedLinear2 class so we automatically -// get the better function, and it documents when we have canonicalized -// expression. 
-IntegerValue BestBinaryRelationBounds::UpperBoundWhenCanonicalized( - LinearExpression2 expr) const { - DCHECK_EQ(expr.DivideByGcd(), 1); - DCHECK(expr.IsCanonicalized()); - const bool negated = expr.NegateForCanonicalization(); - const auto it = best_bounds_.find(expr); - if (it != best_bounds_.end()) { - const auto [known_lb, known_ub] = it->second; - if (negated) { - return -known_lb; - } else { - return known_ub; - } - } - return kMaxIntegerValue; -} - std::vector> BestBinaryRelationBounds::GetSortedNonTrivialUpperBounds() const { std::vector> root_relations_sorted; diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index 572f62a906..ad4331e5a5 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -559,6 +559,28 @@ std::ostream& operator<<(std::ostream& os, const ValueLiteralPair& p); DEFINE_STRONG_INDEX_TYPE(IntervalVariable); const IntervalVariable kNoIntervalVariable(-1); +// This functions appears in hot spot, and so it is important to inline it. +// +// TODO(user): Maybe introduce a CanonicalizedLinear2 class so we automatically +// get the better function, and it documents when we have canonicalized +// expression. +inline IntegerValue BestBinaryRelationBounds::UpperBoundWhenCanonicalized( + LinearExpression2 expr) const { + DCHECK_EQ(expr.DivideByGcd(), 1); + DCHECK(expr.IsCanonicalized()); + const bool negated = expr.NegateForCanonicalization(); + const auto it = best_bounds_.find(expr); + if (it != best_bounds_.end()) { + const auto [known_lb, known_ub] = it->second; + if (negated) { + return -known_lb; + } else { + return known_ub; + } + } + return kMaxIntegerValue; +} + // ============================================================================ // Implementation. 
// ============================================================================ @@ -599,8 +621,8 @@ inline IntegerLiteral AffineExpression::GreaterOrEqual( : IntegerLiteral::FalseLiteral(); } DCHECK_GT(coeff, 0); - return IntegerLiteral::GreaterOrEqual(var, - CeilRatio(bound - constant, coeff)); + return IntegerLiteral::GreaterOrEqual( + var, coeff == 1 ? bound - constant : CeilRatio(bound - constant, coeff)); } // var * coeff + constant <= bound. @@ -610,7 +632,8 @@ inline IntegerLiteral AffineExpression::LowerOrEqual(IntegerValue bound) const { : IntegerLiteral::FalseLiteral(); } DCHECK_GT(coeff, 0); - return IntegerLiteral::LowerOrEqual(var, FloorRatio(bound - constant, coeff)); + return IntegerLiteral::LowerOrEqual( + var, coeff == 1 ? bound - constant : FloorRatio(bound - constant, coeff)); } } // namespace sat diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 2e2a5bcf47..b82d97b8fb 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -1943,8 +1943,7 @@ IntegerValue Linear2Bounds::NonTrivialUpperBoundForGcd1( } DCHECK_NE(expr.coeffs[1], 0); DCHECK_EQ(1, expr.DivideByGcd()); - IntegerValue ub = kMaxIntegerValue; - ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(expr)); + IntegerValue ub = root_level_bounds_->GetUpperBoundNoTrail(expr); ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(expr)); ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(expr)); return ub; From 8a2a537cc5f9009b4902ac14a0207147434f20ce Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 11:48:43 +0200 Subject: [PATCH 088/509] cmake: format samples --- cmake/samples/dotnet/CPSample.cs | 28 +++++++------ cmake/samples/dotnet/LPSample.cs | 22 +++++----- cmake/samples/dotnet/RoutingSample.cs | 60 ++++++++++++++------------- cmake/samples/dotnet/SATSample.cs | 34 ++++++++------- cmake/samples/python/sample.py | 49 ++++++++++++---------- 5 files changed, 105 insertions(+), 88 deletions(-) diff 
--git a/cmake/samples/dotnet/CPSample.cs b/cmake/samples/dotnet/CPSample.cs index 35cb078ddc..f0a64857f6 100644 --- a/cmake/samples/dotnet/CPSample.cs +++ b/cmake/samples/dotnet/CPSample.cs @@ -16,22 +16,26 @@ using Xunit; using Google.OrTools.ConstraintSolver; -namespace Google.OrTools.Tests { - public class ConstraintSolverTest { +namespace Google.OrTools.Tests +{ +public class ConstraintSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - Solver solver = new Solver("Solver"); - IntVar x = solver.MakeIntVar(3, 7, "x"); + public void SolverTest(bool callGC) + { + Solver solver = new Solver("Solver"); + IntVar x = solver.MakeIntVar(3, 7, "x"); - if (callGC) { - GC.Collect(); - } + if (callGC) + { + GC.Collect(); + } - Assert.Equal(3, x.Min()); - Assert.Equal(7, x.Max()); - Assert.Equal("x(3..7)", x.ToString()); + Assert.Equal(3, x.Min()); + Assert.Equal(7, x.Max()); + Assert.Equal("x(3..7)", x.ToString()); } - } +} } // namespace Google.Sample.Tests diff --git a/cmake/samples/dotnet/LPSample.cs b/cmake/samples/dotnet/LPSample.cs index 523a3db9f2..fecda50312 100644 --- a/cmake/samples/dotnet/LPSample.cs +++ b/cmake/samples/dotnet/LPSample.cs @@ -16,19 +16,21 @@ using Xunit; using Google.OrTools.LinearSolver; -namespace Google.OrTools.Tests { - public class LinearSolverTest { +namespace Google.OrTools.Tests +{ +public class LinearSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - Solver solver = new Solver( - "Solver", - Solver.OptimizationProblemType.CLP_LINEAR_PROGRAMMING); + public void SolverTest(bool callGC) + { + Solver solver = new Solver("Solver", Solver.OptimizationProblemType.CLP_LINEAR_PROGRAMMING); - if (callGC) { - GC.Collect(); - } + if (callGC) + { + GC.Collect(); + } } - } +} } // namespace Google.Sample.Tests diff --git a/cmake/samples/dotnet/RoutingSample.cs b/cmake/samples/dotnet/RoutingSample.cs index c2c434ab66..e2ccfb9996 100644 --- 
a/cmake/samples/dotnet/RoutingSample.cs +++ b/cmake/samples/dotnet/RoutingSample.cs @@ -17,36 +17,40 @@ using Xunit; using Google.OrTools.ConstraintSolver; using Google.OrTools.Routing; -namespace Google.OrTools.Tests { - public class RoutingSolverTest { +namespace Google.OrTools.Tests +{ +public class RoutingSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - // Create Routing Index Manager - RoutingIndexManager manager = new RoutingIndexManager( - 5/*locations*/, 1/*vehicle*/, 0/*depot*/); - // Create Routing Model. - RoutingModel routing = new RoutingModel(manager); - // Create a distance callback. - int transitCallbackIndex = routing.RegisterTransitCallback( - (long fromIndex, long toIndex) => { - // Convert from routing variable Index to distance matrix NodeIndex. - var fromNode = manager.IndexToNode(fromIndex); - var toNode = manager.IndexToNode(toIndex); - return Math.Abs(toNode - fromNode); - }); - // Define cost of each arc. - routing.SetArcCostEvaluatorOfAllVehicles(transitCallbackIndex); - if (callGC) { - GC.Collect(); - } - // Setting first solution heuristic. - RoutingSearchParameters searchParameters = RoutingGlobals.DefaultRoutingSearchParameters(); - searchParameters.FirstSolutionStrategy = FirstSolutionStrategy.Types.Value.PathCheapestArc; - Assignment solution = routing.SolveWithParameters(searchParameters); - // 0 --(+1)-> 1 --(+1)-> 2 --(+1)-> 3 --(+1)-> 4 --(+4)-> 0 := +8 - Assert.Equal(8, solution.ObjectiveValue()); + public void SolverTest(bool callGC) + { + // Create Routing Index Manager + RoutingIndexManager manager = new RoutingIndexManager(5 /*locations*/, 1 /*vehicle*/, 0 /*depot*/); + // Create Routing Model. + RoutingModel routing = new RoutingModel(manager); + // Create a distance callback. + int transitCallbackIndex = routing.RegisterTransitCallback((long fromIndex, long toIndex) => + { + // Convert from routing variable Index to + // distance matrix NodeIndex. 
+ var fromNode = manager.IndexToNode(fromIndex); + var toNode = manager.IndexToNode(toIndex); + return Math.Abs(toNode - fromNode); + }); + // Define cost of each arc. + routing.SetArcCostEvaluatorOfAllVehicles(transitCallbackIndex); + if (callGC) + { + GC.Collect(); + } + // Setting first solution heuristic. + RoutingSearchParameters searchParameters = RoutingGlobals.DefaultRoutingSearchParameters(); + searchParameters.FirstSolutionStrategy = FirstSolutionStrategy.Types.Value.PathCheapestArc; + Assignment solution = routing.SolveWithParameters(searchParameters); + // 0 --(+1)-> 1 --(+1)-> 2 --(+1)-> 3 --(+1)-> 4 --(+4)-> 0 := +8 + Assert.Equal(8, solution.ObjectiveValue()); } - } +} } // namespace Google.Sample.Tests diff --git a/cmake/samples/dotnet/SATSample.cs b/cmake/samples/dotnet/SATSample.cs index 51e287e804..7e74e24860 100644 --- a/cmake/samples/dotnet/SATSample.cs +++ b/cmake/samples/dotnet/SATSample.cs @@ -16,26 +16,30 @@ using Xunit; using Google.OrTools.Sat; -namespace Google.OrTools.Tests { - public class SatSolverTest { +namespace Google.OrTools.Tests +{ +public class SatSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - CpModel model = new CpModel(); + public void SolverTest(bool callGC) + { + CpModel model = new CpModel(); - int num_vals = 3; - IntVar x = model.NewIntVar(0, num_vals - 1, "x"); - IntVar y = model.NewIntVar(0, num_vals - 1, "y"); - IntVar z = model.NewIntVar(0, num_vals - 1, "z"); + int num_vals = 3; + IntVar x = model.NewIntVar(0, num_vals - 1, "x"); + IntVar y = model.NewIntVar(0, num_vals - 1, "y"); + IntVar z = model.NewIntVar(0, num_vals - 1, "z"); - model.Add(x != y); + model.Add(x != y); - CpSolver solver = new CpSolver(); - if (callGC) { - GC.Collect(); - } - CpSolverStatus status = solver.Solve(model); + CpSolver solver = new CpSolver(); + if (callGC) + { + GC.Collect(); + } + CpSolverStatus status = solver.Solve(model); } - } +} } // namespace Google.Sample.Tests diff 
--git a/cmake/samples/python/sample.py b/cmake/samples/python/sample.py index 758346bcf1..ebeb7be394 100644 --- a/cmake/samples/python/sample.py +++ b/cmake/samples/python/sample.py @@ -14,12 +14,15 @@ """Sample to test or-tools installation.""" import ortools + # from ortools.algorithms import knapsack_solver from ortools.constraint_solver import pywrapcp + # from ortools.graph.python import linear_sum_assignment # from ortools.graph.python import max_flow # from ortools.graph.python import min_cost_flow from ortools.linear_solver import pywraplp + # from ortools.linear_solver import linear_solver_pb2 # from ortools.sat.python import cp_model_helper # from ortools.sat.python import cp_model @@ -28,34 +31,34 @@ from ortools.linear_solver import pywraplp def lpsolver_test(): - """Test pywraplp.""" - print('Test lpsolver...') - lpsolver = pywraplp.Solver('LinearTest', - pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) - lpsolver.Solve() - print('Test lpsolver...DONE') + """Test pywraplp.""" + print("Test lpsolver...") + lpsolver = pywraplp.Solver("LinearTest", pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) + lpsolver.Solve() + print("Test lpsolver...DONE") def cpsolver_test(): - """Test pywrapcp.""" - print('Test cpsolver...') - cpsolver = pywrapcp.Solver('ConstraintTest') - num_vals = 3 - x = cpsolver.IntVar(0, num_vals - 1, 'x') - y = cpsolver.IntVar(0, num_vals - 1, 'y') - z = cpsolver.IntVar(0, num_vals - 1, 'z') - cpsolver.Add(x != y) - db = cpsolver.Phase([x, y, z], cpsolver.CHOOSE_FIRST_UNBOUND, - cpsolver.ASSIGN_MIN_VALUE) - cpsolver.Solve(db) - print('Test cpsolver...DONE') + """Test pywrapcp.""" + print("Test cpsolver...") + cpsolver = pywrapcp.Solver("ConstraintTest") + num_vals = 3 + x = cpsolver.IntVar(0, num_vals - 1, "x") + y = cpsolver.IntVar(0, num_vals - 1, "y") + z = cpsolver.IntVar(0, num_vals - 1, "z") + cpsolver.Add(x != y) + db = cpsolver.Phase( + [x, y, z], cpsolver.CHOOSE_FIRST_UNBOUND, cpsolver.ASSIGN_MIN_VALUE + ) + cpsolver.Solve(db) + print("Test 
cpsolver...DONE") def main(): - print(ortools.__version__) - lpsolver_test() - cpsolver_test() + print(ortools.__version__) + lpsolver_test() + cpsolver_test() -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() From dd9029afdb8045ac2a0605102d73bed9887059b1 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 08:30:06 +0200 Subject: [PATCH 089/509] tools/release: Workaround for macos x86_64 python build failure --- tools/release/build_delivery_macos.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index c3052fb932..977c27799f 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -277,7 +277,17 @@ function build_python() { echo "DONE" | tee -a build.log echo -n "Build Python ${PY_VERSION}..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" - cmake --build "temp_python${PY_VERSION}" -j8 -v + cmake --build "temp_python${PY_VERSION}" --target ortools -j8 -v + + if [[ ${PLATFORM} == "x86_64" ]]; then + # on macos X86_64 stubgen will timeout -> need to build 2 times + cmake --build "temp_python${PY_VERSION}" -j8 -v || true + sleep 5 + cmake --build "temp_python${PY_VERSION}" -j8 -v + else + cmake --build "temp_python${PY_VERSION}" -j8 -v + fi + echo " Check libortools.dylib..." 
| tee -a build.log otool -L "temp_python${PY_VERSION}/lib/libortools.dylib" | grep -vqz "/Users" echo " DONE" | tee -a build.log From 9831cdb403925c91f3283467ed5cf0ba77bab7dc Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 11:40:09 +0200 Subject: [PATCH 090/509] tools/release: add python build log --- tools/release/build_delivery_macos.sh | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 977c27799f..89f401031a 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -275,23 +275,35 @@ function build_python() { echo -n "Cleaning Python ${PY_VERSION}..." | tee -a build.log rm -rf "temp_python${PY_VERSION}" echo "DONE" | tee -a build.log - echo -n "Build Python ${PY_VERSION}..." | tee -a build.log + + echo "Build Python ${PY_VERSION}..." | tee -a build.log + echo -n " CMake configure..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" + echo "DONE" | tee -a build.log + + echo -n " Build libortools..." | tee -a build.log cmake --build "temp_python${PY_VERSION}" --target ortools -j8 -v + echo "DONE" | tee -a build.log if [[ ${PLATFORM} == "x86_64" ]]; then + echo -n " Build all..." | tee -a build.log # on macos X86_64 stubgen will timeout -> need to build 2 times cmake --build "temp_python${PY_VERSION}" -j8 -v || true + echo "DONE" | tee -a build.log sleep 5 + echo -n " ReBuild all..." | tee -a build.log cmake --build "temp_python${PY_VERSION}" -j8 -v + echo "DONE" | tee -a build.log else + echo -n " Build all..." | tee -a build.log cmake --build "temp_python${PY_VERSION}" -j8 -v + echo "DONE" | tee -a build.log fi - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." 
| tee -a build.log otool -L "temp_python${PY_VERSION}/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log + echo "Build Python ${PY_VERSION}...DONE" | tee -a build.log #cmake --build temp_python${PY_VERSION} --target test #echo "cmake test_python${PY_VERSION}: DONE" | tee -a build.log From 2524635377ce58edab59616e15a949d3a50471b7 Mon Sep 17 00:00:00 2001 From: Florian OMNES Date: Mon, 16 Jun 2025 10:24:21 +0200 Subject: [PATCH 091/509] Fix bz2.dll install path for windows-cpp archive --- patches/bzip2.patch | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/patches/bzip2.patch b/patches/bzip2.patch index ace4852290..ee1caf8d53 100644 --- a/patches/bzip2.patch +++ b/patches/bzip2.patch @@ -1,5 +1,5 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index c4b0b6e..30f7652 100644 +index c4b0b6e..ee39341 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,10 @@ @@ -24,7 +24,7 @@ index c4b0b6e..30f7652 100644 # Windows resource file set(BZ2_RES "") -@@ -299,21 +304,30 @@ endif() +@@ -299,21 +304,32 @@ endif() if(ENABLE_SHARED_LIB) # The libbz2 shared library. @@ -59,13 +59,15 @@ index c4b0b6e..30f7652 100644 + ) + install(TARGETS BZip2 + EXPORT ${PROJECT_NAME}Targets -+ DESTINATION ${CMAKE_INSTALL_LIBDIR}) ++ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} # For Windows DLLs and executables ++ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} # For shared libraries on UNIX ++ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) # For static libs or import libs install(FILES bzlib.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + add_library(BZip2::BZip2 ALIAS BZip2) if(USE_OLD_SONAME) # Hack to support the old libbz2.so.1.0 version by including an extra copy. 
-@@ -323,16 +337,22 @@ if(ENABLE_SHARED_LIB) +@@ -323,16 +339,22 @@ if(ENABLE_SHARED_LIB) add_library(bz2_old_soname SHARED ${BZ2_RES}) target_sources(bz2_old_soname PRIVATE ${BZ2_SOURCES} @@ -92,7 +94,7 @@ index c4b0b6e..30f7652 100644 endif() endif() endif() -@@ -341,9 +361,13 @@ if(ENABLE_STATIC_LIB) +@@ -341,9 +363,13 @@ if(ENABLE_STATIC_LIB) # The libbz2 static library. add_library(bz2_static STATIC) target_sources(bz2_static @@ -109,7 +111,7 @@ index c4b0b6e..30f7652 100644 # Use '-fPIC'/'-fPIE' option for static libraries by default. # You may build with ENABLE_STATIC_LIB_IS_PIC=OFF to disable PIC for the static library. -@@ -357,8 +381,13 @@ if(ENABLE_STATIC_LIB) +@@ -357,8 +383,13 @@ if(ENABLE_STATIC_LIB) SOVERSION ${LT_SOVERSION} ARCHIVE_OUTPUT_NAME bz2_static) target_compile_definitions(bz2_static PUBLIC BZ2_STATICLIB) @@ -124,7 +126,7 @@ index c4b0b6e..30f7652 100644 endif() if(ENABLE_APP) -@@ -373,7 +402,9 @@ if(ENABLE_APP) +@@ -373,7 +404,9 @@ if(ENABLE_APP) else() target_compile_definitions(bzip2 PUBLIC BZ_LCCWIN32=0 BZ_UNIX) endif() @@ -135,7 +137,7 @@ index c4b0b6e..30f7652 100644 # Create bzip2 copies bzcat and bunzip. # The default behavior is altered in bzip2.c code by checking the program name. 
-@@ -391,7 +422,9 @@ if(ENABLE_APP) +@@ -391,7 +424,9 @@ if(ENABLE_APP) else() target_compile_definitions(bzip2recover PUBLIC BZ_LCCWIN32=0 BZ_UNIX) endif() @@ -146,7 +148,7 @@ index c4b0b6e..30f7652 100644 if(ENABLE_EXAMPLES) if(ENABLE_SHARED_LIB) -@@ -399,8 +432,10 @@ if(ENABLE_APP) +@@ -399,8 +434,10 @@ if(ENABLE_APP) add_executable(dlltest) target_sources(dlltest PRIVATE dlltest.c) @@ -159,7 +161,7 @@ index c4b0b6e..30f7652 100644 endif() endif() -@@ -419,6 +454,10 @@ if(ENABLE_APP) +@@ -419,6 +456,10 @@ if(ENABLE_APP) endif() From 35ee27b271f7f1d16f56524d4c37b7e65ccdc544 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 14:54:04 +0200 Subject: [PATCH 092/509] graph: export from google3 dump_vars: Add support for StrongInt and StrongVector --- ortools/base/BUILD.bazel | 4 + ortools/base/dump_vars.h | 11 + ortools/base/dump_vars_test.cc | 18 + ortools/graph/BUILD.bazel | 7 +- ortools/graph/bounded_dijkstra.h | 252 ++++---- ortools/graph/bounded_dijkstra_test.cc | 542 ++++++++++-------- ortools/graph/graph.h | 50 +- ortools/graph/graph_io.h | 4 +- ortools/graph/graph_test.cc | 240 +++++--- .../assignment_linear_sum_assignment.py | 1 + ortools/graph/samples/assignment_min_flow.py | 1 + ortools/graph/samples/balance_min_flow.py | 1 + ortools/graph/samples/dijkstra_directed.cc | 4 +- ortools/graph/samples/dijkstra_undirected.cc | 4 +- .../graph/samples/simple_max_flow_program.py | 1 + .../samples/simple_min_cost_flow_program.py | 1 + 16 files changed, 674 insertions(+), 467 deletions(-) diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index 10318be069..e86da36068 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -183,6 +183,8 @@ cc_library( "//conditions:default": [], }), deps = [ + ":strong_int", + ":strong_vector", "@abseil-cpp//absl/container:inlined_vector", ], ) @@ -199,6 +201,8 @@ cc_test( }), deps = [ ":dump_vars", + ":strong_int", + ":strong_vector", "@abseil-cpp//absl/strings", 
"@googletest//:gtest_main", ], diff --git a/ortools/base/dump_vars.h b/ortools/base/dump_vars.h index 8413948cd3..61e6073084 100644 --- a/ortools/base/dump_vars.h +++ b/ortools/base/dump_vars.h @@ -48,6 +48,8 @@ #include #include "absl/container/inlined_vector.h" +#include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" /* need extra level to force extra eval */ #define DUMP_FOR_EACH_N0(F) @@ -138,6 +140,15 @@ std::ostream& operator<<(std::ostream& os, const ::std::optional& opt) { return os; } +// needed by graph tests +template +std::ostream& operator<<(std::ostream& os, const ::util_intops::StrongVector& vec) { + for (U it : vec) { + os << ::std::to_string(it) << ','; + } + return os; +} + using DumpNames = ::std::vector<::std::string>; struct print_fields { diff --git a/ortools/base/dump_vars_test.cc b/ortools/base/dump_vars_test.cc index 1a295f386a..2dccc6381d 100644 --- a/ortools/base/dump_vars_test.cc +++ b/ortools/base/dump_vars_test.cc @@ -21,6 +21,12 @@ #include #include "gtest/gtest.h" +#include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" + +namespace util_intops { +DEFINE_STRONG_INT_TYPE(CustomStrongInt, uint32_t); +} // namespace util_intops namespace operations_research::base { namespace { @@ -124,6 +130,18 @@ TEST(DumpVars, Vector) { EXPECT_EQ("vec = 49.299999,3.140000,", DUMP_VARS(vec).str()); } +TEST(DumpVars, StrongInt) { + ::util_intops::CustomStrongInt val(42); + EXPECT_EQ(R"(val = 42)", ToString(DUMP_VARS(val))); + EXPECT_EQ(R"(val = 42)", DUMP_VARS(val).str()); +} + +TEST(DumpVars, StrongVector) { + ::util_intops::StrongVector<::util_intops::CustomStrongInt, float> vec = {49.3, 3.14}; + EXPECT_EQ(R"(vec = 49.299999,3.140000,)", ToString(DUMP_VARS(vec))); + EXPECT_EQ(R"(vec = 49.299999,3.140000,)", DUMP_VARS(vec).str()); +} + TEST(DumpVars, Optional) { std::optional of = {}; EXPECT_EQ("of = (none)", ToString(DUMP_VARS(of))); diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index 
d8d0a5c07d..fac523588a 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -52,6 +52,8 @@ cc_test( ":graph", "//ortools/base:gmock_main", "//ortools/base:intops", + "//ortools/base:strong_vector", + "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random", "@abseil-cpp//absl/strings", @@ -86,7 +88,9 @@ cc_library( hdrs = ["bounded_dijkstra.h"], deps = [ ":graph", + "//ortools/base:intops", "//ortools/base:iterator_adaptors", + "//ortools/base:strong_vector", "//ortools/base:threadpool", "//ortools/base:top_n", "@abseil-cpp//absl/algorithm:container", @@ -107,6 +111,7 @@ cc_test( ":test_util", "//ortools/base:dump_vars", "//ortools/base:gmock_main", + "//ortools/base:intops", "//ortools/util:flat_matrix", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random", @@ -858,7 +863,7 @@ cc_test( deps = [ ":iterators", "//ortools/base:gmock_main", - "//ortools/base:strong_int", + "//ortools/base:intops", ], ) diff --git a/ortools/graph/bounded_dijkstra.h b/ortools/graph/bounded_dijkstra.h index e4522e5d90..98a7fc7e5f 100644 --- a/ortools/graph/bounded_dijkstra.h +++ b/ortools/graph/bounded_dijkstra.h @@ -15,8 +15,10 @@ #define OR_TOOLS_GRAPH_BOUNDED_DIJKSTRA_H_ #include +#include #include #include +#include #include #include @@ -25,6 +27,8 @@ #include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/iterator_adaptors.h" +#include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" #include "ortools/base/top_n.h" #include "ortools/graph/graph.h" @@ -54,22 +58,40 @@ namespace operations_research { // is >= limit we will return {limit, {}}. As a consequence any arc length >= // limit is the same as no arc. The code is also overflow-safe and will behave // correctly if the limit is int64max or infinity. 
-template -std::pair> SimpleOneToOneShortestPath( - int source, int destination, absl::Span tails, - absl::Span heads, absl::Span lengths, +template +std::pair> SimpleOneToOneShortestPath( + NodeIndex source, NodeIndex destination, absl::Span tails, + absl::Span heads, absl::Span lengths, DistanceType limit = std::numeric_limits::max()); -template +namespace internal { + +// TODO(user): We should move `is_strong_int` to util/intops/strong_int.h. +template +struct is_strong_int : std::false_type {}; + +template +struct is_strong_int<::util_intops::StrongInt> + : std::true_type {}; + +template +using IndexedVector = + std::conditional_t::value, + ::util_intops::StrongVector, + std::vector>; + +template class ElementGetter { public: - explicit ElementGetter(const std::vector& c) : c_(c) {} - const T& operator()(int index) const { return c_[index]; } + explicit ElementGetter(const IndexedVector& c) : c_(c) {} + const T& operator()(ArcIndex index) const { return c_[index]; } private: - const std::vector& c_; + const IndexedVector& c_; }; +} // namespace internal + // A wrapper that holds the memory needed to run many bounded shortest path // computations on the given graph. The graph must implement the // interface described in graph.h (without the need for reverse arcs). @@ -92,12 +114,20 @@ class ElementGetter { // negative source_offset, arc with a length greater than the distance_limit can // still be considered! template > + class ArcLengthFunctor = internal::ElementGetter< + DistanceType, typename GraphType::ArcIndex>> class BoundedDijkstraWrapper { public: - typedef typename GraphType::NodeIndex node_type; + typedef typename GraphType::NodeIndex NodeIndex; + typedef typename GraphType::ArcIndex ArcIndex; typedef DistanceType distance_type; + // A vector of T, indexed by NodeIndex/ArcIndex. + template + using ByNode = internal::IndexedVector; + template + using ByArc = internal::IndexedVector; + // IMPORTANT: Both arguments must outlive the class. 
The arc lengths cannot be // negative and the vector must be of the correct size (both preconditions are // CHECKed). @@ -106,7 +136,7 @@ class BoundedDijkstraWrapper { // RunBoundedDijkstra(). That's fine. Doing so will obviously invalidate the // reader API of the last Dijkstra run, which could return junk, or crash. BoundedDijkstraWrapper(const GraphType* graph, - const std::vector* arc_lengths); + const ByArc* arc_lengths); // Variant that takes a custom arc length functor and copies it locally. BoundedDijkstraWrapper(const GraphType* graph, @@ -116,8 +146,8 @@ class BoundedDijkstraWrapper { // of the graph within the distance limit (exclusive). The first element of // the returned vector will always be the source_node with a distance of zero. // See RunBoundedDijkstraFromMultipleSources() for more information. - const std::vector& RunBoundedDijkstra(int source_node, - DistanceType distance_limit) { + const std::vector& RunBoundedDijkstra( + NodeIndex source_node, DistanceType distance_limit) { return RunBoundedDijkstraFromMultipleSources({{source_node, 0}}, distance_limit); } @@ -127,7 +157,8 @@ class BoundedDijkstraWrapper { // // If this returns true, you can get the path distance with distances()[to] // and the path with ArcPathTo(to) or NodePathTo(to). 
- bool OneToOneShortestPath(int from, int to, DistanceType distance_limit); + bool OneToOneShortestPath(NodeIndex from, NodeIndex to, + DistanceType distance_limit); // Returns the list of all the nodes which are under the given distance limit // (exclusive) from at least one of the given source nodes (which also have @@ -136,8 +167,8 @@ class BoundedDijkstraWrapper { // By "distance", we mean the length of the shortest path from any source // plus the source's distance offset, where the length of a path is the // sum of the length of its arcs - const std::vector& RunBoundedDijkstraFromMultipleSources( - const std::vector>& + const std::vector& RunBoundedDijkstraFromMultipleSources( + const std::vector>& sources_with_distance_offsets, DistanceType distance_limit); @@ -162,10 +193,11 @@ class BoundedDijkstraWrapper { // // Note that the distances() will take the source offsets into account, // but not the destination offsets. - std::vector RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( - const std::vector>& + std::vector + RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( + const std::vector>& sources_with_distance_offsets, - const std::vector>& + const std::vector>& destinations_with_distance_offsets, int num_destinations_to_reach, DistanceType distance_limit); @@ -174,19 +206,19 @@ class BoundedDijkstraWrapper { // happens at most once per node, when popping it from the Dijkstra queue, // meaning that the node has been fully 'processed'). This callback may modify // the distance limit dynamically, thus affecting the stopping criterion. - const std::vector& RunBoundedDijkstraWithSettledNodeCallback( - const std::vector>& + const std::vector& RunBoundedDijkstraWithSettledNodeCallback( + const std::vector>& sources_with_distance_offsets, - std::function settled_node_callback, DistanceType distance_limit); // Returns true if `node` was reached by the last Run*() call. 
- bool IsReachable(int node) const { return is_reached_[node]; } + bool IsReachable(NodeIndex node) const { return is_reached_[node]; } // Returns all the reached nodes form the previous Run*() call. - const std::vector& reached_nodes() const { return reached_nodes_; } + const ByNode& reached_nodes() const { return reached_nodes_; } // The following vectors are all indexed by graph node indices. // @@ -194,7 +226,7 @@ class BoundedDijkstraWrapper { // reached nodes are updated, the others will contain junk. // The distance of the nodes from their source. - const std::vector& distances() const { return distances_; } + const ByNode& distances() const { return distances_; } // The parent of the nodes in the shortest path from their source. // When a node doesn't have any parent (it has to be a source), its parent @@ -203,27 +235,29 @@ class BoundedDijkstraWrapper { // arcs have a length of zero. // Note also that some sources may have parents, because of the initial // distances. - const std::vector& parents() const { return parents_; } + const ByNode& parents() const { return parents_; } // The arc reaching a given node in the path from their source. // arc_from_source()[x] is undefined (i.e. junk) when parents()[x] == x. - const std::vector& arc_from_source() const { return arc_from_source_; } + const ByNode& arc_from_source() const { return arc_from_source_; } // Returns the list of all the arcs in the shortest path from the node's // source to the node. - std::vector ArcPathTo(int node) const; + std::vector ArcPathTo(NodeIndex node) const; ABSL_DEPRECATED("Use ArcPathTo() instead.") - std::vector ArcPathToNode(int node) const { return ArcPathTo(node); } + std::vector ArcPathToNode(NodeIndex node) const { + return ArcPathTo(node); + } // Returns the list of all the nodes in the shortest path from the node's // source to the node. This always start by the node's source, and end by // the given node. In the case that source == node, returns {node}. 
- std::vector NodePathTo(int node) const; + std::vector NodePathTo(NodeIndex node) const; // Returns the node's source. This is especially useful when running // Dijkstras from multiple sources. - int SourceOfShortestPathToNode(int node) const; + NodeIndex SourceOfShortestPathToNode(NodeIndex node) const; // Original Source/Destination index extraction, after a call to the // multi-source and/or multi-destination variants: @@ -239,16 +273,16 @@ class BoundedDijkstraWrapper { // rely on the value. // // These methods are invalidated by the next RunBoundedDijkstra*() call. - int GetSourceIndex(int node) const; - int GetDestinationIndex(int node) const; + int GetSourceIndex(NodeIndex node) const; + int GetDestinationIndex(NodeIndex node) const; // Trivial accessors to the underlying graph and arc lengths. const GraphType& graph() const { return *graph_; } - const std::vector& arc_lengths() const { + const ByArc& arc_lengths() const { CHECK(arc_lengths_); return *arc_lengths_; } - DistanceType GetArcLength(int arc) const { + DistanceType GetArcLength(ArcIndex arc) const { const DistanceType length = arc_length_functor_(arc); DCHECK_GE(length, 0); return length; @@ -262,18 +296,18 @@ class BoundedDijkstraWrapper { // The Graph and length of each arc. const GraphType* const graph_; ArcLengthFunctor arc_length_functor_; - const std::vector* const arc_lengths_; + const ByArc* const arc_lengths_; // Data about the last Dijkstra run. - std::vector distances_; - std::vector parents_; - std::vector arc_from_source_; - std::vector is_reached_; - std::vector reached_nodes_; + ByNode distances_; + ByNode parents_; + ByNode arc_from_source_; + ByNode is_reached_; + std::vector reached_nodes_; // Priority queue of nodes, ordered by their distance to the source. struct NodeDistance { - node_type node; // The target node. + NodeIndex node; // The target node. DistanceType distance; // Its distance from the source. 
bool operator<(const NodeDistance& other) const { @@ -287,7 +321,7 @@ class BoundedDijkstraWrapper { // or ieee754 floating-point, when the machine is little endian, and // when the total size of NodeDistance equals 16 bytes). // And here are the speeds of the BM_GridGraph benchmark (in which - // DistanceType=int64_t and node_type=int32_t), done with benchy + // DistanceType=int64_t and NodeIndex=int32_t), done with benchy // --runs=20: 0) BM_GridGraph 9.22ms ± 5% BM_GridGraph 3.19ms // ± 6% 1) BM_GridGraph 8.89ms ± 4% BM_GridGraph 3.07ms ± // 3% 2) BM_GridGraph 8.61ms ± 3% BM_GridGraph 3.13ms ± 6% @@ -303,8 +337,8 @@ class BoundedDijkstraWrapper { // The vectors are only allocated after they are first used. // Between calls, is_destination_ is all false, and the rest is junk. std::vector is_destination_; - std::vector node_to_source_index_; - std::vector node_to_destination_index_; + ByNode node_to_source_index_; + ByNode node_to_destination_index_; }; // ----------------------------------------------------------------------------- @@ -314,12 +348,12 @@ class BoundedDijkstraWrapper { template BoundedDijkstraWrapper:: BoundedDijkstraWrapper(const GraphType* graph, - const std::vector* arc_lengths) + const ByArc* arc_lengths) : graph_(graph), arc_length_functor_(*arc_lengths), arc_lengths_(arc_lengths) { CHECK(arc_lengths_ != nullptr); - CHECK_EQ(arc_lengths_->size(), graph->num_arcs()); + CHECK_EQ(ArcIndex(arc_lengths_->size()), graph->num_arcs()); for (const DistanceType length : *arc_lengths) { CHECK_GE(length, 0); } @@ -341,10 +375,10 @@ BoundedDijkstraWrapper:: arc_lengths_(other.arc_lengths_) {} template -const std::vector& +const std::vector& BoundedDijkstraWrapper:: RunBoundedDijkstraFromMultipleSources( - const std::vector>& + const std::vector>& sources_with_distance_offsets, DistanceType distance_limit) { return RunBoundedDijkstraWithSettledNodeCallback( @@ -352,12 +386,12 @@ BoundedDijkstraWrapper:: } template -std::vector +std::vector 
BoundedDijkstraWrapper:: RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( - const std::vector>& + const std::vector>& sources_with_distance_offsets, - const std::vector>& + const std::vector>& destinations_with_distance_offsets, int num_destinations_to_reach, DistanceType distance_limit) { if (destinations_with_distance_offsets.empty()) return {}; @@ -368,22 +402,22 @@ BoundedDijkstraWrapper:: // to reduce the search space. DCHECK_GE(num_destinations_to_reach, 0); int num_destinations = 0; - is_destination_.resize(graph_->num_nodes(), false); + is_destination_.resize(static_cast(graph_->num_nodes()), false); node_to_destination_index_.resize(graph_->num_nodes(), -1); DistanceType min_destination_distance_offset = destinations_with_distance_offsets[0].second; for (int i = 0; i < destinations_with_distance_offsets.size(); ++i) { - const int node = destinations_with_distance_offsets[i].first; + const NodeIndex node = destinations_with_distance_offsets[i].first; const DistanceType distance = destinations_with_distance_offsets[i].second; - if (!is_destination_[node]) ++num_destinations; + if (!is_destination_[static_cast(node)]) ++num_destinations; // Skip useless repetitions. 
- if (is_destination_[node] && + if (is_destination_[static_cast(node)] && distance >= destinations_with_distance_offsets[node_to_destination_index_[node]] .second) { continue; } - is_destination_[node] = true; + is_destination_[static_cast(node)] = true; node_to_destination_index_[node] = i; min_destination_distance_offset = std::min(min_destination_distance_offset, distance); @@ -395,13 +429,13 @@ BoundedDijkstraWrapper:: gtl::TopN> closest_destinations( /*limit=*/num_destinations_to_reach); - std::function + std::function settled_node_callback = [this, num_destinations_to_reach, min_destination_distance_offset, &destinations_with_distance_offsets, &closest_destinations]( - node_type settled_node, DistanceType settled_distance, + NodeIndex settled_node, DistanceType settled_distance, DistanceType* distance_limit) { - if (!is_destination_[settled_node]) return; + if (!is_destination_[static_cast(settled_node)]) return; const DistanceType distance = settled_distance + destinations_with_distance_offsets @@ -423,12 +457,12 @@ BoundedDijkstraWrapper:: // Clean up, sparsely, for the next call. for (const auto& [node, _] : destinations_with_distance_offsets) { - is_destination_[node] = false; + is_destination_[static_cast(node)] = false; } // Return the closest "num_destinations_to_reach" reached destinations, // sorted by distance. 
- std::vector sorted_destinations; + std::vector sorted_destinations; sorted_destinations.reserve(closest_destinations.size()); for (const NodeDistance& d : closest_destinations.Take()) { sorted_destinations.push_back(d.node); @@ -438,10 +472,11 @@ BoundedDijkstraWrapper:: template bool BoundedDijkstraWrapper:: - OneToOneShortestPath(int from, int to, DistanceType distance_limit) { + OneToOneShortestPath(NodeIndex from, NodeIndex to, + DistanceType distance_limit) { bool reached = false; - std::function - settled_node_callback = [to, &reached](node_type node, + std::function + settled_node_callback = [to, &reached](NodeIndex node, DistanceType distance, DistanceType* distance_limit) { if (node != to) return; @@ -456,18 +491,18 @@ bool BoundedDijkstraWrapper:: } template -const std::vector& +const std::vector& BoundedDijkstraWrapper:: RunBoundedDijkstraWithSettledNodeCallback( - const std::vector>& + const std::vector>& sources_with_distance_offsets, - std::function settled_node_callback, DistanceType distance_limit) { // Sparse clear is_reached_ from the last call. - for (const int node : reached_nodes_) { + for (const NodeIndex node : reached_nodes_) { is_reached_[node] = false; } reached_nodes_.clear(); @@ -475,15 +510,15 @@ BoundedDijkstraWrapper:: is_reached_.resize(graph_->num_nodes(), false); distances_.resize(graph_->num_nodes(), distance_limit); - parents_.resize(graph_->num_nodes(), std::numeric_limits::min()); - arc_from_source_.resize(graph_->num_nodes(), -1); + parents_.resize(graph_->num_nodes(), std::numeric_limits::min()); + arc_from_source_.resize(graph_->num_nodes(), GraphType::kNilArc); // Initialize sources. 
CHECK(queue_.empty()); node_to_source_index_.resize(graph_->num_nodes(), -1); for (int i = 0; i < sources_with_distance_offsets.size(); ++i) { - const int node = sources_with_distance_offsets[i].first; - DCHECK_GE(node, 0); + const NodeIndex node = sources_with_distance_offsets[i].first; + DCHECK_GE(node, NodeIndex(0)); DCHECK_LT(node, graph_->num_nodes()); const DistanceType distance = sources_with_distance_offsets[i].second; // Sources with an initial distance ≥ limit are *not* reached. @@ -498,7 +533,7 @@ BoundedDijkstraWrapper:: node_to_source_index_[node] = i; distances_[node] = distance; } - for (const int source : reached_nodes_) { + for (const NodeIndex source : reached_nodes_) { queue_.push_back({source, distances_[source]}); } std::make_heap(queue_.begin(), queue_.end(), std::greater()); @@ -533,7 +568,8 @@ BoundedDijkstraWrapper:: // Visit the neighbors. const DistanceType limit = distance_limit - top.distance; - for (const int arc : graph_->OutgoingArcs(top.node)) { + for (const typename GraphType::ArcIndex arc : + graph_->OutgoingArcs(top.node)) { // Overflow-safe check of top.distance + arc_length >= distance_limit. 
// This works since we know top.distance < distance_limit, as long as we // don't have negative top.distance (which might happen with negative @@ -543,7 +579,7 @@ BoundedDijkstraWrapper:: if (arc_length >= limit) continue; const DistanceType candidate_distance = top.distance + arc_length; - const int head = graph_->Head(arc); + const NodeIndex head = graph_->Head(arc); if (is_reached_[head]) { if (candidate_distance >= distances_[head]) continue; } else { @@ -563,14 +599,14 @@ BoundedDijkstraWrapper:: } template -std::vector +std::vector BoundedDijkstraWrapper::ArcPathTo( - int node) const { - std::vector output; + NodeIndex node) const { + std::vector output; int loop_detector = 0; while (true) { - DCHECK_GE(node, 0); - DCHECK_LT(node, parents_.size()); + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(parents_.size())); CHECK_LT(loop_detector++, parents_.size()); if (parents_[node] == node) break; output.push_back(arc_from_source_[node]); @@ -581,14 +617,14 @@ BoundedDijkstraWrapper::ArcPathTo( } template -std::vector +std::vector BoundedDijkstraWrapper::NodePathTo( - int node) const { - std::vector output; + NodeIndex node) const { + std::vector output; int loop_detector = 0; while (true) { - DCHECK_GE(node, 0); - DCHECK_LT(node, parents_.size()); + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(parents_.size())); CHECK_LT(loop_detector++, parents_.size()); output.push_back(node); if (parents_[node] == node) break; @@ -599,27 +635,28 @@ BoundedDijkstraWrapper::NodePathTo( } template -int BoundedDijkstraWrapper:: - SourceOfShortestPathToNode(int node) const { - int parent = node; +typename GraphType::NodeIndex BoundedDijkstraWrapper< + GraphType, DistanceType, + ArcLengthFunctor>::SourceOfShortestPathToNode(NodeIndex node) const { + NodeIndex parent = node; while (parents_[parent] != parent) parent = parents_[parent]; return parent; } template int BoundedDijkstraWrapper::GetSourceIndex(int node) const { - DCHECK_GE(node, 0); - 
DCHECK_LT(node, node_to_source_index_.size()); + ArcLengthFunctor>::GetSourceIndex(NodeIndex node) + const { + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(node_to_source_index_.size())); return node_to_source_index_[node]; } template -int BoundedDijkstraWrapper::GetDestinationIndex(int node) - const { - DCHECK_GE(node, 0); - DCHECK_LT(node, node_to_destination_index_.size()); +int BoundedDijkstraWrapper:: + GetDestinationIndex(NodeIndex node) const { + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(node_to_destination_index_.size())); return node_to_destination_index_[node]; } @@ -627,37 +664,38 @@ int BoundedDijkstraWrapper -std::pair> SimpleOneToOneShortestPath( - int source, int destination, absl::Span tails, - absl::Span heads, absl::Span lengths, +template +std::pair> SimpleOneToOneShortestPath( + NodeIndex source, NodeIndex destination, absl::Span tails, + absl::Span heads, absl::Span lengths, DistanceType limit) { + using ArcIndex = NodeIndex; // Compute the number of nodes. // // This is not necessary, but is a good practice to allocate the graph size in // one go. We also do some basic validation. CHECK_GE(source, 0); CHECK_GE(destination, 0); - int num_nodes = std::max(source + 1, destination + 1); - for (const int tail : tails) { + NodeIndex num_nodes = std::max(source + 1, destination + 1); + for (const NodeIndex tail : tails) { CHECK_GE(tail, 0); num_nodes = std::max(tail + 1, num_nodes); } - for (const int head : heads) { + for (const NodeIndex head : heads) { CHECK_GE(head, 0); num_nodes = std::max(head + 1, num_nodes); } // The number of arcs. - const int num_arcs = tails.size(); + const ArcIndex num_arcs = tails.size(); CHECK_EQ(num_arcs, heads.size()); CHECK_EQ(num_arcs, lengths.size()); // Build the graph. Note that this permutes arc indices for speed, but we // don't care here since we will return a node path. 
- util::StaticGraph<> graph(num_nodes, num_arcs); + util::StaticGraph graph(num_nodes, num_arcs); std::vector arc_lengths(lengths.begin(), lengths.end()); - for (int a = 0; a < num_arcs; ++a) { + for (ArcIndex a = 0; a < num_arcs; ++a) { // Negative length can cause the algo to loop forever and/or use a lot of // memory. So it should be validated. CHECK_GE(lengths[a], 0); diff --git a/ortools/graph/bounded_dijkstra_test.cc b/ortools/graph/bounded_dijkstra_test.cc index 07a21f5a8d..a5f256cce1 100644 --- a/ortools/graph/bounded_dijkstra_test.cc +++ b/ortools/graph/bounded_dijkstra_test.cc @@ -30,6 +30,7 @@ #include "gtest/gtest.h" #include "ortools/base/dump_vars.h" #include "ortools/base/gmock.h" +#include "ortools/base/strong_int.h" #include "ortools/graph/graph.h" #include "ortools/graph/graph_io.h" #include "ortools/graph/test_util.h" @@ -45,122 +46,140 @@ using ::testing::Pair; using ::testing::UnorderedElementsAreArray; using ::util::ListGraph; +DEFINE_STRONG_INT_TYPE(NodeIndex, int32_t); +DEFINE_STRONG_INT_TYPE(ArcIndex, int64_t); + +using TestGraph = ListGraph; +template +using DijkstraWrapper = BoundedDijkstraWrapper; + TEST(BoundedDijkstraWrapperDeathTest, Accessors) { - ListGraph<> graph; - graph.AddArc(1, 3); - std::vector arc_lengths = {2.5}; - BoundedDijkstraWrapper, float> dijkstra(&graph, &arc_lengths); + TestGraph graph; + graph.AddArc(NodeIndex(1), NodeIndex(3)); + DijkstraWrapper::ByArc arc_lengths = {2.5}; + DijkstraWrapper dijkstra(&graph, &arc_lengths); const std::is_same same_type; ASSERT_TRUE(same_type.value); ASSERT_EQ(&dijkstra.graph(), &graph); - ASSERT_EQ(dijkstra.GetArcLength(0), 2.5); + ASSERT_EQ(dijkstra.GetArcLength(ArcIndex(0)), 2.5); } TEST(BoundedDijkstraWrapperDeathTest, WithArcLengthFunctor) { - ListGraph<> graph; - graph.AddArc(1, 3); - BoundedDijkstraWrapper, float, std::function> - dijkstra(&graph, [](int) { return 2.34; }); - ASSERT_FLOAT_EQ(dijkstra.GetArcLength(0), 2.34f); + TestGraph graph; + graph.AddArc(NodeIndex(1), 
NodeIndex(3)); + BoundedDijkstraWrapper> + dijkstra(&graph, [](ArcIndex) { return 2.34; }); + ASSERT_FLOAT_EQ(dijkstra.GetArcLength(ArcIndex(0)), 2.34f); } TEST(BoundedDijkstraWrapperDeathTest, ConstructorPreconditions) { - ListGraph<> graph; - for (int i = 0; i < 50; ++i) graph.AddArc(i, i + 1); + TestGraph graph; + for (int i = 0; i < 50; ++i) graph.AddArc(NodeIndex(i), NodeIndex(i + 1)); - std::vector arc_lengths(13, 0); - typedef BoundedDijkstraWrapper, int> TestedClass; + typedef DijkstraWrapper TestedClass; + TestedClass::ByArc arc_lengths(13, 0); EXPECT_DEATH(new TestedClass(&graph, &arc_lengths), "13"); arc_lengths.resize(50, 0); - arc_lengths[20] = -132; + arc_lengths[ArcIndex(20)] = -132; EXPECT_DEATH(new TestedClass(&graph, &arc_lengths), "-132"); } TEST(BoundedDijkstraWrapper, ArcPathToAndSourceOfShortestPathToNode) { - ListGraph<> graph; - std::vector arc_lengths = {1, 2, 3, 4, 6, 5}; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - graph.AddArc(1, 2); - graph.AddArc(1, 2); - graph.AddArc(2, 3); - graph.AddArc(2, 3); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths = {1, 2, 3, 4, 6, 5}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, 10); - EXPECT_THAT(reached, ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(9, dijkstra.distances()[3]); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(0, 2, 5)); - EXPECT_THAT(dijkstra.NodePathTo(3), ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3)); + DijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), 10); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), + NodeIndex(3))); 
+ EXPECT_EQ(9, dijkstra.distances()[NodeIndex(3)]); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), + ElementsAre(ArcIndex(0), ArcIndex(2), ArcIndex(5))); + EXPECT_THAT( + dijkstra.NodePathTo(NodeIndex(3)), + ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), NodeIndex(3))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); } TEST(BoundedDijkstraWrapper, EmptyPath) { - ListGraph<> graph; - std::vector arc_lengths = {1, 2}; - graph.AddArc(0, 1); - graph.AddArc(2, 3); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths = {1, 2}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, 10); - EXPECT_THAT(reached, ElementsAre(0, 1)); + DijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), 10); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1))); - EXPECT_EQ(0, dijkstra.distances()[0]); - EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre()); - EXPECT_THAT(dijkstra.NodePathTo(0), ElementsAre(0)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(0)); + EXPECT_EQ(0, dijkstra.distances()[NodeIndex(0)]); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), ElementsAre()); + EXPECT_THAT(dijkstra.NodePathTo(NodeIndex(0)), ElementsAre(NodeIndex(0))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(0))); } TEST(BoundedDijkstraWrapper, OverflowSafe) { - ListGraph<> graph; + TestGraph graph; const int64_t int_max = std::numeric_limits::max(); - std::vector arc_lengths = {int_max, int_max / 2, int_max / 2, 1}; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - graph.AddArc(1, 2); - graph.AddArc(2, 3); + DijkstraWrapper::ByArc arc_lengths = {int_max, int_max / 2, + int_max / 2, 1}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + 
graph.AddArc(NodeIndex(2), NodeIndex(3)); - BoundedDijkstraWrapper, int64_t> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, int_max); + BoundedDijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), int_max); // This works because int_max is odd, i.e. 2 * (int_max / 2) = int_max - 1 - EXPECT_THAT(reached, ElementsAre(0, 1, 2)); - EXPECT_EQ(0, dijkstra.distances()[0]); - EXPECT_EQ(int_max / 2, dijkstra.distances()[1]); - EXPECT_EQ(int_max - 1, dijkstra.distances()[2]); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2))); + EXPECT_EQ(0, dijkstra.distances()[NodeIndex(0)]); + EXPECT_EQ(int_max / 2, dijkstra.distances()[NodeIndex(1)]); + EXPECT_EQ(int_max - 1, dijkstra.distances()[NodeIndex(2)]); } TEST(BoundedDijkstraWrapper, ArcPathToAndSourceOfShortestPathToNode_WithArcLengthFunction) { - ListGraph<> graph; - std::vector arc_lengths = {1, 2, 3, 4, 6, 5}; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - graph.AddArc(1, 2); - graph.AddArc(1, 2); - graph.AddArc(2, 3); - graph.AddArc(2, 3); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths = {1, 2, 3, 4, 6, 5}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); class MyArcLengthFunctor { public: - explicit MyArcLengthFunctor(const std::vector& arc_lengths) + explicit MyArcLengthFunctor( + const DijkstraWrapper::ByArc& arc_lengths) : arc_lengths_(arc_lengths) {} - int operator()(int arc) const { - return arc % 2 == 1 ? arc_lengths_[arc] : 100; + + int operator()(ArcIndex arc) const { + return arc.value() % 2 == 1 ? 
arc_lengths_[arc] : 100; } private: - const std::vector& arc_lengths_; + const DijkstraWrapper::ByArc& arc_lengths_; }; - BoundedDijkstraWrapper, int, MyArcLengthFunctor> dijkstra( + BoundedDijkstraWrapper dijkstra( &graph, MyArcLengthFunctor(arc_lengths)); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, 20); - EXPECT_THAT(reached, ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(11, dijkstra.distances()[3]); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(1, 3, 5)); - EXPECT_THAT(dijkstra.NodePathTo(3), ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3)); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), 20); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), + NodeIndex(3))); + EXPECT_EQ(11, dijkstra.distances()[NodeIndex(3)]); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), + ElementsAre(ArcIndex(1), ArcIndex(3), ArcIndex(5))); + EXPECT_THAT( + dijkstra.NodePathTo(NodeIndex(3)), + ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), NodeIndex(3))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); } TEST(BoundedDijkstraWrapperTest, RandomDenseGraph) { @@ -168,12 +187,12 @@ TEST(BoundedDijkstraWrapperTest, RandomDenseGraph) { const int num_nodes = 50; std::vector> lengths(num_nodes, std::vector(num_nodes)); - ListGraph<> graph; - std::vector arc_lengths; + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; for (int i = 0; i < num_nodes; ++i) { for (int j = 0; j < num_nodes; ++j) { lengths[i][j] = (i == j) ? 
0 : absl::Uniform(random, 0, 1000); - graph.AddArc(i, j); + graph.AddArc(NodeIndex(i), NodeIndex(j)); arc_lengths.push_back(lengths[i][j]); } } @@ -191,15 +210,15 @@ TEST(BoundedDijkstraWrapperTest, RandomDenseGraph) { std::vector reached_sizes; for (int source = 0; source < num_nodes; ++source) { const int limit = 100; - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(source, limit); - for (const int node : reached) { + DijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(source), limit); + for (const NodeIndex node : reached) { EXPECT_LT(dijkstra.distances()[node], limit); - EXPECT_EQ(dijkstra.distances()[node], lengths[source][node]); + EXPECT_EQ(dijkstra.distances()[node], lengths[source][node.value()]); // Check that we never have the same node twice in the paths. - std::vector path = {node}; - int parent = node; + std::vector path = {node}; + NodeIndex parent = node; while (dijkstra.parents()[parent] != parent) { parent = dijkstra.parents()[parent]; path.push_back(parent); @@ -230,7 +249,7 @@ TEST(SimpleOneToOneShortestPathTest, PathTooLong) { { const auto [distance, path] = - SimpleOneToOneShortestPath(0, 3, tails, heads, lengths); + SimpleOneToOneShortestPath(0, 3, tails, heads, lengths); EXPECT_EQ(distance, std::numeric_limits::max()); EXPECT_TRUE(path.empty()); } @@ -238,7 +257,7 @@ TEST(SimpleOneToOneShortestPathTest, PathTooLong) { { // from 0 to 2 work because 2 * big_length < int_max. const auto [distance, path] = - SimpleOneToOneShortestPath(0, 2, tails, heads, lengths); + SimpleOneToOneShortestPath(0, 2, tails, heads, lengths); EXPECT_EQ(distance, std::numeric_limits::max() - 1); EXPECT_THAT(path, ElementsAre(0, 1, 2)); } @@ -256,7 +275,7 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // This will be the "sparse" representation. 
std::vector tails; std::vector heads; - std::vector arc_lengths; + DijkstraWrapper::ByArc arc_lengths; // We permutes the arc order to properly test that it do not matter. std::vector nodes(num_nodes); @@ -292,8 +311,8 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // No limit. There should always be a path with our generated data. { - const auto [distance, path] = - SimpleOneToOneShortestPath(from, to, tails, heads, arc_lengths); + const auto [distance, path] = SimpleOneToOneShortestPath( + from, to, tails, heads, arc_lengths); EXPECT_EQ(distance, shortest_distance[from][to]); EXPECT_FALSE(path.empty()); EXPECT_EQ(path.front(), from); @@ -302,7 +321,7 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // A limit of shortest_distance[from][to] + 1 works too. { - const auto [distance, path] = SimpleOneToOneShortestPath( + const auto [distance, path] = SimpleOneToOneShortestPath( from, to, tails, heads, arc_lengths, shortest_distance[from][to] + 1); EXPECT_EQ(distance, shortest_distance[from][to]); EXPECT_FALSE(path.empty()); @@ -312,7 +331,7 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // But a limit of shortest_distance[from][to] should fail. 
{ - const auto [distance, path] = SimpleOneToOneShortestPath( + const auto [distance, path] = SimpleOneToOneShortestPath( from, to, tails, heads, arc_lengths, shortest_distance[from][to]); EXPECT_EQ(distance, shortest_distance[from][to]); EXPECT_TRUE(path.empty()); @@ -321,101 +340,116 @@ TEST(SimpleOneToOneShortestPathTest, Random) { } TEST(BoundedDijkstraWrapperTest, MultiRunsOverDynamicGraphAndLengths) { - ListGraph<> graph; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - std::vector arc_lengths = {4, 3}; - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + TestGraph graph; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + DijkstraWrapper::ByArc arc_lengths = {4, 3}; + DijkstraWrapper dijkstra(&graph, &arc_lengths); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 5), ElementsAre(0, 1)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(1)); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(0), 5), + ElementsAre(NodeIndex(0), NodeIndex(1))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), ElementsAre(ArcIndex(1))); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 2), ElementsAre(0)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(0)); - EXPECT_THAT(dijkstra.ArcPathTo(0), IsEmpty()); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(0), 2), + ElementsAre(NodeIndex(0))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(0))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), IsEmpty()); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(1, 99), ElementsAre(1)); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), IsEmpty()); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(1), 99), + ElementsAre(NodeIndex(1))); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + 
EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), IsEmpty()); // Add some arcs and nodes... - graph.AddArc(0, 2); + graph.AddArc(NodeIndex(0), NodeIndex(2)); arc_lengths.push_back(1); - graph.AddArc(1, 2); + graph.AddArc(NodeIndex(1), NodeIndex(2)); arc_lengths.push_back(0); - graph.AddArc(2, 1); + graph.AddArc(NodeIndex(2), NodeIndex(1)); arc_lengths.push_back(1); - graph.AddArc(1, 3); + graph.AddArc(NodeIndex(1), NodeIndex(3)); arc_lengths.push_back(5); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 10), ElementsAre(0, 2, 1, 3)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3)); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(2, 4, 5)); + EXPECT_THAT( + dijkstra.RunBoundedDijkstra(NodeIndex(0), 10), + ElementsAre(NodeIndex(0), NodeIndex(2), NodeIndex(1), NodeIndex(3))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), + ElementsAre(ArcIndex(2), ArcIndex(4), ArcIndex(5))); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 6), ElementsAre(0, 2, 1)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(2, 4)); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(0), 6), + ElementsAre(NodeIndex(0), NodeIndex(2), NodeIndex(1))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), + ElementsAre(ArcIndex(2), ArcIndex(4))); } TEST(BoundedDijkstraWrapperTest, MultipleSources) { // Use this graph. Source nodes have their initial distance in [ ]. 
// // N1[0] --(2)--> N0[4] --(1)--> N2 --(5)--> N3 <--(4)-- N4[3] --(5)--> N5 - ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(1, 0); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(1), NodeIndex(0)); arc_lengths.push_back(2); - graph.AddArc(0, 2); + graph.AddArc(NodeIndex(0), NodeIndex(2)); arc_lengths.push_back(1); - graph.AddArc(2, 3); + graph.AddArc(NodeIndex(2), NodeIndex(3)); arc_lengths.push_back(5); - graph.AddArc(4, 3); + graph.AddArc(NodeIndex(4), NodeIndex(3)); arc_lengths.push_back(4); - graph.AddArc(4, 5); + graph.AddArc(NodeIndex(4), NodeIndex(5)); arc_lengths.push_back(5); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + DijkstraWrapper dijkstra(&graph, &arc_lengths); // The distance limit is exclusive, so we can't reach Node 5. ASSERT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources( - {{1, 0}, {0, 4}, {4, 3}}, 8), + {{NodeIndex(1), 0}, {NodeIndex(0), 4}, {NodeIndex(4), 3}}, 8), // The order is deterministic: node 4 comes before node 2, despite // having equal distance and higher index, because it's a source. 
- ElementsAre(1, 0, 4, 2, 3)); - EXPECT_EQ(2, dijkstra.distances()[0]); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(0)); - EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre(0)); - EXPECT_EQ(0, dijkstra.distances()[1]); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), IsEmpty()); - EXPECT_EQ(3, dijkstra.distances()[2]); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(2)); - EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(0, 1)); - EXPECT_EQ(7, dijkstra.distances()[3]); - EXPECT_EQ(4, dijkstra.SourceOfShortestPathToNode(3)); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(3)); - EXPECT_EQ(3, dijkstra.distances()[4]); - EXPECT_EQ(4, dijkstra.SourceOfShortestPathToNode(4)); - EXPECT_THAT(dijkstra.ArcPathTo(4), IsEmpty()); + ElementsAre(NodeIndex(1), NodeIndex(0), NodeIndex(4), + NodeIndex(2), NodeIndex(3))); + EXPECT_EQ(2, dijkstra.distances()[NodeIndex(0)]); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(0))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), ElementsAre(ArcIndex(0))); + EXPECT_EQ(0, dijkstra.distances()[NodeIndex(1)]); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), IsEmpty()); + EXPECT_EQ(3, dijkstra.distances()[NodeIndex(2)]); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(2))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(2)), + ElementsAre(ArcIndex(0), ArcIndex(1))); + EXPECT_EQ(7, dijkstra.distances()[NodeIndex(3)]); + EXPECT_EQ(NodeIndex(4), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), ElementsAre(ArcIndex(3))); + EXPECT_EQ(3, dijkstra.distances()[NodeIndex(4)]); + EXPECT_EQ(NodeIndex(4), dijkstra.SourceOfShortestPathToNode(NodeIndex(4))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(4)), IsEmpty()); } TEST(BoundedDijkstraWrapperTest, SourcesAtOrBeyondDistanceLimitAreNotReached) { - ListGraph<> graph(/*num_nodes=*/5, 
/*arc_capacity=*/0); - std::vector arc_lengths; // No arcs. - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - EXPECT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources( - {{0, 10}, {1, 11}, {2, 12}, {3, 13}}, 12), - ElementsAre(0, 1)); + TestGraph graph(/*num_nodes=*/NodeIndex(5), /*arc_capacity=*/ArcIndex(0)); + DijkstraWrapper::ByArc arc_lengths; // No arcs. + DijkstraWrapper dijkstra(&graph, &arc_lengths); + EXPECT_THAT( + dijkstra.RunBoundedDijkstraFromMultipleSources({{NodeIndex(0), 10}, + {NodeIndex(1), 11}, + {NodeIndex(2), 12}, + {NodeIndex(3), 13}}, + 12), + ElementsAre(NodeIndex(0), NodeIndex(1))); } TEST(BoundedDijkstraWrapperTest, SourcesListedMultipleTimesKeepsMinDistance) { - ListGraph<> graph(/*num_nodes=*/5, /*arc_capacity=*/1); - graph.AddArc(1, 3); - std::vector arc_lengths = {20}; - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - EXPECT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources( - {{1, 12}, {1, 10}, {1, 14}}, 31), - ElementsAre(1, 3)); - EXPECT_EQ(dijkstra.distances()[3], 30); + TestGraph graph(/*num_nodes=*/NodeIndex(5), /*arc_capacity=*/ArcIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(3)); + DijkstraWrapper::ByArc arc_lengths = {20}; + DijkstraWrapper dijkstra(&graph, &arc_lengths); + EXPECT_THAT( + dijkstra.RunBoundedDijkstraFromMultipleSources( + {{NodeIndex(1), 12}, {NodeIndex(1), 10}, {NodeIndex(1), 14}}, 31), + ElementsAre(NodeIndex(1), NodeIndex(3))); + EXPECT_EQ(dijkstra.distances()[NodeIndex(3)], 30); } TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { @@ -430,38 +464,45 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { // `------(0)-----' // // The shortest path is S0->D1->N5->D4, of distance 2 + 3 + 1 + 1 + 1 = 8. 
- ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(0, 1); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(0), NodeIndex(1)); arc_lengths.push_back(3); - graph.AddArc(2, 3); + graph.AddArc(NodeIndex(2), NodeIndex(3)); arc_lengths.push_back(3); - graph.AddArc(1, 5); + graph.AddArc(NodeIndex(1), NodeIndex(5)); arc_lengths.push_back(1); - graph.AddArc(3, 5); + graph.AddArc(NodeIndex(3), NodeIndex(5)); arc_lengths.push_back(0); - graph.AddArc(5, 3); + graph.AddArc(NodeIndex(5), NodeIndex(3)); arc_lengths.push_back(0); - graph.AddArc(5, 4); + graph.AddArc(NodeIndex(5), NodeIndex(4)); arc_lengths.push_back(1); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + DijkstraWrapper dijkstra(&graph, &arc_lengths); // Repeat the same source and destination multiple times, to verify that // it's supported. - std::vector> sources = {{0, 5}, {2, 4}, {0, 2}, {0, 9}}; - std::vector> destinations = { - {1, 7}, {4, 5}, {3, 3}, {4, 1}, {4, 3}}; + std::vector> sources = {{NodeIndex(0), 5}, + {NodeIndex(2), 4}, + {NodeIndex(0), 2}, + {NodeIndex(0), 9}}; + std::vector> destinations = {{NodeIndex(1), 7}, + {NodeIndex(4), 5}, + {NodeIndex(3), 3}, + {NodeIndex(4), 1}, + {NodeIndex(4), 3}}; EXPECT_THAT( dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1, /*distance_limit=*/1000), - Contains(4)); - EXPECT_EQ(2 + 3 + 1 + 1, dijkstra.distances()[4]); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(4)); - EXPECT_THAT(dijkstra.ArcPathTo(4), - ElementsAre(/*0->1*/ 0, /*1->5*/ 2, /*5->4*/ 5)); - EXPECT_EQ(2, dijkstra.GetSourceIndex(0)); - EXPECT_EQ(3, dijkstra.GetDestinationIndex(4)); + Contains(NodeIndex(4))); + EXPECT_EQ(2 + 3 + 1 + 1, dijkstra.distances()[NodeIndex(4)]); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(4))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(4)), + ElementsAre(/*0->1*/ ArcIndex(0), /*1->5*/ ArcIndex(2), + /*5->4*/ 
ArcIndex(5))); + EXPECT_EQ(2, dijkstra.GetSourceIndex(NodeIndex(0))); + EXPECT_EQ(3, dijkstra.GetDestinationIndex(NodeIndex(4))); // Run it with a limit too small: it'll fail to discover any destination. EXPECT_THAT( @@ -475,18 +516,20 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/2, /*distance_limit=*/9), // Limit is exclusive. - ElementsAre(4)); + ElementsAre(NodeIndex(4))); // Slightly modify the graph and try again. We want a case where the best // destination isn't the one with the smallest distance offset. - destinations.push_back({1, 2}); // D1 will be the closest destination now. + destinations.push_back( + {NodeIndex(1), 2}); // D1 will be the closest destination now. EXPECT_THAT( dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1, /*distance_limit=*/8), // Limit is exclusive. - ElementsAre(1)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(/*0->1*/ 0)); + ElementsAre(NodeIndex(1))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), + ElementsAre(/*0->1*/ ArcIndex(0))); // Corner case: run with no destinations. EXPECT_THAT( @@ -505,8 +548,8 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { // Call Get{Source,Destination}Index() on nodes that aren't sources or // destinations. This returns junk; so we don't check the returned values, // but we do check that it doesn't crash. - dijkstra.GetDestinationIndex(4); - dijkstra.GetSourceIndex(1); + dijkstra.GetDestinationIndex(NodeIndex(4)); + dijkstra.GetSourceIndex(NodeIndex(1)); // Setting num_reached_destinations=1 now should make '1' the only reachable // destination, even if the limit is infinite. 
@@ -514,85 +557,88 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1, /*distance_limit=*/1000), - ElementsAre(1)); + ElementsAre(NodeIndex(1))); // Verify that if we set the number of destinations to infinity, they're all // explored, and the search still stops before exploring the whole graph. To // do that, we add one extra arc that's beyond the farthest destination's // distance (including its destination offset), i.e. 1 (distance 2+3+7 = 12). - graph.AddArc(5, 6); + graph.AddArc(NodeIndex(5), NodeIndex(6)); arc_lengths.push_back(2); - graph.AddArc(6, 7); + graph.AddArc(NodeIndex(6), NodeIndex(7)); arc_lengths.push_back(0); EXPECT_THAT( dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1000, /*distance_limit=*/1000), - ElementsAre(1, 4, 3)); - EXPECT_GE(dijkstra.distances()[1], 5); - EXPECT_GE(dijkstra.distances()[4], 7); - EXPECT_GE(dijkstra.distances()[3], 6); + ElementsAre(NodeIndex(1), NodeIndex(4), NodeIndex(3))); + EXPECT_GE(dijkstra.distances()[NodeIndex(1)], 5); + EXPECT_GE(dijkstra.distances()[NodeIndex(4)], 7); + EXPECT_GE(dijkstra.distances()[NodeIndex(3)], 6); // To verify that node #7 isn't reached, we can check its distance, which will // still be set to the initialized "distance_limit - min_destination_offset". - EXPECT_GE(dijkstra.distances()[7], 1000 - 1); + EXPECT_GE(dijkstra.distances()[NodeIndex(7)], 1000 - 1); } TEST(BoundedDijkstraWrapperTest, OneToOneShortestPath) { // Since we already tested the multiple sources - multiple destinations // variant, we only need to test the "plumbing" here. 
- ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(0, 1); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(0), NodeIndex(1)); arc_lengths.push_back(3); - graph.AddArc(1, 2); + graph.AddArc(NodeIndex(1), NodeIndex(2)); arc_lengths.push_back(2); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + DijkstraWrapper dijkstra(&graph, &arc_lengths); - EXPECT_TRUE(dijkstra.OneToOneShortestPath(0, 2, 6)); - EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(0, 1)); + EXPECT_TRUE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(2), 6)); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(2)), + ElementsAre(ArcIndex(0), ArcIndex(1))); - EXPECT_TRUE(dijkstra.OneToOneShortestPath(0, 0, 1)); - EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre()); + EXPECT_TRUE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(0), 1)); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), ElementsAre()); - EXPECT_TRUE(dijkstra.OneToOneShortestPath(1, 2, 3)); - EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(1)); + EXPECT_TRUE(dijkstra.OneToOneShortestPath(NodeIndex(1), NodeIndex(2), 3)); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(2)), ElementsAre(ArcIndex(1))); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(0, 2, 5)); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(0, 0, 0)); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(1, 2, 2)); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(2, 1, 1000)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(2), 5)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(0), 0)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(1), NodeIndex(2), 2)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(2), NodeIndex(0), 1000)); } TEST(BoundedDijkstraWrapperTest, CustomSettledNodeCallback) { // A small chain: 8 --[3]--> 1 --[2]--> 42 --[3]--> 3 --[2]--> 4. 
- ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(8, 1); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(8), NodeIndex(1)); arc_lengths.push_back(3); - graph.AddArc(1, 42); + graph.AddArc(NodeIndex(1), NodeIndex(42)); arc_lengths.push_back(2); - graph.AddArc(42, 3); + graph.AddArc(NodeIndex(42), NodeIndex(3)); arc_lengths.push_back(3); - graph.AddArc(3, 4); + graph.AddArc(NodeIndex(3), NodeIndex(4)); arc_lengths.push_back(2); - typedef BoundedDijkstraWrapper, int> DijkstraType; + typedef DijkstraWrapper DijkstraType; DijkstraType dijkstra(&graph, &arc_lengths); // Tracks each NodeDistance it's called on, and sets the distance limit // to 10 if it gets called on node 42. - std::vector> settled_node_dists; - auto callback = [&settled_node_dists](int node, int distance, + std::vector> settled_node_dists; + auto callback = [&settled_node_dists](NodeIndex node, int distance, int* distance_limit) { settled_node_dists.push_back({node, distance}); - if (node == 42) *distance_limit = 10; + if (node == NodeIndex(42)) *distance_limit = 10; }; - EXPECT_THAT(dijkstra.RunBoundedDijkstraWithSettledNodeCallback({{8, 0}}, - callback, 999), - ElementsAre(8, 1, 42, 3)); + EXPECT_THAT( + dijkstra.RunBoundedDijkstraWithSettledNodeCallback({{NodeIndex(8), 0}}, + callback, 999), + ElementsAre(NodeIndex(8), NodeIndex(1), NodeIndex(42), NodeIndex(3))); EXPECT_THAT(settled_node_dists, - ElementsAre(Pair(8, 0), Pair(1, 3), Pair(42, 5), Pair(3, 8))); + ElementsAre(Pair(NodeIndex(8), 0), Pair(NodeIndex(1), 3), + Pair(NodeIndex(42), 5), Pair(NodeIndex(3), 8))); } TEST(BoundedDisjktraTest, RandomizedStressTest) { @@ -601,49 +647,51 @@ TEST(BoundedDisjktraTest, RandomizedStressTest) { constexpr int kint32max = std::numeric_limits::max(); for (int test = 0; test < kNumTests; ++test) { // Generate a random graph with random weights. 
- const int num_nodes = absl::Uniform(random, 1, 12); - const int num_arcs = - absl::Uniform(absl::IntervalClosed, random, 0, - std::min(num_nodes * (num_nodes - 1), 15)); - ListGraph<> graph(num_nodes, num_arcs); - for (int a = 0; a < num_arcs; ++a) { - graph.AddArc(absl::Uniform(random, 0, num_nodes), - absl::Uniform(random, 0, num_nodes)); + const NodeIndex num_nodes(absl::Uniform(random, 1, 12)); + const ArcIndex num_arcs(absl::Uniform( + absl::IntervalClosed, random, 0, + std::min(num_nodes.value() * (num_nodes.value() - 1), 15))); + TestGraph graph(num_nodes, num_arcs); + for (ArcIndex a(0); a < num_arcs; ++a) { + graph.AddArc(NodeIndex(absl::Uniform(random, 0, num_nodes.value())), + NodeIndex(absl::Uniform(random, 0, num_nodes.value()))); } - std::vector lengths(num_arcs); + DijkstraWrapper::ByArc lengths(num_arcs); for (int& w : lengths) w = absl::Uniform(random, 0, 5); // Run Floyd-Warshall as a 'reference' shortest path algorithm. - FlatMatrix ref_dist(num_nodes, num_nodes, kint32max); - for (int a = 0; a < num_arcs; ++a) { - int& d = ref_dist[graph.Tail(a)][graph.Head(a)]; + FlatMatrix ref_dist(num_nodes.value(), num_nodes.value(), kint32max); + for (ArcIndex a(0); a < num_arcs; ++a) { + int& d = ref_dist[graph.Tail(a).value()][graph.Head(a).value()]; if (lengths[a] < d) d = lengths[a]; } - for (int node = 0; node < num_nodes; ++node) { - ref_dist[node][node] = 0; + for (NodeIndex node(0); node < num_nodes; ++node) { + ref_dist[node.value()][node.value()] = 0; } - for (int k = 0; k < num_nodes; ++k) { - for (int i = 0; i < num_nodes; ++i) { - for (int j = 0; j < num_nodes; ++j) { + for (NodeIndex k(0); k < num_nodes; ++k) { + for (NodeIndex i(0); i < num_nodes; ++i) { + for (NodeIndex j(0); j < num_nodes; ++j) { const int64_t dist_through_k = - static_cast(ref_dist[i][k]) + ref_dist[k][j]; - if (dist_through_k < ref_dist[i][j]) ref_dist[i][j] = dist_through_k; + static_cast(ref_dist[i.value()][k.value()]) + + ref_dist[k.value()][j.value()]; + if 
(dist_through_k < ref_dist[i.value()][j.value()]) + ref_dist[i.value()][j.value()] = dist_through_k; } } } // Compute the graph's largest distance below kint32max. int max_distance = 0; - for (int i = 0; i < num_nodes; ++i) { - for (int j = 0; j < num_nodes; ++j) { - const int d = ref_dist[i][j]; + for (NodeIndex i(0); i < num_nodes; ++i) { + for (NodeIndex j(0); j < num_nodes; ++j) { + const int d = ref_dist[i.value()][j.value()]; if (d != kint32max && d > max_distance) max_distance = d; } } // Now, run some Dijkstras and verify that they match. To balance out the // FW (Floyd-Warshall) which is O(N³), we run more than one Dijkstra per FW. - BoundedDijkstraWrapper, int> dijkstra(&graph, &lengths); + DijkstraWrapper dijkstra(&graph, &lengths); for (int num_dijkstra = 0; num_dijkstra < 20; ++num_dijkstra) { // Draw the distance limit. const int limit = @@ -652,33 +700,34 @@ TEST(BoundedDisjktraTest, RandomizedStressTest) { : absl::Uniform(absl::IntervalClosed, random, 0, max_distance); // Draw sources (*with* repetition) with initial distances. const int num_sources = absl::Uniform(random, 1, 5); - std::vector> sources(num_sources); + std::vector> sources(num_sources); for (auto& [s, dist] : sources) { - s = absl::Uniform(random, 0, num_nodes); + s = NodeIndex(absl::Uniform(random, 0, num_nodes.value())); dist = absl::Uniform(absl::IntervalClosed, random, 0, max_distance + 1); } // Precompute the reference minimum distance to each node (using any of // the sources), and the expected reached nodes: any node whose distance // is < limit. That includes the sources: if a source's initial distance // is ≥ limit, it won't be reached.That includes the source themselves. 
- std::vector node_min_dist(num_nodes, kint32max); - std::vector expected_reached_nodes; - for (int node = 0; node < num_nodes; ++node) { + DijkstraWrapper::ByNode node_min_dist(num_nodes, kint32max); + DijkstraWrapper::ByNode expected_reached_nodes; + for (NodeIndex node(0); node < num_nodes; ++node) { int min_dist = kint32max; for (const auto& [src, dist] : sources) { // Cast to int64_t to avoid overflows. min_dist = std::min( - min_dist, static_cast(ref_dist[src][node]) + dist); + min_dist, + static_cast(ref_dist[src.value()][node.value()]) + dist); } node_min_dist[node] = min_dist; if (min_dist < limit) expected_reached_nodes.push_back(node); } - const std::vector reached_nodes = + const auto reached_nodes = dijkstra.RunBoundedDijkstraFromMultipleSources(sources, limit); EXPECT_THAT(reached_nodes, UnorderedElementsAreArray(expected_reached_nodes)); - for (const int node : reached_nodes) { + for (const NodeIndex node : reached_nodes) { EXPECT_EQ(dijkstra.distances()[node], node_min_dist[node]) << node; } ASSERT_FALSE(HasFailure()) @@ -697,7 +746,8 @@ void BM_GridGraph(benchmark::State& state) { const int kSourceNode = static_cast(kWidth * kHeight / 2); std::unique_ptr graph = util::Create2DGridGraph(/*width=*/kWidth, /*height=*/kHeight); - std::vector arc_lengths(graph->num_arcs(), 0); + BoundedDijkstraWrapper::ByArc arc_lengths( + graph->num_arcs(), 0); const int64_t min_length = arc_lengths_are_discrete ? 0 : 1; const int64_t max_length = arc_lengths_are_discrete ? 
2 : 1000000000000000L; std::mt19937 random(12345); diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index 73af4c944a..c8b7ef0b83 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -420,6 +420,8 @@ class Vector : public std::vector { template class SVector { public: + using value_type = T; + SVector() : base_(nullptr), size_(0), capacity_(0) {} ~SVector() { clear_and_dealloc(); } @@ -434,7 +436,7 @@ class SVector { capacity_ = other.size_; base_ = Allocate(capacity_); CHECK(base_ != nullptr); - base_ += capacity_; + base_ += static_cast(capacity_); } else { // capacity_ >= other.size clear(); } @@ -488,6 +490,9 @@ class SVector { T* data() const { return base_; } + const T* begin() const { return base_; } + const T* end() const { return base_ + static_cast(size_); } + void swap(SVector& x) noexcept { std::swap(base_, x.base_); std::swap(size_, x.size_); @@ -564,8 +569,9 @@ class SVector { // Copies other.base_ to base_ in this SVector. Safe for all types as it uses // constructor for each entry. void CopyInternal(const SVector& other, std::false_type) { - for (int i = -size_; i < size_; ++i) { - new (base_ + i) T(other.base_[i]); + for (IndexT i = -size_; i < size_; ++i) { + new (base_ + static_cast(i)) + T(other.base_[static_cast(i)]); } } @@ -1091,41 +1097,21 @@ class ReverseArcStaticGraph // TODO(user): consider slower but more memory efficient implementations that // follow the cycles of the permutation and use a bitmap to indicate what has // been permuted or to mark the beginning of each cycle. - -// Some compiler do not know typeof(), so we have to use this extra function -// internally. 
-template -void PermuteWithExplicitElementType(const IntVector& permutation, - Array& array_to_permute, - ElementType unused) { - std::vector temp(permutation.size()); - for (size_t i = 0; i < permutation.size(); ++i) { - temp[i] = array_to_permute[i]; - } - for (size_t i = 0; i < permutation.size(); ++i) { - array_to_permute[static_cast(permutation[i])] = temp[i]; - } -} - template void Permute(const IntVector& permutation, Array* array_to_permute) { if (permutation.empty()) { return; } - PermuteWithExplicitElementType(permutation, *array_to_permute, - (*array_to_permute)[0]); -} - -// We need a specialization for vector, because the default code uses -// (*array_to_permute)[0] as ElementType, which isn't 'bool' in that case. -template -void Permute(const IntVector& permutation, - std::vector* array_to_permute) { - if (permutation.empty()) { - return; + const auto size = permutation.size(); + auto& array = *array_to_permute; + using ElementType = + typename std::iterator_traits::value_type; + std::vector temp(size); + auto array_begin = std::begin(array); + std::copy_n(array_begin, size, temp.begin()); + for (size_t i = 0; i < permutation.size(); ++i) { + *(array_begin + static_cast(permutation[i])) = temp[i]; } - bool unused = false; - PermuteWithExplicitElementType(permutation, *array_to_permute, unused); } // BaseGraph implementation ---------------------------------------------------- diff --git a/ortools/graph/graph_io.h b/ortools/graph/graph_io.h index ffe455028e..82a2002a5e 100644 --- a/ortools/graph/graph_io.h +++ b/ortools/graph/graph_io.h @@ -97,12 +97,12 @@ std::string GraphToString(const Graph& graph, GraphToStringFormat format) { } else { // PRINT_GRAPH_ADJACENCY_LISTS[_SORTED] adj.clear(); for (const typename Graph::ArcIndex arc : graph.OutgoingArcs(node)) { - adj.push_back(graph.Head(arc)); + adj.push_back(static_cast(graph.Head(arc))); } if (format == PRINT_GRAPH_ADJACENCY_LISTS_SORTED) { std::sort(adj.begin(), adj.end()); } - if (node != 0) out += 
'\n'; + if (node != typename Graph::NodeIndex(0)) out += '\n'; absl::StrAppend(&out, static_cast(node), ": ", absl::StrJoin(adj, " ")); } diff --git a/ortools/graph/graph_test.cc b/ortools/graph/graph_test.cc index 29e85240e2..e690197385 100644 --- a/ortools/graph/graph_test.cc +++ b/ortools/graph/graph_test.cc @@ -24,6 +24,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/log/check.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" @@ -32,9 +33,11 @@ #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" namespace util { +using testing::ElementsAre; using testing::Pair; using testing::UnorderedElementsAre; @@ -289,98 +292,144 @@ void ConstructAndCheckGraph( // Return the size of the memory block allocated by malloc when asking for x // bytes. -inline int UpperBoundOfMallocBlockSizeOf(int x) { +template +inline IndexType UpperBoundOfMallocBlockSizeOf(IndexType x) { // Note(user): as of 2012-09, the rule seems to be: round x up to the // next multiple of 16. // WARNING: This may change, and may already be wrong for small values. 
- return 16 * ((x + 15) / 16); + return IndexType((16 * (static_cast(x) + 15)) / 16); } -TEST(SVectorTest, DynamicGrowth) { - internal::SVector v; - EXPECT_EQ(0, v.size()); - EXPECT_EQ(0, v.capacity()); - for (int i = 0; i < 100; i++) { +template +class SVectorTest : public ::testing::Test {}; + +typedef ::testing::Types, std::pair, + std::pair, + std::pair> + TestSVectorIndexTypes; + +TYPED_TEST_SUITE(SVectorTest, TestSVectorIndexTypes); + +TYPED_TEST(SVectorTest, CopyMoveIterate) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + using VectorT = internal::SVector; + VectorT v; + v.resize(IndexT(2)); + v[IndexT(0)] = ValueT(1); + v[IndexT(1)] = ValueT(2); + + { + EXPECT_THAT(VectorT(v), ElementsAre(ValueT(1), ValueT(2))); + VectorT v2 = v; + EXPECT_THAT(v2, ElementsAre(ValueT(1), ValueT(2))); + EXPECT_THAT(v, ElementsAre(ValueT(1), ValueT(2))); + } + + { + VectorT v2 = std::move(v); + EXPECT_THAT(v2, ElementsAre(ValueT(1), ValueT(2))); + EXPECT_THAT(VectorT(std::move(v2)), ElementsAre(ValueT(1), ValueT(2))); + } +} + +TYPED_TEST(SVectorTest, DynamicGrowth) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + EXPECT_EQ(IndexT(0), v.size()); + EXPECT_EQ(IndexT(0), v.capacity()); + for (ValueT i(0); i < ValueT(100); i++) { v.grow(-i, i); } - EXPECT_EQ(100, v.size()); - EXPECT_GE(v.capacity(), 100); - EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 100; i++) { - EXPECT_EQ(-i, v[~i]); - EXPECT_EQ(i, v[i]); + EXPECT_EQ(IndexT(100), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (IndexT i(0); i < IndexT(100); ++i) { + EXPECT_EQ(ValueT(static_cast(-i)), v[~i]); + EXPECT_EQ(ValueT(static_cast(i)), v[i]); } } -TEST(SVectorTest, Reserve) { - internal::SVector v; - v.reserve(100); - EXPECT_EQ(0, v.size()); - EXPECT_GE(v.capacity(), 100); - 
EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 100; i++) { +TYPED_TEST(SVectorTest, Reserve) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + v.reserve(IndexT(100)); + EXPECT_EQ(IndexT(0), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (ValueT i(0); i < ValueT(100); i++) { v.grow(-i, i); } - EXPECT_EQ(100, v.size()); - EXPECT_GE(v.capacity(), 100); - EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 10; i++) { - EXPECT_EQ(-i, v[~i]); - EXPECT_EQ(i, v[i]); + EXPECT_EQ(IndexT(100), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (IndexT i(0); i < IndexT(10); i++) { + EXPECT_EQ(ValueT(static_cast(-i)), v[~i]); + EXPECT_EQ(ValueT(static_cast(i)), v[i]); } } -TEST(SVectorTest, Resize) { - internal::SVector v; - v.resize(100); - EXPECT_EQ(100, v.size()); - EXPECT_GE(v.capacity(), 100); - EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 100; i++) { - EXPECT_EQ(0, v[-i - 1]); - EXPECT_EQ(0, v[i]); +TYPED_TEST(SVectorTest, Resize) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + v.resize(IndexT(100)); + EXPECT_EQ(IndexT(100), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (IndexT i(0); i < IndexT(100); ++i) { + EXPECT_EQ(ValueT(0), v[-i - IndexT(1)]); + EXPECT_EQ(ValueT(0), v[i]); } } -TEST(SVectorTest, ResizeToZero) { - internal::SVector s; - s.resize(1); - s.resize(0); - EXPECT_EQ(0, s.size()); +TYPED_TEST(SVectorTest, ResizeToZero) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + v.resize(IndexT(1)); + 
v.resize(IndexT(0)); + EXPECT_EQ(IndexT(0), v.size()); } -TEST(SVectorTest, Swap) { - internal::SVector s; - internal::SVector t; - s.resize(1); - s[0] = 's'; - s[-1] = 's'; - t.resize(2); - for (int i = -2; i <= 1; ++i) { - t[i] = 't'; +TYPED_TEST(SVectorTest, Swap) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector s; + internal::SVector t; + s.resize(IndexT(1)); + s[IndexT(0)] = ValueT('s'); + s[IndexT(-1)] = ValueT('s'); + t.resize(IndexT(2)); + for (IndexT i(-2); i <= IndexT(1); ++i) { + t[i] = ValueT('t'); } s.swap(t); - EXPECT_EQ(1, t.size()); - EXPECT_EQ('s', t[-1]); - EXPECT_EQ('s', t[0]); - EXPECT_EQ(2, s.size()); - EXPECT_EQ('t', s[-2]); - EXPECT_EQ('t', s[-1]); - EXPECT_EQ('t', s[0]); - EXPECT_EQ('t', s[1]); + EXPECT_EQ(IndexT(1), t.size()); + EXPECT_EQ(ValueT('s'), t[IndexT(-1)]); + EXPECT_EQ(ValueT('s'), t[IndexT(0)]); + EXPECT_EQ(IndexT(2), s.size()); + EXPECT_EQ(ValueT('t'), s[IndexT(-2)]); + EXPECT_EQ(ValueT('t'), s[IndexT(-1)]); + EXPECT_EQ(ValueT('t'), s[IndexT(0)]); + EXPECT_EQ(ValueT('t'), s[IndexT(1)]); } -TEST(SVectorTest, SwapAndDestroy) { - internal::SVector s; +TYPED_TEST(SVectorTest, SwapAndDestroy) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector s; { - internal::SVector t; - t.resize(2); - t[-2] = 42; + internal::SVector t; + t.resize(IndexT(2)); + t[IndexT(-2)] = ValueT(42); t.swap(s); } - EXPECT_EQ(2, s.size()); - EXPECT_EQ(42, s[-2]); - EXPECT_EQ(0, s[1]); + EXPECT_EQ(IndexT(2), s.size()); + EXPECT_EQ(ValueT(42), s[IndexT(-2)]); + EXPECT_EQ(ValueT(0), s[IndexT(1)]); } // Use a more complex type to better check the invocations of @@ -458,7 +507,7 @@ class MoveOnlyObject { int MoveOnlyObject::sequence_ = 1; int MoveOnlyObject::object_count_ = 0; -TEST(SVectorTest, MoveWithMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, MoveWithMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); 
internal::SVector a; a.resize(10); @@ -472,7 +521,7 @@ TEST(SVectorTest, MoveWithMoveOnlyObject) { EXPECT_EQ(0, a.size()); // NOLINT } -TEST(SVectorTest, ShrinkWithMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, ShrinkWithMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); { internal::SVector a; @@ -484,7 +533,7 @@ TEST(SVectorTest, ShrinkWithMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); } -TEST(SVectorTest, GrowMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, GrowMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); { internal::SVector a; @@ -501,7 +550,7 @@ TEST(SVectorTest, GrowMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); } -TEST(SVectorTest, ReserveMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, ReserveMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); { internal::SVector a; @@ -554,7 +603,7 @@ int TrackedObject::num_destructions = 0; int TrackedObject::num_moves = 0; int TrackedObject::num_copies = 0; -TEST(SVectorTest, CopyConstructor) { +TEST(SVectorTrackingTest, CopyConstructor) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -573,7 +622,7 @@ TEST(SVectorTest, CopyConstructor) { ASSERT_EQ(v_copy.size(), 5); } -TEST(SVectorTest, AssignmentOperator) { +TEST(SVectorTrackingTest, AssignmentOperator) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -595,7 +644,7 @@ TEST(SVectorTest, AssignmentOperator) { ASSERT_EQ(other.size(), 5); } -TEST(SVectorTest, CopyConstructorIntegralType) { +TEST(SVectorTrackingTest, CopyConstructorIntegralType) { auto v = internal::SVector(); v.resize(3); v[-3] = 1; @@ -613,7 +662,7 @@ TEST(SVectorTest, CopyConstructorIntegralType) { } } -TEST(SVectorTest, AssignmentOperatorIntegralType) { +TEST(SVectorTrackingTest, AssignmentOperatorIntegralType) { internal::SVector other; auto v = 
internal::SVector(); v.resize(3); @@ -632,7 +681,7 @@ TEST(SVectorTest, AssignmentOperatorIntegralType) { } } -TEST(SVectorTest, MoveConstructor) { +TEST(SVectorTrackingTest, MoveConstructor) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -650,7 +699,7 @@ TEST(SVectorTest, MoveConstructor) { ASSERT_EQ(b.size(), 5); } -TEST(SVectorTest, MoveAssignmentOperator) { +TEST(SVectorTrackingTest, MoveAssignmentOperator) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -1011,6 +1060,28 @@ TEST(SVector, NoHeapCheckerFalsePositive) { EXPECT_EQ(kVector->size(), 5000); } +TEST(Permute, IntArray) { + int array[] = {4, 5, 6}; + std::vector permutation = {0, 2, 1}; + util::Permute(permutation, &array); + EXPECT_THAT(array, ElementsAre(4, 6, 5)); +} + +TEST(Permute, BoolVector) { + std::vector array = {true, false, true}; + std::vector permutation = {0, 2, 1}; + util::Permute(permutation, &array); + EXPECT_THAT(array, ElementsAre(true, true, false)); +} + +TEST(Permute, StrongVector) { + util_intops::StrongVector array = {4, 5, 6}; + std::vector permutation = {StrongArcId(0), StrongArcId(2), + StrongArcId(1)}; + util::Permute(permutation, &array); + EXPECT_THAT(array, ElementsAre(4, 6, 5)); +} + template static void BM_RandomArcs(benchmark::State& state) { const int kRandomSeed = 0; @@ -1304,4 +1375,23 @@ static void BM_CompleteBipartiteGraphTailHead(benchmark::State& state) { BENCHMARK_TEMPLATE(BM_CompleteBipartiteGraphTailHead, int32_t); BENCHMARK_TEMPLATE(BM_CompleteBipartiteGraphTailHead, int16_t); +template +void BM_Permute(benchmark::State& state) { + const int size = state.range(0); + ArrayT array(size); + + std::vector permutation(size); + absl::c_iota(permutation, IndexT(0)); + + for (const auto s : state) { + util::Permute(permutation, &array); + benchmark::DoNotOptimize(array); + 
benchmark::DoNotOptimize(permutation); + } +} +BENCHMARK(BM_Permute, StrongArcId>) + ->Arg(128); +BENCHMARK(BM_Permute, int>)->Arg(128); +BENCHMARK(BM_Permute, int>)->Arg(128); + } // namespace util diff --git a/ortools/graph/samples/assignment_linear_sum_assignment.py b/ortools/graph/samples/assignment_linear_sum_assignment.py index 82af30d560..c662741f64 100755 --- a/ortools/graph/samples/assignment_linear_sum_assignment.py +++ b/ortools/graph/samples/assignment_linear_sum_assignment.py @@ -18,6 +18,7 @@ import numpy as np from ortools.graph.python import linear_sum_assignment + # [END import] diff --git a/ortools/graph/samples/assignment_min_flow.py b/ortools/graph/samples/assignment_min_flow.py index 0d55ed20c9..1e4f56387a 100755 --- a/ortools/graph/samples/assignment_min_flow.py +++ b/ortools/graph/samples/assignment_min_flow.py @@ -16,6 +16,7 @@ """Linear assignment example.""" # [START import] from ortools.graph.python import min_cost_flow + # [END import] diff --git a/ortools/graph/samples/balance_min_flow.py b/ortools/graph/samples/balance_min_flow.py index 923ff22a85..688c9c79ad 100755 --- a/ortools/graph/samples/balance_min_flow.py +++ b/ortools/graph/samples/balance_min_flow.py @@ -16,6 +16,7 @@ """Assignment with teams of workers.""" # [START import] from ortools.graph.python import min_cost_flow + # [END import] diff --git a/ortools/graph/samples/dijkstra_directed.cc b/ortools/graph/samples/dijkstra_directed.cc index 046f15d1d9..20c8a508c8 100644 --- a/ortools/graph/samples/dijkstra_directed.cc +++ b/ortools/graph/samples/dijkstra_directed.cc @@ -50,8 +50,8 @@ int main(int argc, char** argv) { // Solve the shortest path problem from 0 to 5. std::pair> result = - operations_research::SimpleOneToOneShortestPath(0, 5, tails, heads, - lengths); + operations_research::SimpleOneToOneShortestPath(0, 5, tails, + heads, lengths); // Print to length of the path and then the nodes in the path. 
std::cout << "Shortest path length: " << result.first << std::endl; diff --git a/ortools/graph/samples/dijkstra_undirected.cc b/ortools/graph/samples/dijkstra_undirected.cc index f51645691b..84bef36fee 100644 --- a/ortools/graph/samples/dijkstra_undirected.cc +++ b/ortools/graph/samples/dijkstra_undirected.cc @@ -59,8 +59,8 @@ int main(int argc, char** argv) { // Solve the shortest path problem from 0 to 4. std::pair> result = - operations_research::SimpleOneToOneShortestPath(0, 4, tails, heads, - lengths); + operations_research::SimpleOneToOneShortestPath(0, 4, tails, + heads, lengths); // Print to length of the path and then the nodes in the path. std::cout << "Shortest path length: " << result.first << std::endl; diff --git a/ortools/graph/samples/simple_max_flow_program.py b/ortools/graph/samples/simple_max_flow_program.py index 38bd192247..43820f3db8 100755 --- a/ortools/graph/samples/simple_max_flow_program.py +++ b/ortools/graph/samples/simple_max_flow_program.py @@ -18,6 +18,7 @@ import numpy as np from ortools.graph.python import max_flow + # [END import] diff --git a/ortools/graph/samples/simple_min_cost_flow_program.py b/ortools/graph/samples/simple_min_cost_flow_program.py index 4e0e0afd56..390e7bdbae 100755 --- a/ortools/graph/samples/simple_min_cost_flow_program.py +++ b/ortools/graph/samples/simple_min_cost_flow_program.py @@ -18,6 +18,7 @@ import numpy as np from ortools.graph.python import min_cost_flow + # [END import] From 79b86cc581b1e86ddc5ccd8e7d26cda989124b06 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 14:54:22 +0200 Subject: [PATCH 093/509] constraint_solver: export from google3 --- .../constraint_solver/constraint_solver.cc | 4 + ortools/constraint_solver/constraint_solver.h | 1 + ortools/constraint_solver/docs/CP.md | 145 +++++++++--------- ortools/constraint_solver/local_search.cc | 4 +- .../python/constraint_solver.i | 18 +-- .../constraint_solver/samples/cp_is_fun_cp.py | 1 + 
.../constraint_solver/samples/nqueens_cp.py | 1 + .../samples/simple_cp_program.py | 1 + ortools/constraint_solver/search.cc | 76 ++++++--- ortools/constraint_solver/search_stats.proto | 12 ++ 10 files changed, 163 insertions(+), 100 deletions(-) diff --git a/ortools/constraint_solver/constraint_solver.cc b/ortools/constraint_solver/constraint_solver.cc index c87f38ba92..de6ebe007f 100644 --- a/ortools/constraint_solver/constraint_solver.cc +++ b/ortools/constraint_solver/constraint_solver.cc @@ -3254,6 +3254,10 @@ std::string Solver::SearchContext(const Search* search) const { return search->search_context(); } +bool Solver::AcceptSolution(Search* search) const { + return search->AcceptSolution(); +} + Assignment* Solver::GetOrCreateLocalSearchState() { if (local_search_state_ == nullptr) { local_search_state_ = std::make_unique(this); diff --git a/ortools/constraint_solver/constraint_solver.h b/ortools/constraint_solver/constraint_solver.h index 644c1c0199..3040a32089 100644 --- a/ortools/constraint_solver/constraint_solver.h +++ b/ortools/constraint_solver/constraint_solver.h @@ -3145,6 +3145,7 @@ class Solver { void SetSearchContext(Search* search, absl::string_view search_context); std::string SearchContext() const; std::string SearchContext(const Search* search) const; + bool AcceptSolution(Search* search) const; /// Returns (or creates) an assignment representing the state of local search. // TODO(user): Investigate if this should be moved to Search. Assignment* GetOrCreateLocalSearchState(); diff --git a/ortools/constraint_solver/docs/CP.md b/ortools/constraint_solver/docs/CP.md index 2865706a0f..395409180a 100644 --- a/ortools/constraint_solver/docs/CP.md +++ b/ortools/constraint_solver/docs/CP.md @@ -12,12 +12,13 @@ Java and .Net. Each language have different requirements for the code samples. 
### C++ code samples ```cpp +// Snippet from ortools/constraint_solver/samples/simple_cp_program.cc #include #include +#include "ortools/base/init_google.h" #include "absl/base/log_severity.h" #include "absl/log/globals.h" -#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" namespace operations_research { @@ -74,55 +75,57 @@ int main(int argc, char* argv[]) { ```python #!/usr/bin/env python3 +# Snippet from ortools/constraint_solver/samples/simple_cp_program.py """Simple Constraint optimization example.""" from ortools.constraint_solver import pywrapcp def main(): - """Entry point of the program.""" - # Instantiate the solver. - solver = pywrapcp.Solver("CPSimple") + """Entry point of the program.""" + # Instantiate the solver. + solver = pywrapcp.Solver("CPSimple") - # Create the variables. - num_vals = 3 - x = solver.IntVar(0, num_vals - 1, "x") - y = solver.IntVar(0, num_vals - 1, "y") - z = solver.IntVar(0, num_vals - 1, "z") + # Create the variables. + num_vals = 3 + x = solver.IntVar(0, num_vals - 1, "x") + y = solver.IntVar(0, num_vals - 1, "y") + z = solver.IntVar(0, num_vals - 1, "z") - # Constraint 0: x != y. - solver.Add(x != y) - print("Number of constraints: ", solver.Constraints()) + # Constraint 0: x != y. + solver.Add(x != y) + print("Number of constraints: ", solver.Constraints()) - # Solve the problem. - decision_builder = solver.Phase( - [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE - ) + # Solve the problem. + decision_builder = solver.Phase( + [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE + ) - # Print solution on console. - count = 0 - solver.NewSearch(decision_builder) - while solver.NextSolution(): - count += 1 - solution = f"Solution {count}:\n" - for var in [x, y, z]: - solution += f" {var.Name()} = {var.Value()}" - print(solution) - solver.EndSearch() - print(f"Number of solutions found: {count}") + # Print solution on console. 
+ count = 0 + solver.NewSearch(decision_builder) + while solver.NextSolution(): + count += 1 + solution = f"Solution {count}:\n" + for var in [x, y, z]: + solution += f" {var.Name()} = {var.Value()}" + print(solution) + solver.EndSearch() + print(f"Number of solutions found: {count}") - print("Advanced usage:") - print(f"Problem solved in {solver.WallTime()}ms") - print(f"Memory usage: {pywrapcp.Solver.MemoryUsage()}bytes") + print("Advanced usage:") + print(f"Problem solved in {solver.WallTime()}ms") + print(f"Memory usage: {pywrapcp.Solver.MemoryUsage()}bytes") if __name__ == "__main__": - main() + main() ``` ### Java code samples ```java +// Snippet from ortools/constraint_solver/samples/SimpleCpProgram.java package com.google.ortools.constraintsolver.samples; import com.google.ortools.Loader; import com.google.ortools.constraintsolver.DecisionBuilder; @@ -148,74 +151,78 @@ public class SimpleCpProgram { final IntVar z = solver.makeIntVar(0, numVals - 1, "z"); // Constraint 0: x != y.. - solver.addConstraint(solver.makeAllDifferent(new IntVar[] {x, y})); + solver.addConstraint(solver.makeAllDifferent(new IntVar[]{x, y})); logger.info("Number of constraints: " + solver.constraints()); // Solve the problem. final DecisionBuilder db = solver.makePhase( - new IntVar[] {x, y, z}, Solver.CHOOSE_FIRST_UNBOUND, Solver.ASSIGN_MIN_VALUE); + new IntVar[]{x, y, z}, + Solver.CHOOSE_FIRST_UNBOUND, + Solver.ASSIGN_MIN_VALUE); // Print solution on console. 
int count = 0; solver.newSearch(db); while (solver.nextSolution()) { ++count; - logger.info( - String.format("Solution: %d\n x=%d y=%d z=%d", count, x.value(), y.value(), z.value())); + logger.info(String.format("Solution: %d\n x=%d y=%d z=%d" + , count + , x.value() + , y.value() + , z.value())); } solver.endSearch(); logger.info("Number of solutions found: " + solver.solutions()); - logger.info(String.format("Advanced usage:\nProblem solved in %d ms\nMemory usage: %d bytes", - solver.wallTime(), Solver.memoryUsage())); + logger.info(String.format( + "Advanced usage:\nProblem solved in %d ms\nMemory usage: %d bytes" + , solver.wallTime(), Solver.memoryUsage())); } } ``` ### .Net code samples -```cs +```csharp +// Snippet from ortools/constraint_solver/samples/SimpleCpProgram.cs using System; using Google.OrTools.ConstraintSolver; /// /// This is a simple CP program. /// -public class SimpleCpProgram -{ - public static void Main(String[] args) - { - // Instantiate the solver. - Solver solver = new Solver("CpSimple"); +public class SimpleCpProgram { + public static void Main(String[] args) { + // Instantiate the solver. + Solver solver = new Solver("CpSimple"); - // Create the variables. - const long numVals = 3; - IntVar x = solver.MakeIntVar(0, numVals - 1, "x"); - IntVar y = solver.MakeIntVar(0, numVals - 1, "y"); - IntVar z = solver.MakeIntVar(0, numVals - 1, "z"); + // Create the variables. + const long numVals = 3; + IntVar x = solver.MakeIntVar(0, numVals - 1, "x"); + IntVar y = solver.MakeIntVar(0, numVals - 1, "y"); + IntVar z = solver.MakeIntVar(0, numVals - 1, "z"); - // Constraint 0: x != y.. - solver.Add(solver.MakeAllDifferent(new IntVar[] { x, y })); - Console.WriteLine($"Number of constraints: {solver.Constraints()}"); + // Constraint 0: x != y.. + solver.Add(solver.MakeAllDifferent(new IntVar[] { x, y })); + Console.WriteLine($"Number of constraints: {solver.Constraints()}"); - // Solve the problem. 
- DecisionBuilder db = - solver.MakePhase(new IntVar[] { x, y, z }, Solver.CHOOSE_FIRST_UNBOUND, Solver.ASSIGN_MIN_VALUE); + // Solve the problem. + DecisionBuilder db = solver.MakePhase(new IntVar[] { x, y, z }, Solver.CHOOSE_FIRST_UNBOUND, + Solver.ASSIGN_MIN_VALUE); - // Print solution on console. - int count = 0; - solver.NewSearch(db); - while (solver.NextSolution()) - { - ++count; - Console.WriteLine($"Solution: {count}\n x={x.Value()} y={y.Value()} z={z.Value()}"); - } - solver.EndSearch(); - Console.WriteLine($"Number of solutions found: {solver.Solutions()}"); - - Console.WriteLine("Advanced usage:"); - Console.WriteLine($"Problem solved in {solver.WallTime()}ms"); - Console.WriteLine($"Memory usage: {Solver.MemoryUsage()}bytes"); + // Print solution on console. + int count = 0; + solver.NewSearch(db); + while (solver.NextSolution()) { + ++count; + Console.WriteLine($"Solution: {count}\n x={x.Value()} y={y.Value()} z={z.Value()}"); } + solver.EndSearch(); + Console.WriteLine($"Number of solutions found: {solver.Solutions()}"); + + Console.WriteLine("Advanced usage:"); + Console.WriteLine($"Problem solved in {solver.WallTime()}ms"); + Console.WriteLine($"Memory usage: {Solver.MemoryUsage()}bytes"); + } } ``` diff --git a/ortools/constraint_solver/local_search.cc b/ortools/constraint_solver/local_search.cc index 34266c3576..2a0f237af5 100644 --- a/ortools/constraint_solver/local_search.cc +++ b/ortools/constraint_solver/local_search.cc @@ -4130,7 +4130,9 @@ Decision* FindOneNeighbor::Next(Solver* const solver) { if (solutions_since_last_check_ >= check_period_) { solutions_since_last_check_ = 0; } - const bool accept = !check_solution || solver->SolveAndCommit(restore); + const bool accept = !check_solution || + (solver->SolveAndCommit(restore) && + solver->AcceptSolution(solver->TopLevelSearch())); solver->GetLocalSearchMonitor()->EndAcceptNeighbor(ls_operator_, accept); if (accept) { diff --git a/ortools/constraint_solver/python/constraint_solver.i 
b/ortools/constraint_solver/python/constraint_solver.i index 1e2ce4a2b4..498e4af146 100644 --- a/ortools/constraint_solver/python/constraint_solver.i +++ b/ortools/constraint_solver/python/constraint_solver.i @@ -21,15 +21,15 @@ // // USAGE EXAMPLES (most of which are also unit tests): // - ./pywrapcp_test.py -// - ortools/python/appointments.py -// - ortools/python/golomb8.py -// - ortools/python/hidato_table.py -// - ortools/python/jobshop_ft06.py -// - ortools/python/magic_sequence_distribute.py -// - ortools/python/rabbit_pheasant.py -// - ortools/python/simple_meeting.py -// - ortools/python/sudoku.py -// - ortools/python/zebra.py +// - examples/python/appointments.py +// - examples/python/golomb8.py +// - examples/python/hidato_table.py +// - examples/python/jobshop_ft06.py +// - examples/python/magic_sequence_distribute.py +// - examples/python/rabbit_pheasant.py +// - examples/python/simple_meeting.py +// - examples/python/sudoku.py +// - examples/python/zebra.py %include "ortools/base/base.i" %include "ortools/util/python/proto.i" diff --git a/ortools/constraint_solver/samples/cp_is_fun_cp.py b/ortools/constraint_solver/samples/cp_is_fun_cp.py index fbb230be13..fbc820aee2 100755 --- a/ortools/constraint_solver/samples/cp_is_fun_cp.py +++ b/ortools/constraint_solver/samples/cp_is_fun_cp.py @@ -22,6 +22,7 @@ This problem has 72 different solutions in base 10. 
""" # [START import] from ortools.constraint_solver import pywrapcp + # [END import] diff --git a/ortools/constraint_solver/samples/nqueens_cp.py b/ortools/constraint_solver/samples/nqueens_cp.py index eed0d217b2..21aa7e5761 100755 --- a/ortools/constraint_solver/samples/nqueens_cp.py +++ b/ortools/constraint_solver/samples/nqueens_cp.py @@ -17,6 +17,7 @@ # [START import] import sys from ortools.constraint_solver import pywrapcp + # [END import] diff --git a/ortools/constraint_solver/samples/simple_cp_program.py b/ortools/constraint_solver/samples/simple_cp_program.py index 7c62799558..7fdb0af5d8 100755 --- a/ortools/constraint_solver/samples/simple_cp_program.py +++ b/ortools/constraint_solver/samples/simple_cp_program.py @@ -17,6 +17,7 @@ # [START import] from ortools.constraint_solver import pywrapcp + # [END import] diff --git a/ortools/constraint_solver/search.cc b/ortools/constraint_solver/search.cc index 38b64d3ac4..b3014ebf4e 100644 --- a/ortools/constraint_solver/search.cc +++ b/ortools/constraint_solver/search.cc @@ -3388,9 +3388,17 @@ class TabuSearch : public Metaheuristic { void EnterSearch() override; void ApplyDecision(Decision* d) override; bool AtSolution() override; + bool AcceptSolution() override; bool AtLocalOptimum() override; bool AcceptDelta(Assignment* delta, Assignment* deltadelta) override; void AcceptNeighbor() override; + void BeginNextDecision(DecisionBuilder* const) override { + if (stop_search_) solver()->Fail(); + } + void RefuteDecision(Decision* const d) override { + Metaheuristic::RefuteDecision(d); + if (stop_search_) solver()->Fail(); + } std::string DebugString() const override { return "Tabu Search"; } protected: @@ -3425,6 +3433,11 @@ class TabuSearch : public Metaheuristic { int64_t forbid_tenure_; double tabu_factor_; int64_t stamp_; + int64_t solution_count_ = 0; + bool stop_search_ = false; + std::vector delta_values_; + SparseBitset<> delta_vars_; + std::vector var_index_to_index_; }; TabuSearch::TabuSearch(Solver* 
solver, const std::vector& maximize, @@ -3438,10 +3451,17 @@ TabuSearch::TabuSearch(Solver* solver, const std::vector& maximize, keep_tenure_(keep_tenure), forbid_tenure_(forbid_tenure), tabu_factor_(tabu_factor), - stamp_(0) { + stamp_(0), + delta_values_(vars.size(), 0), + delta_vars_(vars.size()) { for (int index = 0; index < vars_.size(); ++index) { assignment_container_.FastAdd(vars_[index]); DCHECK_EQ(vars_[index], assignment_container_.Element(index).Var()); + const int var_index = vars_[index]->index(); + if (var_index >= var_index_to_index_.size()) { + var_index_to_index_.resize(var_index + 1, -1); + } + var_index_to_index_[var_index] = index; } } @@ -3450,6 +3470,8 @@ void TabuSearch::EnterSearch() { solver()->SetUseFastLocalSearch(true); stamp_ = 0; has_stored_assignment_ = false; + solution_count_ = 0; + stop_search_ = false; } void TabuSearch::ApplyDecision(Decision* const d) { @@ -3482,21 +3504,19 @@ void TabuSearch::ApplyDecision(Decision* const d) { MakeMinimizationVarsLessOrEqualWithSteps( [this](int i) { return CurrentInternalValue(i); }); } - // Avoid cost plateau's which lead to tabu cycles. +} + +bool TabuSearch::AcceptSolution() { + // Avoid cost plateaus which lead to tabu cycles. 
if (found_initial_solution_) { - Constraint* plateau_ct = nullptr; - if (Size() == 1) { - plateau_ct = s->MakeNonEquality(MinimizationVar(0), last_values_[0]); - } else { - std::vector plateau_vars(Size()); - for (int i = 0; i < Size(); ++i) { - plateau_vars[i] = - s->MakeIsEqualCstVar(MinimizationVar(i), last_values_[i]); + for (int i = 0; i < Size(); ++i) { + if (last_values_[i] != MinimizationVar(i)->Min()) { + return true; } - plateau_ct = s->MakeSumLessOrEqual(plateau_vars, Size() - 1); } - s->AddConstraint(plateau_ct); + return false; } + return true; } std::vector TabuSearch::CreateTabuVars() { @@ -3519,6 +3539,7 @@ std::vector TabuSearch::CreateTabuVars() { } bool TabuSearch::AtSolution() { + ++solution_count_; if (!ObjectiveMonitor::AtSolution()) { return false; } @@ -3551,6 +3572,13 @@ bool TabuSearch::AtSolution() { bool TabuSearch::AtLocalOptimum() { solver()->SetUseFastLocalSearch(false); + // If no solution has been accepted since the last local optimum, and no tabu + // lists are active, stop the search. + if (stamp_ > 0 && solution_count_ == 0 && keep_tabu_list_.empty() && + forbid_tabu_list_.empty()) { + stop_search_ = true; + } + solution_count_ = 0; AgeLists(); for (int i = 0; i < Size(); ++i) { SetCurrentInternalValue(i, std::numeric_limits::max()); @@ -3569,26 +3597,32 @@ bool TabuSearch::AcceptDelta(Assignment* delta, Assignment* deltadelta) { for (const IntVarElement& element : delta_container.elements()) { if (!element.Bound()) return true; } + delta_vars_.ResetAllToFalse(); + for (const IntVarElement& element : delta_container.elements()) { + const int var_index = element.Var()->index(); + if (var_index >= var_index_to_index_.size()) continue; + const int index = var_index_to_index_[var_index]; + if (index == -1) continue; + delta_values_[index] = element.Value(); + delta_vars_.Set(index); + } int num_respected = 0; - // TODO(user): Make this O(delta). 
- auto get_value = [this, &delta_container](int var_index) { - const IntVarElement* element = - delta_container.ElementPtrOrNull(vars(var_index)); - return (element != nullptr) - ? element->Value() + auto get_value = [this](int var_index) { + return delta_vars_[var_index] + ? delta_values_[var_index] : assignment_container_.Element(var_index).Value(); }; + const int64_t tabu_limit = TabuLimit(); for (const auto [var_index, value, unused_stamp] : synced_keep_tabu_list_) { if (get_value(var_index) == value) { - ++num_respected; + if (++num_respected >= tabu_limit) return true; } } for (const auto [var_index, value, unused_stamp] : synced_forbid_tabu_list_) { if (get_value(var_index) != value) { - ++num_respected; + if (++num_respected >= tabu_limit) return true; } } - const int64_t tabu_limit = TabuLimit(); if (num_respected >= tabu_limit) return true; // Aspiration // TODO(user): Add proper support for lex-objectives with steps. diff --git a/ortools/constraint_solver/search_stats.proto b/ortools/constraint_solver/search_stats.proto index 82cea90645..5031a1d06f 100644 --- a/ortools/constraint_solver/search_stats.proto +++ b/ortools/constraint_solver/search_stats.proto @@ -89,10 +89,22 @@ message ConstraintSolverStatistics { double duration_seconds = 5; } +// Statistics on sub-solvers. +message SubSolverStatistics { + // Number of calls to Glop in LP scheduling. + int64 num_glop_calls_in_lp_scheduling = 1; + // Number of calls to CP-SAT in LP scheduling. + int64 num_cp_sat_calls_in_lp_scheduling = 2; + // Number of calls to min cost flow. + int64 num_min_cost_flow_calls = 3; +} + // Search statistics. message SearchStatistics { // Local search statistics for each solver context. repeated LocalSearchStatistics local_search_statistics = 1; // Constraint solver statistics. repeated ConstraintSolverStatistics constraint_solver_statistics = 2; + // Sub-solver statistics. 
+ repeated SubSolverStatistics sub_solver_statistics = 3; } From 7b67f855e68c9918d3ca77c1a926c491001cc368 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 17:55:44 +0200 Subject: [PATCH 094/509] cleanup and formatting --- ortools/base/dump_vars.h | 3 ++- ortools/base/dump_vars_test.cc | 3 ++- ortools/linear_solver/model_exporter.cc | 2 +- ortools/linear_solver/xpress_interface.cc | 5 +++-- ortools/linear_solver/xpress_interface_test.cc | 2 +- ortools/math_opt/io/proto_converter.cc | 2 +- 6 files changed, 10 insertions(+), 7 deletions(-) diff --git a/ortools/base/dump_vars.h b/ortools/base/dump_vars.h index 61e6073084..b2814c2e53 100644 --- a/ortools/base/dump_vars.h +++ b/ortools/base/dump_vars.h @@ -142,7 +142,8 @@ std::ostream& operator<<(std::ostream& os, const ::std::optional& opt) { // needed by graph tests template -std::ostream& operator<<(std::ostream& os, const ::util_intops::StrongVector& vec) { +std::ostream& operator<<(std::ostream& os, + const ::util_intops::StrongVector& vec) { for (U it : vec) { os << ::std::to_string(it) << ','; } diff --git a/ortools/base/dump_vars_test.cc b/ortools/base/dump_vars_test.cc index 2dccc6381d..81b4e5ae8d 100644 --- a/ortools/base/dump_vars_test.cc +++ b/ortools/base/dump_vars_test.cc @@ -137,7 +137,8 @@ TEST(DumpVars, StrongInt) { } TEST(DumpVars, StrongVector) { - ::util_intops::StrongVector<::util_intops::CustomStrongInt, float> vec = {49.3, 3.14}; + ::util_intops::StrongVector<::util_intops::CustomStrongInt, float> vec = { + 49.3, 3.14}; EXPECT_EQ(R"(vec = 49.299999,3.140000,)", ToString(DUMP_VARS(vec))); EXPECT_EQ(R"(vec = 49.299999,3.140000,)", DUMP_VARS(vec).str()); } diff --git a/ortools/linear_solver/model_exporter.cc b/ortools/linear_solver/model_exporter.cc index 51a3727677..3554e1961d 100644 --- a/ortools/linear_solver/model_exporter.cc +++ b/ortools/linear_solver/model_exporter.cc @@ -57,7 +57,7 @@ class LineBreaker { // Returns true if string s will fit on the current line without 
adding a // carriage return. - bool WillFit(const std::string& s) { + bool WillFit(absl::string_view s) { return line_size_ + static_cast(s.size()) < max_line_size_; } diff --git a/ortools/linear_solver/xpress_interface.cc b/ortools/linear_solver/xpress_interface.cc index 0f88bc861c..89f6d4653f 100644 --- a/ortools/linear_solver/xpress_interface.cc +++ b/ortools/linear_solver/xpress_interface.cc @@ -20,8 +20,8 @@ #include #include #include -#include #include +#include #include "absl/strings/numbers.h" #include "absl/strings/str_format.h" @@ -1094,7 +1094,8 @@ void XpressInterface::SetCoefficient(MPConstraint* const constraint, double new_value, double) { InvalidateSolutionSynchronization(); - fixedOrderCoefficientsPerConstraint[constraint->index()][variable->index()] = new_value; + fixedOrderCoefficientsPerConstraint[constraint->index()][variable->index()] = + new_value; // Changing a single coefficient in the matrix is potentially pretty // slow since that coefficient has to be found in the sparse matrix diff --git a/ortools/linear_solver/xpress_interface_test.cc b/ortools/linear_solver/xpress_interface_test.cc index 16925a1e36..35c0cfcaf0 100644 --- a/ortools/linear_solver/xpress_interface_test.cc +++ b/ortools/linear_solver/xpress_interface_test.cc @@ -159,7 +159,7 @@ class XPRSGetter { std::string value(280, '\0'); int valueSize; EXPECT_STATUS(XPRSgetstringattrib(prob(), attrib, &value[0], value.size(), - &valueSize)); + &valueSize)); value.resize(valueSize - 1); return value; } diff --git a/ortools/math_opt/io/proto_converter.cc b/ortools/math_opt/io/proto_converter.cc index 96ffaf24d0..8dd21764c3 100644 --- a/ortools/math_opt/io/proto_converter.cc +++ b/ortools/math_opt/io/proto_converter.cc @@ -317,7 +317,7 @@ MPModelProtoToMathOptModel(const ::operations_research::MPModelProto& model) { for (const MPGeneralConstraintProto& general_constraint : model.general_constraint()) { - const std::string& in_name = general_constraint.name(); + absl::string_view in_name 
= general_constraint.name(); switch (general_constraint.general_constraint_case()) { case MPGeneralConstraintProto::kQuadraticConstraint: { (*output.mutable_quadratic_constraints()) From 3467396a7450affcdcb733545b0e9614e587e8e9 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 17 Jun 2025 11:08:04 +0200 Subject: [PATCH 095/509] dependencies: bump Protobuf from v31.0 to v31.1 --- Dependencies.txt | 2 +- MODULE.bazel | 2 +- bazel/notebook_requirements.in | 4 ++-- bazel/notebook_requirements.txt | 2 +- bazel/ortools_requirements.in | 4 ++-- bazel/ortools_requirements.txt | 2 +- cmake/dependencies/CMakeLists.txt | 4 ++-- cmake/host.CMakeLists.txt | 4 ++-- ortools/dotnet/Google.OrTools-full.csproj.in | 2 +- ortools/dotnet/Google.OrTools-local.csproj.in | 2 +- ortools/java/pom-full.xml.in | 2 +- ortools/java/pom-local.xml.in | 2 +- ortools/python/setup.py.in | 2 +- patches/BUILD.bazel | 2 +- patches/fuzztest-2025-02-14.patch | 2 +- patches/{protobuf-v31.0.patch => protobuf-v31.1.patch} | 0 16 files changed, 19 insertions(+), 19 deletions(-) rename patches/{protobuf-v31.0.patch => protobuf-v31.1.patch} (100%) diff --git a/Dependencies.txt b/Dependencies.txt index f0813f2c1d..5d072a8acb 100644 --- a/Dependencies.txt +++ b/Dependencies.txt @@ -1,6 +1,6 @@ ZLIB=1.3.1 abseil-cpp=20250512.0 -Protobuf=v31.0 +Protobuf=v31.1 Eigen=3.4.0 Re2=2024-07-02 CoinUtils=2.11.12 diff --git a/MODULE.bazel b/MODULE.bazel index 05c94ada19..08fe2aeacf 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -26,7 +26,7 @@ bazel_dep(name = "google_benchmark", version = "1.9.2") bazel_dep(name = "googletest", version = "1.17.0") bazel_dep(name = "highs", version = "1.11.0") bazel_dep(name = "platforms", version = "0.0.11") -bazel_dep(name = "protobuf", version = "31.0") +bazel_dep(name = "protobuf", version = "31.1") bazel_dep(name = "pybind11_abseil", version = "202402.0") bazel_dep(name = "pybind11_bazel", version = "2.13.6") bazel_dep(name = "pybind11_protobuf", version = "0.0.0-20240524-1d7a729") 
diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index d7c30c0201..c2d02e6fcb 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -2,8 +2,8 @@ absl-py==2.2.2 immutabledict==4.2.1 numpy==2.2.0 -protobuf==6.31.0 -requests==2.32.3 +protobuf==6.31.1 +requests==2.32.4 scipy==1.14.1 typing-extensions==4.13.1 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index b8a7a5aa7a..b656e14e68 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -215,7 +215,7 @@ prometheus-client==0.22.1 # via jupyter-server prompt-toolkit==3.0.51 # via ipython -protobuf==6.31.0 +protobuf==6.31.1 # via # -r bazel/notebook_requirements.in # mypy-protobuf diff --git a/bazel/ortools_requirements.in b/bazel/ortools_requirements.in index 0b4c89ab40..e893a8b629 100644 --- a/bazel/ortools_requirements.in +++ b/bazel/ortools_requirements.in @@ -2,8 +2,8 @@ absl-py==2.2.2 immutabledict==4.2.1 numpy==2.2.0 -protobuf==6.31.0 -requests==2.32.3 +protobuf==6.31.1 +requests==2.32.4 scipy==1.14.1 typing-extensions==4.13.1 diff --git a/bazel/ortools_requirements.txt b/bazel/ortools_requirements.txt index 820668e036..10c1d8b277 100644 --- a/bazel/ortools_requirements.txt +++ b/bazel/ortools_requirements.txt @@ -45,7 +45,7 @@ platformdirs==3.10.0 # via # black # virtualenv -protobuf==6.31.0 +protobuf==6.31.1 # via # -r bazel/ortools_requirements.in # mypy-protobuf diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 926e51c047..80fdbb1b37 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -136,11 +136,11 @@ if(BUILD_Protobuf) FetchContent_Declare( Protobuf GIT_REPOSITORY "https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v31.0" + GIT_TAG "v31.1" GIT_SHALLOW TRUE GIT_SUBMODULES "" PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v31.0.patch" + 
"${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v31.1.patch" ) FetchContent_MakeAvailable(Protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/cmake/host.CMakeLists.txt b/cmake/host.CMakeLists.txt index 970c4e0e40..6b63f17257 100644 --- a/cmake/host.CMakeLists.txt +++ b/cmake/host.CMakeLists.txt @@ -125,11 +125,11 @@ set(protobuf_WITH_ZLIB OFF) FetchContent_Declare( protobuf GIT_REPOSITORY "https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v31.0" + GIT_TAG "v31.1" GIT_SHALLOW TRUE GIT_SUBMODULES "" PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v31.0.patch" + "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v31.1.patch" ) FetchContent_MakeAvailable(protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/ortools/dotnet/Google.OrTools-full.csproj.in b/ortools/dotnet/Google.OrTools-full.csproj.in index b4619f4627..c20ce39b77 100644 --- a/ortools/dotnet/Google.OrTools-full.csproj.in +++ b/ortools/dotnet/Google.OrTools-full.csproj.in @@ -193,7 +193,7 @@ - + diff --git a/ortools/dotnet/Google.OrTools-local.csproj.in b/ortools/dotnet/Google.OrTools-local.csproj.in index 02b9da4443..a925e3e724 100644 --- a/ortools/dotnet/Google.OrTools-local.csproj.in +++ b/ortools/dotnet/Google.OrTools-local.csproj.in @@ -181,7 +181,7 @@ - + diff --git a/ortools/java/pom-full.xml.in b/ortools/java/pom-full.xml.in index ffde245eac..791f7b6f3f 100644 --- a/ortools/java/pom-full.xml.in +++ b/ortools/java/pom-full.xml.in @@ -109,7 +109,7 @@ com.google.protobuf protobuf-java - 4.31.0 + 4.31.1 diff --git a/ortools/java/pom-local.xml.in b/ortools/java/pom-local.xml.in index d03b19413b..64b2c51221 100644 --- a/ortools/java/pom-local.xml.in +++ b/ortools/java/pom-local.xml.in @@ -81,7 +81,7 @@ com.google.protobuf protobuf-java - 4.31.0 + 4.31.1 diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index b38b5fd708..5d67c5fa12 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -46,7 
+46,7 @@ setup( 'absl-py >= 2.0.0', 'numpy >= 1.13.3', 'pandas >= 2.0.0', - 'protobuf >= 6.31.0,<6.32', + 'protobuf >= 6.31.1,<6.32', 'typing-extensions >= 4.12', 'immutabledict >= 3.0.0', ], diff --git a/patches/BUILD.bazel b/patches/BUILD.bazel index 28b25b4abe..f73a6d5b4b 100644 --- a/patches/BUILD.bazel +++ b/patches/BUILD.bazel @@ -13,7 +13,7 @@ exports_files([ "abseil-cpp-20250512.0.patch", - "protobuf-v31.0.patch", + "protobuf-v31.1.patch", "pybind11_bazel.patch", "pybind11_abseil.patch", "pybind11_protobuf.patch", diff --git a/patches/fuzztest-2025-02-14.patch b/patches/fuzztest-2025-02-14.patch index 053736fbb7..d288eb5418 100644 --- a/patches/fuzztest-2025-02-14.patch +++ b/patches/fuzztest-2025-02-14.patch @@ -36,7 +36,7 @@ index 1f4f08d..cc4d0ba 100644 set(proto_URL https://github.com/protocolbuffers/protobuf.git) -set(proto_TAG v28.2) -+set(proto_TAG v31.0) ++set(proto_TAG v31.1) set(nlohmann_json_URL https://github.com/nlohmann/json.git) set(nlohmann_json_TAG v3.11.2) diff --git a/patches/protobuf-v31.0.patch b/patches/protobuf-v31.1.patch similarity index 100% rename from patches/protobuf-v31.0.patch rename to patches/protobuf-v31.1.patch From ac0fe6254412b3fdf6ed3e03a4b817d256daf721 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 17 Jun 2025 12:47:23 +0200 Subject: [PATCH 096/509] gscip cleanup; linear_solver samples reident --- ortools/gscip/BUILD.bazel | 14 -------------- ortools/gscip/gscip.cc | 13 +++---------- ortools/gscip/gscip.h | 7 ++----- ortools/linear_solver/BUILD.bazel | 2 +- ortools/linear_solver/glpk_interface.cc | 4 ---- ortools/linear_solver/proto_solver/BUILD.bazel | 15 ++++++++++++++- .../proto_solver/scip_params.cc} | 2 +- .../proto_solver/scip_params.h} | 6 +++--- .../proto_solver/scip_proto_solver.cc | 2 +- ortools/linear_solver/python/linear_solver.i | 2 +- .../samples/assignment_groups_mip.py | 1 + ortools/linear_solver/samples/assignment_mb.py | 1 + ortools/linear_solver/samples/assignment_mip.py | 1 + 
.../samples/assignment_task_sizes_mip.py | 1 + .../linear_solver/samples/assignment_teams_mip.py | 1 + ortools/linear_solver/samples/basic_example.py | 1 + ortools/linear_solver/samples/bin_packing_mb.py | 1 + ortools/linear_solver/samples/bin_packing_mip.py | 2 ++ ortools/linear_solver/scip_interface.cc | 2 +- ortools/linear_solver/solve.cc | 2 +- 20 files changed, 37 insertions(+), 43 deletions(-) rename ortools/{gscip/legacy_scip_params.cc => linear_solver/proto_solver/scip_params.cc} (98%) rename ortools/{gscip/legacy_scip_params.h => linear_solver/proto_solver/scip_params.h} (83%) diff --git a/ortools/gscip/BUILD.bazel b/ortools/gscip/BUILD.bazel index 1c7e016f10..00bbd080db 100644 --- a/ortools/gscip/BUILD.bazel +++ b/ortools/gscip/BUILD.bazel @@ -49,19 +49,6 @@ cc_library( ], ) -cc_library( - name = "legacy_scip_params", - srcs = ["legacy_scip_params.cc"], - hdrs = ["legacy_scip_params.h"], - deps = [ - "//ortools/linear_solver:scip_helper_macros", - "@abseil-cpp//absl/status", - "@abseil-cpp//absl/strings", - "@abseil-cpp//absl/strings:str_format", - "@scip", - ], -) - cc_library( name = "gscip", srcs = [ @@ -76,7 +63,6 @@ cc_library( ":gscip_cc_proto", ":gscip_message_handler", ":gscip_parameters", - ":legacy_scip_params", "//ortools/base", "//ortools/base:status_builder", "//ortools/base:status_macros", diff --git a/ortools/gscip/gscip.cc b/ortools/gscip/gscip.cc index bba9b3fc8a..58eb771694 100644 --- a/ortools/gscip/gscip.cc +++ b/ortools/gscip/gscip.cc @@ -40,7 +40,6 @@ #include "ortools/gscip/gscip.pb.h" #include "ortools/gscip/gscip_event_handler.h" #include "ortools/gscip/gscip_parameters.h" -#include "ortools/gscip/legacy_scip_params.h" #include "ortools/linear_solver/scip_helper_macros.h" #include "ortools/port/proto_utils.h" #include "ortools/util/status_macros.h" @@ -294,8 +293,7 @@ const GScipConstraintOptions& DefaultGScipConstraintOptions() { return constraint_options; } -absl::Status GScip::SetParams(const GScipParameters& params, - 
absl::string_view legacy_params) { +absl::Status GScip::SetParams(const GScipParameters& params) { if (params.has_silence_output()) { SCIPsetMessagehdlrQuiet(scip_, params.silence_output()); } @@ -350,10 +348,6 @@ absl::Status GScip::SetParams(const GScipParameters& params, RETURN_IF_SCIP_ERROR( SCIPsetRealParam(scip_, real_param.first.c_str(), real_param.second)); } - if (!legacy_params.empty()) { - RETURN_IF_ERROR( - LegacyScipSetSolverSpecificParameters(legacy_params, scip_)); - } return absl::OkStatus(); } @@ -929,8 +923,7 @@ absl::StatusOr GScip::SuggestHint( } absl::StatusOr GScip::Solve( - const GScipParameters& params, absl::string_view legacy_params, - const GScipMessageHandler message_handler, + const GScipParameters& params, const GScipMessageHandler message_handler, const Interrupter* const interrupter) { if (InErrorState()) { return absl::InvalidArgumentError( @@ -950,7 +943,7 @@ absl::StatusOr GScip::Solve( GScipResult result; // Step 1: apply parameters. - const absl::Status param_status = SetParams(params, legacy_params); + const absl::Status param_status = SetParams(params); if (!param_status.ok()) { result.gscip_output.set_status(GScipOutput::INVALID_SOLVER_PARAMETERS); // Conversion to std::string for open source build. diff --git a/ortools/gscip/gscip.h b/ortools/gscip/gscip.h index 722fc45dff..e6f9aa08ca 100644 --- a/ortools/gscip/gscip.h +++ b/ortools/gscip/gscip.h @@ -178,8 +178,7 @@ class GScip { static std::string ScipVersion(); // After Solve() the parameters are reset and SCIP stage is restored to - // PROBLEM. "legacy_params" are in the format of legacy_scip_params.h and are - // applied after "params". Use of "legacy_params" is discouraged. + // PROBLEM. // // The returned StatusOr will contain an error only if an: // * An underlying function from SCIP fails. @@ -192,7 +191,6 @@ class GScip { // returns. 
absl::StatusOr Solve( const GScipParameters& params = GScipParameters(), - absl::string_view legacy_params = "", GScipMessageHandler message_handler = nullptr, const Interrupter* interrupter = nullptr); @@ -480,8 +478,7 @@ class GScip { // Releases SCIP memory. absl::Status CleanUp(); - absl::Status SetParams(const GScipParameters& params, - absl::string_view legacy_params); + absl::Status SetParams(const GScipParameters& params); absl::Status FreeTransform(); // Returns an error if |d| >= ScipInf(). diff --git a/ortools/linear_solver/BUILD.bazel b/ortools/linear_solver/BUILD.bazel index 4a7ed5e0ad..d574cc3343 100644 --- a/ortools/linear_solver/BUILD.bazel +++ b/ortools/linear_solver/BUILD.bazel @@ -311,7 +311,7 @@ cc_library( "//conditions:default": [], }) + select({ ":use_scip": [ - "//ortools/gscip:legacy_scip_params", + "//ortools/linear_solver/proto_solver:scip_params", "//ortools/linear_solver/proto_solver:scip_proto_solver", "@scip", ], diff --git a/ortools/linear_solver/glpk_interface.cc b/ortools/linear_solver/glpk_interface.cc index aa76d17397..95549b7b45 100644 --- a/ortools/linear_solver/glpk_interface.cc +++ b/ortools/linear_solver/glpk_interface.cc @@ -15,7 +15,6 @@ #include #include -#include #include #include #include @@ -24,10 +23,7 @@ #include #include "absl/base/attributes.h" -#include "absl/memory/memory.h" #include "absl/strings/str_format.h" -#include "ortools/base/commandlineflags.h" -#include "ortools/base/hash.h" #include "ortools/base/logging.h" #include "ortools/base/timer.h" #include "ortools/glpk/glpk_env_deleter.h" diff --git a/ortools/linear_solver/proto_solver/BUILD.bazel b/ortools/linear_solver/proto_solver/BUILD.bazel index 8b01c79af5..1e8952014d 100644 --- a/ortools/linear_solver/proto_solver/BUILD.bazel +++ b/ortools/linear_solver/proto_solver/BUILD.bazel @@ -110,6 +110,19 @@ cc_library( ], ) +cc_library( + name = "scip_params", + srcs = ["scip_params.cc"], + hdrs = ["scip_params.h"], + deps = [ + 
"//ortools/linear_solver:scip_helper_macros", + "@abseil-cpp//absl/status", + "@abseil-cpp//absl/strings", + "@abseil-cpp//absl/strings:str_format", + "@scip", + ], +) + cc_library( name = "scip_proto_solver", srcs = ["scip_proto_solver.cc"], @@ -121,10 +134,10 @@ cc_library( deps = [ "//ortools/base", "//ortools/base:timer", - "//ortools/gscip:legacy_scip_params", "//ortools/linear_solver:linear_solver_cc_proto", "//ortools/linear_solver:model_validator", "//ortools/linear_solver:scip_helper_macros", + "//ortools/linear_solver/proto_solver:scip_params", "//ortools/util:lazy_mutable_copy", "@abseil-cpp//absl/cleanup", "@abseil-cpp//absl/container:btree", diff --git a/ortools/gscip/legacy_scip_params.cc b/ortools/linear_solver/proto_solver/scip_params.cc similarity index 98% rename from ortools/gscip/legacy_scip_params.cc rename to ortools/linear_solver/proto_solver/scip_params.cc index 13f015a39b..282b5d303a 100644 --- a/ortools/gscip/legacy_scip_params.cc +++ b/ortools/linear_solver/proto_solver/scip_params.cc @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "ortools/gscip/legacy_scip_params.h" +#include "ortools/linear_solver/proto_solver/scip_params.h" #include #include diff --git a/ortools/gscip/legacy_scip_params.h b/ortools/linear_solver/proto_solver/scip_params.h similarity index 83% rename from ortools/gscip/legacy_scip_params.h rename to ortools/linear_solver/proto_solver/scip_params.h index d50d5072b2..fecd699c08 100644 --- a/ortools/gscip/legacy_scip_params.h +++ b/ortools/linear_solver/proto_solver/scip_params.h @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#ifndef OR_TOOLS_GSCIP_LEGACY_SCIP_PARAMS_H_ -#define OR_TOOLS_GSCIP_LEGACY_SCIP_PARAMS_H_ +#ifndef OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_SCIP_PARAMS_H_ +#define OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_SCIP_PARAMS_H_ #include @@ -27,4 +27,4 @@ absl::Status LegacyScipSetSolverSpecificParameters(absl::string_view parameters, SCIP* scip); } -#endif // OR_TOOLS_GSCIP_LEGACY_SCIP_PARAMS_H_ +#endif // OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_SCIP_PARAMS_H_ diff --git a/ortools/linear_solver/proto_solver/scip_proto_solver.cc b/ortools/linear_solver/proto_solver/scip_proto_solver.cc index 3829b73204..8e36df5dfb 100644 --- a/ortools/linear_solver/proto_solver/scip_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/scip_proto_solver.cc @@ -41,9 +41,9 @@ #include "absl/time/time.h" #include "ortools/base/status_macros.h" #include "ortools/base/timer.h" -#include "ortools/gscip/legacy_scip_params.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/model_validator.h" +#include "ortools/linear_solver/proto_solver/scip_params.h" #include "ortools/linear_solver/scip_helper_macros.h" #include "ortools/util/lazy_mutable_copy.h" #include "scip/cons_and.h" diff --git a/ortools/linear_solver/python/linear_solver.i b/ortools/linear_solver/python/linear_solver.i index 087622250b..2309ea3e4c 100644 --- a/ortools/linear_solver/python/linear_solver.i +++ b/ortools/linear_solver/python/linear_solver.i @@ -24,7 +24,7 @@ // solver.Maximize(10 * x1 + 6 * x2) // // USAGE EXAMPLES: -// - ortools/python/linear_programming.py +// - examples/python/linear_programming.py // - ./pywraplp_test.py // // TODO(user): test all the APIs that are currently marked as 'untested'. 
diff --git a/ortools/linear_solver/samples/assignment_groups_mip.py b/ortools/linear_solver/samples/assignment_groups_mip.py index 62e18572f6..e37318ca22 100644 --- a/ortools/linear_solver/samples/assignment_groups_mip.py +++ b/ortools/linear_solver/samples/assignment_groups_mip.py @@ -16,6 +16,7 @@ """Solve assignment problem for given group of workers.""" # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/assignment_mb.py b/ortools/linear_solver/samples/assignment_mb.py index d366b204bd..f15b455738 100644 --- a/ortools/linear_solver/samples/assignment_mb.py +++ b/ortools/linear_solver/samples/assignment_mb.py @@ -20,6 +20,7 @@ import io import pandas as pd from ortools.linear_solver.python import model_builder + # [END import] diff --git a/ortools/linear_solver/samples/assignment_mip.py b/ortools/linear_solver/samples/assignment_mip.py index 7f8a315430..b6c830314b 100644 --- a/ortools/linear_solver/samples/assignment_mip.py +++ b/ortools/linear_solver/samples/assignment_mip.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/assignment_task_sizes_mip.py b/ortools/linear_solver/samples/assignment_task_sizes_mip.py index da15fd5acf..8e9b4bfb9e 100644 --- a/ortools/linear_solver/samples/assignment_task_sizes_mip.py +++ b/ortools/linear_solver/samples/assignment_task_sizes_mip.py @@ -16,6 +16,7 @@ """MIP example that solves an assignment problem.""" # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/assignment_teams_mip.py b/ortools/linear_solver/samples/assignment_teams_mip.py index 3e27b2e9e0..bdd4f9c9a1 100644 --- a/ortools/linear_solver/samples/assignment_teams_mip.py +++ b/ortools/linear_solver/samples/assignment_teams_mip.py @@ -16,6 +16,7 @@ """MIP example that solves an assignment problem.""" # [START import] from 
ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/basic_example.py b/ortools/linear_solver/samples/basic_example.py index bdf5b570ae..baaf5ae5ad 100644 --- a/ortools/linear_solver/samples/basic_example.py +++ b/ortools/linear_solver/samples/basic_example.py @@ -17,6 +17,7 @@ # [START import] from ortools.init.python import init from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/bin_packing_mb.py b/ortools/linear_solver/samples/bin_packing_mb.py index 1882771a95..3d7dcaa908 100644 --- a/ortools/linear_solver/samples/bin_packing_mb.py +++ b/ortools/linear_solver/samples/bin_packing_mb.py @@ -20,6 +20,7 @@ import io import pandas as pd from ortools.linear_solver.python import model_builder + # [END import] diff --git a/ortools/linear_solver/samples/bin_packing_mip.py b/ortools/linear_solver/samples/bin_packing_mip.py index 4f49aabcaf..977759c38b 100755 --- a/ortools/linear_solver/samples/bin_packing_mip.py +++ b/ortools/linear_solver/samples/bin_packing_mip.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.linear_solver import pywraplp + # [END import] @@ -31,6 +32,7 @@ def create_data_model(): data["bin_capacity"] = 100 return data + # [END data_model] diff --git a/ortools/linear_solver/scip_interface.cc b/ortools/linear_solver/scip_interface.cc index cd3b5390d6..582a4409ec 100644 --- a/ortools/linear_solver/scip_interface.cc +++ b/ortools/linear_solver/scip_interface.cc @@ -35,11 +35,11 @@ #include "ortools/base/logging.h" #include "ortools/base/status_macros.h" #include "ortools/base/timer.h" -#include "ortools/gscip/legacy_scip_params.h" #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/linear_solver_callback.h" #include "ortools/linear_solver/proto_solver/proto_utils.h" +#include "ortools/linear_solver/proto_solver/scip_params.h" #include 
"ortools/linear_solver/proto_solver/scip_proto_solver.h" #include "ortools/linear_solver/scip_callback.h" #include "ortools/linear_solver/scip_helper_macros.h" diff --git a/ortools/linear_solver/solve.cc b/ortools/linear_solver/solve.cc index 3d77bb37e9..52c2957529 100644 --- a/ortools/linear_solver/solve.cc +++ b/ortools/linear_solver/solve.cc @@ -32,7 +32,7 @@ // CP-SAT parameters: // // solve --solver=sat \ -// --params="max_time_in_seconds:600, num_search_workers:8" +// --params="max_time_in_seconds:600, num_workers:8" // --stderrthreshold=0 \ // --input=/tmp/foo.mps \ // 2>/tmp/foo.err From 20f14fa745c22effe927541a200299178e0a043a Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 17 Jun 2025 12:47:41 +0200 Subject: [PATCH 097/509] misc doc change --- ortools/constraint_solver/docs/CP.md | 1 - 1 file changed, 1 deletion(-) diff --git a/ortools/constraint_solver/docs/CP.md b/ortools/constraint_solver/docs/CP.md index 395409180a..9909a28221 100644 --- a/ortools/constraint_solver/docs/CP.md +++ b/ortools/constraint_solver/docs/CP.md @@ -74,7 +74,6 @@ int main(int argc, char* argv[]) { ### Python code samples ```python -#!/usr/bin/env python3 # Snippet from ortools/constraint_solver/samples/simple_cp_program.py """Simple Constraint optimization example.""" From 1c81bd833c832e6051aea61b2a7e3359d4126037 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 17 Jun 2025 12:47:56 +0200 Subject: [PATCH 098/509] more gscip cleaning --- ortools/math_opt/solvers/cp_sat_solver.cc | 2 +- .../math_opt/solvers/cp_sat_solver_test.cc | 2 +- ortools/math_opt/solvers/glpk/rays.cc | 1 - ortools/math_opt/solvers/gscip_solver.cc | 3 +- ortools/math_opt/solvers/highs_solver.cc | 45 ++++++++++--------- 5 files changed, 28 insertions(+), 25 deletions(-) diff --git a/ortools/math_opt/solvers/cp_sat_solver.cc b/ortools/math_opt/solvers/cp_sat_solver.cc index df36fea70e..436a89661d 100644 --- a/ortools/math_opt/solvers/cp_sat_solver.cc +++ 
b/ortools/math_opt/solvers/cp_sat_solver.cc @@ -137,7 +137,7 @@ std::vector SetSolveParameters( sat_parameters.set_random_seed(parameters.random_seed()); } if (parameters.has_threads()) { - sat_parameters.set_num_search_workers(parameters.threads()); + sat_parameters.set_num_workers(parameters.threads()); } if (parameters.has_relative_gap_tolerance()) { sat_parameters.set_relative_gap_limit(parameters.relative_gap_tolerance()); diff --git a/ortools/math_opt/solvers/cp_sat_solver_test.cc b/ortools/math_opt/solvers/cp_sat_solver_test.cc index e1372726c3..fb91ec639a 100644 --- a/ortools/math_opt/solvers/cp_sat_solver_test.cc +++ b/ortools/math_opt/solvers/cp_sat_solver_test.cc @@ -225,7 +225,7 @@ SolutionHintTestParams MakeCpsatSolutionHintParams() { solve_params.cuts = Emphasis::kOff; solve_params.presolve = Emphasis::kOff; solve_params.cp_sat.set_stop_after_first_solution(true); - solve_params.cp_sat.set_num_search_workers(1); + solve_params.cp_sat.set_num_workers(1); // Matches "best:", "next:" and "hint" appearing in the same line std::string hint_message_regex = "best:.*next:.*hint"; return SolutionHintTestParams(SolverType::kCpSat, solve_params, std::nullopt, diff --git a/ortools/math_opt/solvers/glpk/rays.cc b/ortools/math_opt/solvers/glpk/rays.cc index 281345b729..9907907d9f 100644 --- a/ortools/math_opt/solvers/glpk/rays.cc +++ b/ortools/math_opt/solvers/glpk/rays.cc @@ -14,7 +14,6 @@ #include "ortools/math_opt/solvers/glpk/rays.h" #include -#include #include #include diff --git a/ortools/math_opt/solvers/gscip_solver.cc b/ortools/math_opt/solvers/gscip_solver.cc index 03503fdb21..8b1317374a 100644 --- a/ortools/math_opt/solvers/gscip_solver.cc +++ b/ortools/math_opt/solvers/gscip_solver.cc @@ -1093,8 +1093,7 @@ absl::StatusOr GScipSolver::Solve( ASSIGN_OR_RETURN( GScipResult gscip_result, - gscip_->Solve(gscip_parameters, - /*legacy_params=*/"", std::move(gscip_msg_cb), + gscip_->Solve(gscip_parameters, std::move(gscip_msg_cb), use_interrupter ? 
&gscip_interrupter : nullptr)); // Flush the potential last unfinished line. diff --git a/ortools/math_opt/solvers/highs_solver.cc b/ortools/math_opt/solvers/highs_solver.cc index b3c664c97c..25f66f1e7f 100644 --- a/ortools/math_opt/solvers/highs_solver.cc +++ b/ortools/math_opt/solvers/highs_solver.cc @@ -544,30 +544,35 @@ absl::StatusOr HighsSolver::MakeTermination( optional_finite_primal_objective, optional_dual_objective); case HighsModelStatus::kIterationLimit: { - if (is_integer) { - if (had_node_limit && had_solution_limit) { - return LimitTerminationProto( - is_maximize, LIMIT_UNDETERMINED, optional_finite_primal_objective, - optional_dual_objective, - "Both node limit and solution limit were requested, cannot " - "determine reason for termination"); - } else if (had_node_limit) { - return LimitTerminationProto(is_maximize, LIMIT_NODE, - optional_finite_primal_objective, - optional_dual_objective); - } else if (had_solution_limit) { - return LimitTerminationProto(is_maximize, LIMIT_SOLUTION, - optional_finite_primal_objective, - optional_dual_objective); - } - } else { - // For LP, only the MathOpt iteration limit can cause highs to return - // HighsModelStatus::kIterationLimit. 
- return LimitTerminationProto(is_maximize, LIMIT_ITERATION, + return LimitTerminationProto(is_maximize, LIMIT_ITERATION, + optional_finite_primal_objective, + optional_dual_objective); + } + case HighsModelStatus::kSolutionLimit: { + if (had_node_limit && !had_solution_limit) { + return LimitTerminationProto(is_maximize, LIMIT_NODE, optional_finite_primal_objective, optional_dual_objective); + } else if (had_solution_limit && !had_node_limit) { + return LimitTerminationProto(is_maximize, LIMIT_SOLUTION, + optional_finite_primal_objective, + optional_dual_objective); + } else { + return LimitTerminationProto( + is_maximize, LIMIT_UNDETERMINED, optional_finite_primal_objective, + optional_dual_objective, + "HighsModelStatus was kSolutionLimit but cannot infer a MathOpt " + "Limit, could be NODE_LIMIT or SOLUTION_LIMIT"); } } + case HighsModelStatus::kInterrupt: + return LimitTerminationProto(is_maximize, LIMIT_INTERRUPTED, + optional_finite_primal_objective, + optional_dual_objective); + case HighsModelStatus::kMemoryLimit: + return LimitTerminationProto( + is_maximize, LIMIT_OTHER, optional_finite_primal_objective, + optional_dual_objective, "Highs hit kMemoryLimit"); } return util::InternalErrorBuilder() << "HighsModelStatus unimplemented: " << static_cast(highs_model_status); From e3d4f349fef40c4d31183741420c5a3acc110140 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 17 Jun 2025 12:48:24 +0200 Subject: [PATCH 099/509] [CP-SAT] more work on precedences; and no_overlap_2d --- ortools/sat/2d_distances_propagator.cc | 63 +++---- ortools/sat/2d_distances_propagator.h | 4 +- ortools/sat/BUILD.bazel | 4 +- ortools/sat/cp_model_presolve.cc | 1 + ortools/sat/cp_model_search.cc | 1 - ortools/sat/diffn.cc | 27 +-- ortools/sat/disjunctive.cc | 4 + ortools/sat/integer_base.cc | 11 ++ ortools/sat/integer_base.h | 12 ++ ortools/sat/precedences.cc | 223 ++++++++++++------------- ortools/sat/precedences.h | 108 ++++++++---- ortools/sat/precedences_test.cc | 27 +-- 
ortools/sat/sat_decision.h | 12 +- ortools/sat/sat_decision_test.cc | 41 +++++ ortools/sat/scheduling_cuts.cc | 154 ++++++++--------- ortools/sat/scheduling_cuts.h | 31 ++++ ortools/sat/scheduling_cuts_test.cc | 31 +++- ortools/util/bitset.h | 3 + 18 files changed, 449 insertions(+), 308 deletions(-) diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc index 7cf521400f..65e949c206 100644 --- a/ortools/sat/2d_distances_propagator.cc +++ b/ortools/sat/2d_distances_propagator.cc @@ -32,6 +32,7 @@ #include "ortools/sat/precedences.h" #include "ortools/sat/scheduling_helpers.h" #include "ortools/sat/synchronization.h" +#include "ortools/util/bitset.h" namespace operations_research { namespace sat { @@ -39,9 +40,8 @@ namespace sat { Precedences2DPropagator::Precedences2DPropagator( NoOverlap2DConstraintHelper* helper, Model* model) : helper_(*helper), - linear2_bounds_from_linear3_( - model->GetOrCreate()), linear2_bounds_(model->GetOrCreate()), + linear2_watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()) { model->GetOrCreate()->SetPushAffineUbForBinaryRelation(); } @@ -55,6 +55,8 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { std::vector boxes[2][2]; }; absl::flat_hash_map var_to_box_and_coeffs; + SparseBitset& var_set = + *linear2_bounds_->GetTemporyClearedAndResizedBitset(); for (int dim = 0; dim < 2; ++dim) { const SchedulingConstraintHelper& dim_helper = @@ -63,26 +65,26 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { const absl::Span interval_points = j == 0 ? 
dim_helper.Starts() : dim_helper.Ends(); for (int i = 0; i < helper_.NumBoxes(); ++i) { - if (interval_points[i].var != kNoIntegerVariable) { - var_to_box_and_coeffs[PositiveVariable(interval_points[i].var)] - .boxes[dim][j] - .push_back(i); + const IntegerVariable var = interval_points[i].var; + if (var != kNoIntegerVariable) { + var_set.Set(PositiveVariable(var)); + var_to_box_and_coeffs[PositiveVariable(var)].boxes[dim][j].push_back( + i); } } } } + const absl::Span exprs = + linear2_bounds_->GetAllExpressionsWithPotentialNonTrivialBounds( + var_set.BitsetConstView()); VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " - << linear2_bounds_->GetAllExpressionsWithPotentialNonTrivialBounds() - .size(); - for (const LinearExpression2& expr : - linear2_bounds_->GetAllExpressionsWithPotentialNonTrivialBounds()) { + << exprs.size(); + for (const LinearExpression2& expr : exprs) { auto it1 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[0])); auto it2 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[1])); - if (it1 == var_to_box_and_coeffs.end() || - it2 == var_to_box_and_coeffs.end()) { - continue; - } + DCHECK(it1 != var_to_box_and_coeffs.end()); + DCHECK(it2 != var_to_box_and_coeffs.end()); const VarUsage& usage1 = it1->second; const VarUsage& usage2 = it2->second; @@ -92,15 +94,9 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { for (const int box1 : usage1.boxes[dim][0 /* start */]) { for (const int box2 : usage2.boxes[dim][1 /* end */]) { if (box1 == box2) continue; - const AffineExpression& start = dim_helper.Starts()[box1]; - const AffineExpression& end = dim_helper.Ends()[box2]; - LinearExpression2 expr2; - expr2.vars[0] = start.var; - expr2.vars[1] = end.var; - expr2.coeffs[0] = start.coeff; - expr2.coeffs[1] = -end.coeff; - expr2.SimpleCanonicalization(); - expr2.DivideByGcd(); + const auto [expr2, unused] = EncodeDifferenceLowerThan( + dim_helper.Starts()[box1], dim_helper.Ends()[box2], + 
/*ub=unused*/ 0); if (expr == expr2) { if (box1 < box2) { non_trivial_pairs_.push_back({box1, box2}); @@ -120,11 +116,9 @@ bool Precedences2DPropagator::Propagate() { if (!helper_.SynchronizeAndSetDirection()) return false; if (last_helper_inprocessing_count_ != helper_.InProcessingCount() || helper_.x_helper().CurrentDecisionLevel() == 0 || - last_num_expressions_ != - linear2_bounds_from_linear3_->NumExpressionsWithAffineBounds()) { + last_linear2_timestamp_ != linear2_watcher_->Timestamp()) { last_helper_inprocessing_count_ = helper_.InProcessingCount(); - last_num_expressions_ = - linear2_bounds_from_linear3_->NumExpressionsWithAffineBounds(); + last_linear2_timestamp_ = linear2_watcher_->Timestamp(); CollectPairsOfBoxesWithNonTrivialDistance(); } @@ -150,15 +144,9 @@ bool Precedences2DPropagator::Propagate() { if (j == 1) { std::swap(b1, b2); } - LinearExpression2 expr; - expr.vars[0] = helper->Starts()[b1].var; - expr.vars[1] = helper->Ends()[b2].var; - expr.coeffs[0] = helper->Starts()[b1].coeff; - expr.coeffs[1] = -helper->Ends()[b2].coeff; - const IntegerValue ub_of_start_minus_end_value = - linear2_bounds_->UpperBound(expr) + helper->Starts()[b1].constant - - helper->Ends()[b2].constant; - if (ub_of_start_minus_end_value >= 0) { + const auto [expr, ub_for_no_overlap] = EncodeDifferenceLowerThan( + helper->Starts()[b1], helper->Ends()[b2], 0); + if (linear2_bounds_->UpperBound(expr) >= ub_for_no_overlap) { is_unfeasible = false; break; } @@ -197,8 +185,7 @@ bool Precedences2DPropagator::Propagate() { int Precedences2DPropagator::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); helper_.WatchAllBoxes(id); - linear2_bounds_from_linear3_->WatchAllLinearExpressions2(id); - // TODO(user): Implement a Linear2Bounds watcher. 
+ linear2_watcher_->WatchAllLinearExpressions2(id); return id; } diff --git a/ortools/sat/2d_distances_propagator.h b/ortools/sat/2d_distances_propagator.h index f2e46ca1d9..e8ca1066c9 100644 --- a/ortools/sat/2d_distances_propagator.h +++ b/ortools/sat/2d_distances_propagator.h @@ -48,12 +48,12 @@ class Precedences2DPropagator : public PropagatorInterface { std::vector> non_trivial_pairs_; NoOverlap2DConstraintHelper& helper_; - Linear2BoundsFromLinear3* linear2_bounds_from_linear3_; Linear2Bounds* linear2_bounds_; + Linear2Watcher* linear2_watcher_; SharedStatistics* shared_stats_; int last_helper_inprocessing_count_ = -1; - int last_num_expressions_ = -1; + int64_t last_linear2_timestamp_ = -1; int64_t num_conflicts_ = 0; int64_t num_calls_ = 0; diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index f76d43b09e..a3321cdee7 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -129,6 +129,7 @@ cc_library( ":scheduling_helpers", ":synchronization", "//ortools/base:stl_util", + "//ortools/util:bitset", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", @@ -1751,6 +1752,7 @@ cc_library( ":sat_base", "//ortools/base", "//ortools/base:strong_vector", + "//ortools/util:bitset", "//ortools/util:saturated_arithmetic", "//ortools/util:sorted_interval_list", "//ortools/util:strong_integers", @@ -2290,6 +2292,7 @@ cc_library( ":integer", ":integer_base", ":intervals", + ":linear_propagation", ":model", ":precedences", ":sat_base", @@ -3575,7 +3578,6 @@ cc_library( ":synchronization", ":timetable", ":util", - "//ortools/base:stl_util", "//ortools/util:bitset", "//ortools/util:saturated_arithmetic", "//ortools/util:strong_integers", diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 589cba2d38..c17f6da241 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -8820,6 +8820,7 @@ void CpModelPresolver::ExpandObjective() { } void 
CpModelPresolver::MergeNoOverlapConstraints() { + PresolveTimer timer("MergeNoOverlap", logger_, time_limit_); if (context_->ModelIsUnsat()) return; if (time_limit_->LimitReached()) return; diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index e6b29ef50b..6d18fd1ea1 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -188,7 +188,6 @@ void AddExtraSchedulingPropagators(SatParameters& new_params) { new_params.set_use_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_area_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_try_edge_reasoning_in_no_overlap_2d(true); - new_params.set_no_overlap_2d_boolean_relations_limit(100); } // We want a random tie breaking among variables with equivalent values. diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index fde8ee6a46..078de5eb92 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -32,7 +32,6 @@ #include "absl/log/vlog_is_on.h" #include "absl/numeric/bits.h" #include "absl/types/span.h" -#include "ortools/base/stl_util.h" #include "ortools/sat/2d_distances_propagator.h" #include "ortools/sat/2d_mandatory_overlap_propagator.h" #include "ortools/sat/2d_orthogonal_packing.h" @@ -277,11 +276,11 @@ void AddNonOverlappingRectangles(const std::vector& x, DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0); for (int i = 0; i < num_boxes; ++i) { - if (repository->IsAbsent(x[i])) continue; - if (repository->IsAbsent(y[i])) continue; + if (repository->IsOptional(x[i])) continue; + if (repository->IsOptional(y[i])) continue; for (int j = i + 1; j < num_boxes; ++j) { - if (repository->IsAbsent(x[j])) continue; - if (repository->IsAbsent(y[j])) continue; + if (repository->IsOptional(x[j])) continue; + if (repository->IsOptional(y[j])) continue; // At most one of these two x options is true. 
const Literal x_ij = repository->GetOrCreatePrecedenceLiteral( @@ -307,22 +306,8 @@ void AddNonOverlappingRectangles(const std::vector& x, return; } - // At least one of the 4 options is true if all boxes are present. - std::vector clause = {x_ij, x_ji, y_ij, y_ji}; - if (repository->IsOptional(x[i])) { - clause.push_back(repository->PresenceLiteral(x[i]).Negated()); - } - if (repository->IsOptional(y[i])) { - clause.push_back(repository->PresenceLiteral(y[i]).Negated()); - } - if (repository->IsOptional(x[j])) { - clause.push_back(repository->PresenceLiteral(x[j]).Negated()); - } - if (repository->IsOptional(y[j])) { - clause.push_back(repository->PresenceLiteral(y[j]).Negated()); - } - gtl::STLSortAndRemoveDuplicates(&clause); - if (!sat_solver->AddProblemClause(clause)) { + // At least one of the 4 options is true. + if (!sat_solver->AddProblemClause({x_ij, x_ji, y_ij, y_ji})) { return; } } diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index f5bfad4950..c018335fce 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -25,6 +25,7 @@ #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/intervals.h" +#include "ortools/sat/linear_propagation.h" #include "ortools/sat/model.h" #include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" @@ -143,6 +144,9 @@ void AddDisjunctive(const std::vector& intervals, // using the fact that they are in disjunction. if (params.use_precedences_in_disjunctive_constraint() && !params.use_combined_no_overlap()) { + // Lets try to exploit linear3 too. 
+ model->GetOrCreate()->SetPushAffineUbForBinaryRelation(); + for (const bool time_direction : {true, false}) { DisjunctivePrecedences* precedences = new DisjunctivePrecedences(time_direction, helper, model); diff --git a/ortools/sat/integer_base.cc b/ortools/sat/integer_base.cc index f39463353f..740c02bd0d 100644 --- a/ortools/sat/integer_base.cc +++ b/ortools/sat/integer_base.cc @@ -21,6 +21,7 @@ #include #include "absl/log/check.h" +#include "ortools/util/bitset.h" namespace operations_research::sat { @@ -244,4 +245,14 @@ BestBinaryRelationBounds::GetSortedNonTrivialBounds() const { return root_relations_sorted; } +void BestBinaryRelationBounds::AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const { + for (const auto& [expr, unused] : best_bounds_) { + if (!var_set[PositiveVariable(expr.vars[0])]) continue; + if (!var_set[PositiveVariable(expr.vars[1])]) continue; + result->push_back(expr); + } +} + } // namespace operations_research::sat diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index ad4331e5a5..9eb30219cc 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -95,6 +95,13 @@ inline IntegerValue FloorRatio(IntegerValue dividend, return result - adjust; } +// When the case positive_divisor == 1 is frequent, this is faster. +inline IntegerValue FloorRatioWithTest(IntegerValue dividend, + IntegerValue positive_divisor) { + if (positive_divisor == 1) return dividend; + return FloorRatio(dividend, positive_divisor); +} + // Overflows and saturated arithmetic. inline IntegerValue CapProdI(IntegerValue a, IntegerValue b) { @@ -497,6 +504,11 @@ class BestBinaryRelationBounds { std::vector> GetSortedNonTrivialBounds() const; + // Note that this is non-deterministic and in O(num_relations). + void AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const; + private: // The best bound on the given "canonicalized" expression. 
absl::flat_hash_map> diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index b82d97b8fb..33a3c29318 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -54,6 +54,27 @@ namespace operations_research { namespace sat { +void Linear2Watcher::NotifyBoundChanged(LinearExpression2 expr) { + DCHECK(expr.IsCanonicalized()); + DCHECK_EQ(expr.DivideByGcd(), 1); + ++timestamp_; + for (const int id : propagator_ids_) { + watcher_->CallOnNextPropagate(id); + } + for (IntegerVariable var : expr.non_zero_vars()) { + var = PositiveVariable(var); // TODO(user): Be more precise? + if (var >= var_timestamp_.size()) { + var_timestamp_.resize(var + 1, 0); + } + var_timestamp_[var]++; + } +} + +int64_t Linear2Watcher::VarTimestamp(IntegerVariable var) { + var = PositiveVariable(var); + return var < var_timestamp_.size() ? var_timestamp_[var] : 0; +} + std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) { @@ -80,6 +101,7 @@ std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, if (!lb_restricted && !ub_restricted) return {false, false}; ++num_updates_; + linear2_watcher_->NotifyBoundChanged(expr); // Update our special coeff=1 lookup table. 
if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { @@ -281,6 +303,12 @@ RootLevelLinear2Bounds::GetAllBoundsContainingVariables( return result; } +void RootLevelLinear2Bounds::AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const { + root_level_relations_.AppendAllExpressionContaining(var_set, result); +} + EnforcedLinear2Bounds::~EnforcedLinear2Bounds() { if (!VLOG_IS_ON(1)) return; std::vector> stats; @@ -314,6 +342,7 @@ void EnforcedLinear2Bounds::PushConditionalRelation( if (rhs >= root_level_bounds_->LevelZeroUpperBound(expr)) return; + linear2_watcher_->NotifyBoundChanged(expr); ++num_conditional_relation_updates_; const int new_index = conditional_stack_.size(); @@ -707,14 +736,14 @@ void EnforcedLinear2Bounds::CollectPrecedences( } } -std::vector -EnforcedLinear2Bounds::GetAllExpressionsWithConditionalBounds() const { - std::vector result; - result.reserve(conditional_stack_.size()); +void EnforcedLinear2Bounds::AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const { for (const auto& entry : conditional_stack_) { - result.push_back(entry.key); + if (!var_set[PositiveVariable(entry.key.vars[0])]) continue; + if (!var_set[PositiveVariable(entry.key.vars[1])]) continue; + result->push_back(entry.key); } - return result; } namespace { @@ -1783,80 +1812,57 @@ LiteralIndex ReifiedLinear2Bounds::GetReifiedPrecedence(AffineExpression a, Linear2BoundsFromLinear3::Linear2BoundsFromLinear3(Model* model) : integer_trail_(model->GetOrCreate()), trail_(model->GetOrCreate()), + linear2_watcher_(model->GetOrCreate()), watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), - best_root_level_bounds_(model->GetOrCreate()) {} + root_level_bounds_(model->GetOrCreate()) {} +// Note that for speed we do not compare to the trivial or root level bounds. +// +// It is okay to still store it in the hash-map, since at worst we will have no +// more entries than 3 * number_of_linear3_in_the_problem. 
bool Linear2BoundsFromLinear3::AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub) { - const IntegerValue new_ub = integer_trail_->UpperBound(affine_ub); expr.SimpleCanonicalization(); - // Not better than trivial upper bound. - if (integer_trail_->UpperBound(expr) <= new_ub) return false; - + // At level zero, just add it to root_level_bounds_. if (trail_->CurrentDecisionLevel() == 0) { - best_root_level_bounds_->Add( - expr, kMinIntegerValue, integer_trail_->LevelZeroUpperBound(affine_ub)); - NotifyWatchingPropagators(); - return false; + root_level_bounds_->AddUpperBound( + expr, integer_trail_->LevelZeroUpperBound(affine_ub)); + return false; // Not important. } - // Not better than the root level upper bound. - if (best_root_level_bounds_->LevelZeroUpperBound(expr) <= new_ub) { - return false; - } - - const IntegerValue gcd = expr.DivideByGcd(); - - const auto it = best_affine_ub_.find(expr); + // We have gcd * canonical_expr <= affine_ub, + // so we do need to store a "divisor". + const IntegerValue divisor = expr.DivideByGcd(); + auto it = best_affine_ub_.find(expr); if (it != best_affine_ub_.end()) { - const auto [old_affine_ub, old_gcd] = it->second; // We have an affine bound for this expr in the map. Can be exactly the // same, a better one or a worse one. - if (old_affine_ub == affine_ub && old_gcd == gcd) { - // The affine bound is already in the map. - NotifyWatchingPropagators(); // The affine bound was updated. + // + // Note that we expect exactly the same most of the time as it should be + // rare to have many linear3 "competing" for the same linear2 bound. 
+ const auto [old_affine_ub, old_divisor] = it->second; + if (old_affine_ub == affine_ub && old_divisor == divisor) { + linear2_watcher_->NotifyBoundChanged(expr); return false; } - const IntegerValue old_ub = - FloorRatio(integer_trail_->UpperBound(old_affine_ub), old_gcd); + + const IntegerValue new_ub = + FloorRatioWithTest(integer_trail_->UpperBound(affine_ub), divisor); + const IntegerValue old_ub = FloorRatioWithTest( + integer_trail_->UpperBound(old_affine_ub), old_divisor); if (old_ub <= new_ub) return false; // old bound is better. - } - // We have gcd * canonical_expr <= affine_ub, so we do need to store a - // "divisor". - ++num_affine_updates_; - best_affine_ub_[expr] = {affine_ub, gcd}; - NotifyWatchingPropagators(); - return true; -} - -void Linear2BoundsFromLinear3::NotifyWatchingPropagators() const { - for (const int id : propagator_ids_) { - watcher_->CallOnNextPropagate(id); - } -} - -IntegerValue Linear2BoundsFromLinear3::UpperBound( - LinearExpression2 expr) const { - expr.SimpleCanonicalization(); - - const IntegerValue trivial_ub = integer_trail_->UpperBound(expr); - const IntegerValue root_level_ub = - best_root_level_bounds_->LevelZeroUpperBound(expr); - const IntegerValue best_ub = std::min(root_level_ub, trivial_ub); - - const IntegerValue gcd = expr.DivideByGcd(); - const auto it = best_affine_ub_.find(expr); - if (it == best_affine_ub_.end()) { - return best_ub; + it->second = {affine_ub, divisor}; // Overwrite. } else { - const auto [affine, divisor] = it->second; - const IntegerValue canonical_ub = - FloorRatio(integer_trail_->UpperBound(affine), divisor); - return std::min(best_ub, CapProdI(gcd, canonical_ub)); + // Note that this should almost never happen (only once per lin2). 
+ best_affine_ub_[expr] = {affine_ub, divisor}; } + + ++num_affine_updates_; + linear2_watcher_->NotifyBoundChanged(expr); + return true; } IntegerValue Linear2BoundsFromLinear3::GetUpperBoundFromLinear3( @@ -1872,53 +1878,31 @@ IntegerValue Linear2BoundsFromLinear3::GetUpperBoundFromLinear3( } } -// TODO(user): If the trivial bound is better, its explanation is different... void Linear2BoundsFromLinear3::AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* /*literal_reason*/, std::vector* integer_reason) const { - expr.SimpleCanonicalization(); + DCHECK(expr.IsCanonicalized()); + DCHECK_EQ(expr.DivideByGcd(), 1); + DCHECK_LE(GetUpperBoundFromLinear3(expr), ub); - if (expr.coeffs[0] == 0 && expr.coeffs[1] == 0) return; // trivially zero - - // Starts by simple bounds. - if (best_root_level_bounds_->LevelZeroUpperBound(expr) <= ub) return; - - // Add explanation if it is a trivial bound. - const IntegerValue implied_ub = integer_trail_->UpperBound(expr); - if (implied_ub <= ub) { - const IntegerValue slack = ub - implied_ub; - expr.Negate(); // AppendRelaxedLinearReason() explains a lower bound. - absl::Span vars = expr.non_zero_vars(); - absl::Span coeffs = expr.non_zero_coeffs(); - integer_trail_->AppendRelaxedLinearReason(slack, coeffs, vars, - integer_reason); - return; - } - - // None of the bound above are enough, try the affine one. Note that gcd * - // expr <= ub, is the same as asking why expr <= FloorRatio(ub, gcd). - const IntegerValue gcd = expr.DivideByGcd(); const auto it = best_affine_ub_.find(expr); - if (it == best_affine_ub_.end()) return; + DCHECK(it != best_affine_ub_.end()); - // We want the reason for "expr <= ub", that is the reason for - // - "gcd * canonical_expr <= ub" - // - "canonical_expr <= FloorRatio(ub, gcd); - // - // knowing that canonical_expr <= affine_ub / divisor. + // We want the reason for "expr <= ub" + // knowing that expr <= affine / divisor. 
const auto [affine, divisor] = it->second; - integer_reason->push_back( - affine.LowerOrEqual(CapProdI(FloorRatio(ub, gcd) + 1, divisor) - 1)); + integer_reason->push_back(affine.LowerOrEqual(CapProdI(ub + 1, divisor) - 1)); } -std::vector -Linear2BoundsFromLinear3::GetAllExpressionsWithAffineBounds() const { - std::vector result; - for (const auto [expr, info] : best_affine_ub_) { - result.push_back(expr); +void Linear2BoundsFromLinear3::AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const { + for (const auto& [expr, unused] : best_affine_ub_) { + if (!var_set[PositiveVariable(expr.vars[0])]) continue; + if (!var_set[PositiveVariable(expr.vars[1])]) continue; + result->push_back(expr); } - return result; } IntegerValue Linear2Bounds::UpperBound(LinearExpression2 expr) const { @@ -1958,28 +1942,43 @@ void Linear2Bounds::AddReasonForUpperBoundLowerThan( ub = FloorRatio(ub, gcd); DCHECK_LE(UpperBound(expr), ub); + // Explanation are by order of preference, with no reason needed first. if (root_level_bounds_->LevelZeroUpperBound(expr) <= ub) { return; } + + // This one is a single literal. if (enforced_bounds_->GetUpperBoundFromEnforced(expr) <= ub) { - enforced_bounds_->AddReasonForUpperBoundLowerThan(expr, ub, literal_reason, - integer_reason); - } else { - linear3_bounds_->AddReasonForUpperBoundLowerThan(expr, ub, literal_reason, - integer_reason); + return enforced_bounds_->AddReasonForUpperBoundLowerThan( + expr, ub, literal_reason, integer_reason); } + + // This one is a single var upper bound. + if (linear3_bounds_->GetUpperBoundFromLinear3(expr) <= ub) { + return linear3_bounds_->AddReasonForUpperBoundLowerThan( + expr, ub, literal_reason, integer_reason); + } + + // Trivial linear2 bounds from its variables. + const IntegerValue implied_ub = integer_trail_->UpperBound(expr); + const IntegerValue slack = ub - implied_ub; + DCHECK_GE(slack, 0); + expr.Negate(); // AppendRelaxedLinearReason() explains a lower bound. 
+ absl::Span vars = expr.non_zero_vars(); + absl::Span coeffs = expr.non_zero_coeffs(); + integer_trail_->AppendRelaxedLinearReason(slack, coeffs, vars, + integer_reason); } -std::vector -Linear2Bounds::GetAllExpressionsWithPotentialNonTrivialBounds() const { - std::vector result = - enforced_bounds_->GetAllExpressionsWithConditionalBounds(); - std::vector binary_relations_result = - linear3_bounds_->GetAllExpressionsWithAffineBounds(); - result.insert(result.end(), binary_relations_result.begin(), - binary_relations_result.end()); - gtl::STLSortAndRemoveDuplicates(&result); - return result; +absl::Span +Linear2Bounds::GetAllExpressionsWithPotentialNonTrivialBounds( + Bitset64::ConstView var_set) const { + tmp_expressions_.clear(); + root_level_bounds_->AppendAllExpressionContaining(var_set, &tmp_expressions_); + enforced_bounds_->AppendAllExpressionContaining(var_set, &tmp_expressions_); + linear3_bounds_->AppendAllExpressionContaining(var_set, &tmp_expressions_); + gtl::STLSortAndRemoveDuplicates(&tmp_expressions_); + return tmp_expressions_; } } // namespace sat diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 6b75d0b9d9..57fd147999 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -45,12 +45,40 @@ namespace operations_research { namespace sat { +// Simple "watcher" class that will be notified if a linear2 bound changed. It +// can also be queried to see if LinearExpression2 involving a specific variable +// changed since last time. +class Linear2Watcher { + public: + explicit Linear2Watcher(Model* model) + : watcher_(model->GetOrCreate()) {} + + // This assumes `expr` is canonicalized and divided by its gcd. + void NotifyBoundChanged(LinearExpression2 expr); + + // Register a GenericLiteralWatcher() id so that propagation is called as + // soon as a bound on a linear2 changed. + void WatchAllLinearExpressions2(int id) { propagator_ids_.insert(id); } + + // Allow to know if some bounds changed since last query. 
+ int64_t Timestamp() const { return timestamp_; } + int64_t VarTimestamp(IntegerVariable var); + + private: + GenericLiteralWatcher* watcher_; + + int64_t timestamp_ = 0; + util_intops::StrongVector var_timestamp_; + absl::btree_set propagator_ids_; +}; + // This holds all the relation lhs <= linear2 <= rhs that are true at level // zero. It is the source of truth across all the solver for such bounds. class RootLevelLinear2Bounds { public: explicit RootLevelLinear2Bounds(Model* model) : integer_trail_(model->GetOrCreate()), + linear2_watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()) {} ~RootLevelLinear2Bounds(); @@ -112,8 +140,13 @@ class RootLevelLinear2Bounds { // canonicalized and gcd-reduced. IntegerValue GetUpperBoundNoTrail(LinearExpression2 expr) const; + void AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const; + private: IntegerTrail* integer_trail_; + Linear2Watcher* linear2_watcher_; SharedStatistics* shared_stats_; // Lookup table to find all the LinearExpression2 with a given variable and @@ -223,6 +256,7 @@ class EnforcedLinear2Bounds : public ReversibleInterface { : params_(*model->GetOrCreate()), trail_(model->GetOrCreate()), integer_trail_(model->GetOrCreate()), + linear2_watcher_(model->GetOrCreate()), root_level_bounds_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()) { integer_trail_->RegisterReversibleClass(this); @@ -277,7 +311,9 @@ class EnforcedLinear2Bounds : public ReversibleInterface { std::vector* integer_reason) const; // Note: might contain duplicate expressions. 
- std::vector GetAllExpressionsWithConditionalBounds() const; + void AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const; private: void CreateLevelEntryIfNeeded(); @@ -285,6 +321,7 @@ class EnforcedLinear2Bounds : public ReversibleInterface { const SatParameters& params_; Trail* trail_; IntegerTrail* integer_trail_; + Linear2Watcher* linear2_watcher_; RootLevelLinear2Bounds* root_level_bounds_; SharedStatistics* shared_stats_; @@ -408,42 +445,37 @@ class Linear2BoundsFromLinear3 { // If the given upper bound evaluate better than the current one we have, this // will replace it and returns true, otherwise it returns false. - // - // Note that we never store trivial upper bound (using the current variable - // domain). bool AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub); - // Returns the best known upper-bound of the given LinearExpression2 at the - // current decision level. If its explanation is needed, it can be queried - // with the second function. + // Warning, the order will not be deterministic. + void AppendAllExpressionContaining( + Bitset64::ConstView var_set, + std::vector* result) const; + + // Most users should just use Linear2Bounds::UpperBound() instead. // - // NOTE: most users will want to call Linear2Bounds::UpperBound() instead. - IntegerValue UpperBound(LinearExpression2 expr) const; + // Returns the upper bound only if there is some relations coming from a + // linear3. Otherwise always returns kMaxIntegerValue. + // `expr` must be canonicalized and gcd-reduced. + IntegerValue GetUpperBoundFromLinear3(LinearExpression2 expr) const; + + // Most users should use Linear2Bounds::AddReasonForUpperBoundLowerThan() + // instead. + // + // Adds the reason for GetUpperBoundFromLinear3() to be <= ub. + // `expr` must be canonicalized and gcd-reduced. 
void AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, std::vector* integer_reason) const; - // Warning, the order will not be deterministic. - std::vector GetAllExpressionsWithAffineBounds() const; - - int NumExpressionsWithAffineBounds() const { return best_affine_ub_.size(); } - - void WatchAllLinearExpressions2(int id) { propagator_ids_.insert(id); } - - // Low-level function that returns the upper bound only if there is some - // relations coming from a linear3. Otherwise always returns kMaxIntegerValue. - // `expr` must be canonicalized and gcd-reduced. - IntegerValue GetUpperBoundFromLinear3(LinearExpression2 expr) const; - private: - void NotifyWatchingPropagators() const; - IntegerTrail* integer_trail_; Trail* trail_; + Linear2Watcher* linear2_watcher_; GenericLiteralWatcher* watcher_; SharedStatistics* shared_stats_; - RootLevelLinear2Bounds* best_root_level_bounds_; + RootLevelLinear2Bounds* root_level_bounds_; int64_t num_affine_updates_ = 0; @@ -456,8 +488,6 @@ class Linear2BoundsFromLinear3 { absl::flat_hash_map> best_affine_ub_; - - absl::btree_set propagator_ids_; }; // TODO(user): Merge with BinaryRelationRepository. Note that this one provides @@ -504,8 +534,8 @@ class ReifiedLinear2Bounds { class Linear2Bounds { public: explicit Linear2Bounds(Model* model) - : root_level_bounds_(model->GetOrCreate()), - integer_trail_(model->GetOrCreate()), + : integer_trail_(model->GetOrCreate()), + root_level_bounds_(model->GetOrCreate()), enforced_bounds_(model->GetOrCreate()), linear3_bounds_(model->GetOrCreate()) {} @@ -522,14 +552,30 @@ class Linear2Bounds { // don't want the trivial bounds. IntegerValue NonTrivialUpperBoundForGcd1(LinearExpression2 expr) const; - std::vector - GetAllExpressionsWithPotentialNonTrivialBounds() const; + // Returns all known expressions with potentially non-trivial bounds that + // involves two variable whose positive version is marked in 'vars'. 
+ absl::Span + GetAllExpressionsWithPotentialNonTrivialBounds( + Bitset64::ConstView var_set) const; + + // Returns a temporay bitset, cleared, and resized for all existing variables. + // + // If we have many class calling + // GetAllExpressionsWithPotentialNonTrivialBounds() it is important that not + // all of them have a O(num_variables) vector when the same one can be used. + SparseBitset* GetTemporyClearedAndResizedBitset() { + tmp_bitset_.ClearAndResize(integer_trail_->NumIntegerVariables()); + return &tmp_bitset_; + } private: - RootLevelLinear2Bounds* root_level_bounds_; IntegerTrail* integer_trail_; + RootLevelLinear2Bounds* root_level_bounds_; EnforcedLinear2Bounds* enforced_bounds_; Linear2BoundsFromLinear3* linear3_bounds_; + + mutable std::vector tmp_expressions_; + SparseBitset tmp_bitset_; }; // Detects if at least one of a subset of linear of size 2 or 1, touching the diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index c9c9dbf993..0f911b9144 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -1122,9 +1122,10 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { expr.coeffs[1] = IntegerValue(-1); // Starts with trivial level zero bound. - auto* tested = model.GetOrCreate(); - auto* root_level_lin2_bounds = model.GetOrCreate(); - EXPECT_EQ(tested->UpperBound(expr), IntegerValue(10)); + auto* bounds = model.GetOrCreate(); + auto* lin3_bounds = model.GetOrCreate(); + auto* root_bounds = model.GetOrCreate(); + EXPECT_EQ(bounds->UpperBound(expr), IntegerValue(10)); auto* search = model.GetOrCreate(); search->TakeDecision( @@ -1132,18 +1133,18 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { IntegerLiteral::LowerOrEqual(w, IntegerValue(10)))))); // Lets add a relation. 
- root_level_lin2_bounds->Add(expr, IntegerValue(-5), IntegerValue(5)); - EXPECT_EQ(tested->UpperBound(expr), IntegerValue(5)); + root_bounds->Add(expr, IntegerValue(-5), IntegerValue(5)); + EXPECT_EQ(bounds->UpperBound(expr), IntegerValue(5)); // Note that we canonicalize with gcd. expr.coeffs[0] *= 3; expr.coeffs[1] *= 3; - EXPECT_EQ(tested->UpperBound(expr), IntegerValue(15)); + EXPECT_EQ(bounds->UpperBound(expr), IntegerValue(15)); // Lets add an affine upper bound to that expression <= 4 * z + 1. - EXPECT_TRUE(tested->AddAffineUpperBound( + EXPECT_TRUE(lin3_bounds->AddAffineUpperBound( expr, AffineExpression(z, IntegerValue(4), IntegerValue(1)))); - EXPECT_EQ(tested->UpperBound(expr), IntegerValue(9)); + EXPECT_EQ(bounds->UpperBound(expr), IntegerValue(9)); // Lets test the reason, first push a new bound. search->TakeDecision( @@ -1151,11 +1152,11 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { IntegerLiteral::LowerOrEqual(z, IntegerValue(1)))))); // Because of gcd, even though ub(affine) is now 5, we get 3, - EXPECT_EQ(tested->UpperBound(expr), IntegerValue(3)); + EXPECT_EQ(bounds->UpperBound(expr), IntegerValue(3)); { std::vector literal_reason; std::vector integer_reason; - tested->AddReasonForUpperBoundLowerThan(expr, IntegerValue(4), + bounds->AddReasonForUpperBoundLowerThan(expr, IntegerValue(4), &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, ElementsAre()); EXPECT_THAT(integer_reason, @@ -1166,7 +1167,7 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { { std::vector literal_reason; std::vector integer_reason; - tested->AddReasonForUpperBoundLowerThan(expr, IntegerValue(9), + bounds->AddReasonForUpperBoundLowerThan(expr, IntegerValue(9), &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, ElementsAre()); EXPECT_THAT(integer_reason, @@ -1176,7 +1177,7 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { // This is implied by the level zero relation x <= 5 std::vector literal_reason; std::vector integer_reason; - 
tested->AddReasonForUpperBoundLowerThan(expr, IntegerValue(15), + bounds->AddReasonForUpperBoundLowerThan(expr, IntegerValue(15), &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, ElementsAre()); EXPECT_THAT(integer_reason, ElementsAre()); @@ -1185,7 +1186,7 @@ TEST(BinaryRelationMapsTest, AffineUpperBound) { // Note that the bound works on the canonicalized expr. expr.coeffs[0] /= 3; expr.coeffs[1] /= 3; - EXPECT_EQ(tested->UpperBound(expr), IntegerValue(1)); + EXPECT_EQ(bounds->UpperBound(expr), IntegerValue(1)); } } // namespace diff --git a/ortools/sat/sat_decision.h b/ortools/sat/sat_decision.h index acce6c1292..371c98223d 100644 --- a/ortools/sat/sat_decision.h +++ b/ortools/sat/sat_decision.h @@ -108,12 +108,20 @@ class SatDecisionPolicy { // Like SetAssignmentPreference() but it can be overridden by phase-saving. void SetTargetPolarity(Literal l) { - var_polarity_[l.Variable()] = l.IsPositive(); + has_target_polarity_[l.Variable()] = true; + target_polarity_[l.Variable()] = var_polarity_[l.Variable()] = + l.IsPositive(); + best_partial_assignment_.push_back(l); + target_length_++; } absl::Span GetBestPartialAssignment() const { return best_partial_assignment_; } - void ClearBestPartialAssignment() { best_partial_assignment_.clear(); } + void ClearBestPartialAssignment() { + target_length_ = 0; + has_target_polarity_.assign(has_target_polarity_.size(), false); + best_partial_assignment_.clear(); + } private: // Computes an initial variable ordering. 
diff --git a/ortools/sat/sat_decision_test.cc b/ortools/sat/sat_decision_test.cc index 104a4a566d..ff90d70772 100644 --- a/ortools/sat/sat_decision_test.cc +++ b/ortools/sat/sat_decision_test.cc @@ -95,6 +95,47 @@ TEST(SatDecisionPolicyTest, ErwaHeuristic) { EXPECT_EQ(Literal(BooleanVariable(2), true), decision->NextBranch()); } +TEST(SatDecisionPolicyTest, SetTargetPolarityInStablePhase) { + Model model; + Trail* trail = model.GetOrCreate(); + SatDecisionPolicy* decision = model.GetOrCreate(); + const int num_variables = 100; + trail->Resize(num_variables); + decision->IncreaseNumVariables(num_variables); + + for (int i = 0; i < num_variables; ++i) { + decision->SetTargetPolarity(Literal(BooleanVariable(i), i % 2)); + } + + decision->SetStablePhase(true); + for (int i = 0; i < num_variables; ++i) { + const Literal literal = decision->NextBranch(); + EXPECT_EQ(literal, Literal(BooleanVariable(literal.Variable()), + literal.Variable().value() % 2)); + trail->EnqueueSearchDecision(literal); + } +} + +TEST(SatDecisionPolicyTest, SetTargetPolarity) { + Model model; + Trail* trail = model.GetOrCreate(); + SatDecisionPolicy* decision = model.GetOrCreate(); + const int num_variables = 100; + trail->Resize(num_variables); + decision->IncreaseNumVariables(num_variables); + + for (int i = 0; i < num_variables; ++i) { + decision->SetTargetPolarity(Literal(BooleanVariable(i), i % 2)); + } + + decision->SetStablePhase(false); + for (int i = 0; i < num_variables; ++i) { + const Literal literal = decision->NextBranch(); + EXPECT_EQ(literal, Literal(BooleanVariable(literal.Variable()), + literal.Variable().value() % 2)); + trail->EnqueueSearchDecision(literal); + } +} } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index bcbc07ddd0..4c62279d7b 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -124,24 +124,6 @@ bool DecomposedEnergyIsPropagated(const 
VariablesAssignment& assignment, int t, return true; } -template -std::vector> SplitEventsInIndendentSets(std::vector& events) { - std::sort(events.begin(), events.end(), [](const E& a, const E& b) { - return std::tie(a.start_min, a.end_max) < std::tie(b.start_min, b.end_max); - }); - std::vector> result; - IntegerValue max_end_max = kMinIntegerValue; - for (const E& event : events) { - if (event.start_min >= max_end_max) { - result.push_back({event}); - } else { - result.back().push_back(event); - } - max_end_max = std::max(max_end_max, event.end_max); - } - return result; -} - } // namespace struct EnergyEvent { @@ -356,9 +338,9 @@ std::vector FindPossibleDemands(const EnergyEvent& event, void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( absl::string_view cut_name, const util_intops::StrongVector& lp_values, - std::vector events, IntegerValue capacity, + absl::Span events, IntegerValue capacity, AffineExpression makespan, TimeLimit* time_limit, Model* model, - LinearConstraintManager* manager) { + TopNCuts& top_n_cuts) { // Checks the precondition of the code. IntegerTrail* integer_trail = model->GetOrCreate(); DCHECK(integer_trail->IsFixed(capacity)); @@ -426,7 +408,6 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( const double makespan_lp = makespan.LpValue(lp_values); const double makespan_min_lp = ToDouble(makespan_min); LinearConstraintBuilder temp_builder(model); - TopNCuts top_n_cuts(5); for (int i = 0; i + 1 < num_time_points; ++i) { // Checks the time limit if the problem is too big. 
if (events.size() > 50 && time_limit->LimitReached()) return; @@ -528,15 +509,13 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( } } } - - top_n_cuts.TransferToManager(manager); } void GenerateCumulativeEnergeticCuts( absl::string_view cut_name, const util_intops::StrongVector& lp_values, - std::vector events, const AffineExpression& capacity, - TimeLimit* time_limit, Model* model, LinearConstraintManager* manager) { + absl::Span events, const AffineExpression& capacity, + TimeLimit* time_limit, Model* model, TopNCuts& top_n_cuts) { double max_possible_energy_lp = 0.0; for (const EnergyEvent& event : events) { max_possible_energy_lp += event.linearized_energy_lp_value; @@ -567,7 +546,6 @@ void GenerateCumulativeEnergeticCuts( const int num_time_points = time_points.size(); LinearConstraintBuilder temp_builder(model); - TopNCuts top_n_cuts(5); for (int i = 0; i + 1 < num_time_points; ++i) { // Checks the time limit if the problem is too big. if (events.size() > 50 && time_limit->LimitReached()) return; @@ -620,8 +598,6 @@ void GenerateCumulativeEnergeticCuts( } } } - - top_n_cuts.TransferToManager(manager); } CutGenerator CreateCumulativeEnergyCutGenerator( @@ -682,21 +658,24 @@ CutGenerator CreateCumulativeEnergyCutGenerator( events.push_back(e); } - std::vector> disjoint_events = - SplitEventsInIndendentSets(events); - for (auto& cluster : disjoint_events) { + TopNCuts top_n_cuts(5); + std::vector> disjoint_events = + SplitEventsInIndendentSets(absl::MakeSpan(events)); + // Can we pass cluster as const. It would mean sorting before. 
+ for (const absl::Span cluster : disjoint_events) { if (makespan.has_value() && integer_trail->IsFixed(capacity)) { GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( - "CumulativeEnergyM", lp_values, std::move(cluster), + "CumulativeEnergyM", lp_values, cluster, integer_trail->FixedValue(capacity), makespan.value(), time_limit, - model, manager); + model, top_n_cuts); } else { - GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, - std::move(cluster), capacity, - time_limit, model, manager); + GenerateCumulativeEnergeticCuts("CumulativeEnergy", lp_values, cluster, + capacity, time_limit, model, + top_n_cuts); } } + top_n_cuts.TransferToManager(manager); return true; }; @@ -739,20 +718,22 @@ CutGenerator CreateNoOverlapEnergyCutGenerator( events.push_back(e); } - std::vector> disjoint_events = - SplitEventsInIndendentSets(events); - for (auto& cluster : disjoint_events) { + TopNCuts top_n_cuts(5); + std::vector> disjoint_events = + SplitEventsInIndendentSets(absl::MakeSpan(events)); + for (const absl::Span cluster : disjoint_events) { if (makespan.has_value()) { GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity( - "NoOverlapEnergyM", lp_values, std::move(cluster), + "NoOverlapEnergyM", lp_values, cluster, /*capacity=*/IntegerValue(1), makespan.value(), time_limit, model, - manager); + top_n_cuts); } else { - GenerateCumulativeEnergeticCuts( - "NoOverlapEnergy", lp_values, std::move(cluster), - /*capacity=*/IntegerValue(1), time_limit, model, manager); + GenerateCumulativeEnergeticCuts("NoOverlapEnergy", lp_values, cluster, + /*capacity=*/IntegerValue(1), + time_limit, model, top_n_cuts); } } + top_n_cuts.TransferToManager(manager); return true; }; return result; @@ -916,9 +897,8 @@ struct CachedIntervalData { void GenerateCutsBetweenPairOfNonOverlappingTasks( absl::string_view cut_name, bool ignore_zero_size_intervals, const util_intops::StrongVector& lp_values, - std::vector events, IntegerValue capacity_max, - Model* model, 
LinearConstraintManager* manager) { - TopNCuts top_n_cuts(5); + absl::Span events, IntegerValue capacity_max, + Model* model, TopNCuts& top_n_cuts) { const int num_events = events.size(); if (num_events <= 1) return; @@ -1011,8 +991,6 @@ void GenerateCutsBetweenPairOfNonOverlappingTasks( } } } - - top_n_cuts.TransferToManager(manager); } CutGenerator CreateCumulativePrecedenceCutGenerator( @@ -1042,14 +1020,15 @@ CutGenerator CreateCumulativePrecedenceCutGenerator( const IntegerValue capacity_max = integer_trail->UpperBound(capacity); - std::vector> disjoint_events = - SplitEventsInIndendentSets(events); - for (auto& cluster : disjoint_events) { + TopNCuts top_n_cuts(5); + std::vector> disjoint_events = + SplitEventsInIndendentSets(absl::MakeSpan(events)); + for (const absl::Span cluster : disjoint_events) { GenerateCutsBetweenPairOfNonOverlappingTasks( "Cumulative", /* ignore_zero_size_intervals= */ true, - manager->LpValues(), std::move(cluster), capacity_max, model, - manager); + manager->LpValues(), cluster, capacity_max, model, top_n_cuts); } + top_n_cuts.TransferToManager(manager); return true; }; return result; @@ -1075,14 +1054,15 @@ CutGenerator CreateNoOverlapPrecedenceCutGenerator( events.push_back(event); } - std::vector> disjoint_events = - SplitEventsInIndendentSets(events); - for (auto& cluster : disjoint_events) { + TopNCuts top_n_cuts(5); + std::vector> disjoint_events = + SplitEventsInIndendentSets(absl::MakeSpan(events)); + for (const absl::Span cluster : disjoint_events) { GenerateCutsBetweenPairOfNonOverlappingTasks( "NoOverlap", /* ignore_zero_size_intervals= */ false, - manager->LpValues(), std::move(cluster), IntegerValue(1), model, - manager); + manager->LpValues(), cluster, IntegerValue(1), model, top_n_cuts); } + top_n_cuts.TransferToManager(manager); return true; }; @@ -1436,11 +1416,10 @@ CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( // - detect disjoint tasks (no need to crossover to the second part) // - better 
caching of explored states ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( - absl::string_view cut_name, std::vector events, - IntegerValue capacity_max, CtExhaustiveHelper& helper, Model* model, - LinearConstraintManager* manager) { - TopNCuts top_n_cuts(5); - + absl::string_view cut_name, + const util_intops::StrongVector& lp_values, + absl::Span events, IntegerValue capacity_max, + CtExhaustiveHelper& helper, Model* model, TopNCuts& top_n_cuts) { // Sort by start min to bucketize by start_min. std::sort( events.begin(), events.end(), @@ -1533,7 +1512,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( std::string full_name(cut_name); if (cut_use_precedences) full_name.append("_prec"); if (is_lifted) full_name.append("_lifted"); - top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); + top_n_cuts.AddCut(cut.Build(), full_name, lp_values); } // Weighted cuts. @@ -1551,11 +1530,10 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( if (is_lifted) full_name.append("_lifted"); if (cut_use_precedences) full_name.append("_prec"); full_name.append("_weighted"); - top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); + top_n_cuts.AddCut(cut.Build(), full_name, lp_values); } } } - top_n_cuts.TransferToManager(manager); return true; } @@ -1672,9 +1650,10 @@ void AddEventDemandsToCapacitySubsetSum( // - second loop, we add tasks that must contribute after this start time // ordered by increasing end time in the LP relaxation. 
void GenerateCompletionTimeCutsWithEnergy( - absl::string_view cut_name, std::vector events, - IntegerValue capacity_max, Model* model, LinearConstraintManager* manager) { - TopNCuts top_n_cuts(5); + absl::string_view cut_name, + const util_intops::StrongVector& lp_values, + absl::Span events, IntegerValue capacity_max, + Model* model, TopNCuts& top_n_cuts) { const VariablesAssignment& assignment = model->GetOrCreate()->Assignment(); std::vector tmp_possible_demands; @@ -1825,10 +1804,9 @@ void GenerateCompletionTimeCutsWithEnergy( if (add_energy_to_name) full_name.append("_energy"); if (is_lifted) full_name.append("_lifted"); if (best_uses_subset_sum) full_name.append("_subsetsum"); - top_n_cuts.AddCut(cut.Build(), full_name, manager->LpValues()); + top_n_cuts.AddCut(cut.Build(), full_name, lp_values); } } - top_n_cuts.TransferToManager(manager); } CutGenerator CreateNoOverlapCompletionTimeCutGenerator( @@ -1862,19 +1840,21 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( CtExhaustiveHelper helper; helper.Init(events, model); - std::vector> disjoint_events = - SplitEventsInIndendentSets(events); - for (auto& cluster : disjoint_events) { + TopNCuts top_n_cuts(5); + std::vector> disjoint_events = + SplitEventsInIndendentSets(absl::MakeSpan(events)); + for (const absl::Span cluster : disjoint_events) { if (!GenerateShortCompletionTimeCutsWithExactBound( - "NoOverlapCompletionTimeExhaustive", cluster, - /*capacity_max=*/IntegerValue(1), helper, model, manager)) { + "NoOverlapCompletionTimeExhaustive", lp_values, cluster, + /*capacity_max=*/IntegerValue(1), helper, model, top_n_cuts)) { return false; } GenerateCompletionTimeCutsWithEnergy( - "NoOverlapCompletionTimeQueyrane", std::move(cluster), - /*capacity_max=*/IntegerValue(1), model, manager); + "NoOverlapCompletionTimeQueyrane", lp_values, cluster, + /*capacity_max=*/IntegerValue(1), model, top_n_cuts); } + top_n_cuts.TransferToManager(manager); return true; }; if 
(!generate_cuts(/*time_is_forward=*/true)) return false; @@ -1930,19 +1910,21 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( helper.Init(events, model); const IntegerValue capacity_max = integer_trail->UpperBound(capacity); - std::vector> disjoint_events = - SplitEventsInIndendentSets(events); - for (auto& cluster : disjoint_events) { + TopNCuts top_n_cuts(5); + std::vector> disjoint_events = + SplitEventsInIndendentSets(absl::MakeSpan(events)); + for (const absl::Span cluster : disjoint_events) { if (!GenerateShortCompletionTimeCutsWithExactBound( - "CumulativeCompletionTimeExhaustive", cluster, capacity_max, - helper, model, manager)) { + "CumulativeCompletionTimeExhaustive", lp_values, cluster, + capacity_max, helper, model, top_n_cuts)) { return false; } GenerateCompletionTimeCutsWithEnergy("CumulativeCompletionTimeQueyrane", - std::move(cluster), capacity_max, - model, manager); + lp_values, cluster, capacity_max, + model, top_n_cuts); } + top_n_cuts.TransferToManager(manager); return true; }; diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index e6c78edd64..8b493eefa3 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -218,6 +218,37 @@ CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( double& min_sum_of_weighted_ends, bool& cut_use_precedences, int& exploration_credit); +// Split the list of events in connected components. Two intervals are connected +// if they overlap. It expects the events to have the start_min and end_max +// fields. Note that events are semi-open intervals [start_min, end_max). This +// will filter out components of size one. 
+template +std::vector> SplitEventsInIndendentSets(absl::Span events) { + if (events.empty()) return {}; + + std::sort(events.begin(), events.end(), [](const E& a, const E& b) { + return std::tie(a.start_min, a.end_max) < std::tie(b.start_min, b.end_max); + }); + const int size = events.size(); + std::vector> result; + IntegerValue max_end_max = events[0].end_max; + int start = 0; + for (int i = 1; i < size; ++i) { + const E& event = events[i]; + if (event.start_min >= max_end_max) { + if (i - start > 1) { + result.push_back(absl::MakeSpan(events.data() + start, i - start)); + } + start = i; + } + max_end_max = std::max(max_end_max, event.end_max); + } + if (size - start > 1) { + result.push_back(absl::MakeSpan(events.data() + start, size - start)); + } + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/scheduling_cuts_test.cc b/ortools/sat/scheduling_cuts_test.cc index 38263ad2bb..5a51c9b535 100644 --- a/ortools/sat/scheduling_cuts_test.cc +++ b/ortools/sat/scheduling_cuts_test.cc @@ -15,8 +15,8 @@ #include +#include #include -#include #include #include "absl/base/log_severity.h" @@ -657,6 +657,35 @@ TEST(ComputeMinSumOfEndMinsTest, RandomCases) { } } +struct SimpleEvent { + IntegerValue start_min; + IntegerValue end_max; + bool operator==(const SimpleEvent& other) const { + return start_min == other.start_min && end_max == other.end_max; + } +}; + +SimpleEvent ConvexHull(absl::Span events) { + SimpleEvent result = events[0]; + for (int i = 1; i < events.size(); ++i) { + result.start_min = std::min(result.start_min, events[i].start_min); + result.end_max = std::max(result.end_max, events[i].end_max); + } + return result; +} + +TEST(SplitEventsInIndendentSetsTest, BasicTest) { + std::vector events = {{0, 10}, {2, 12}, {3, 5}, + {15, 20}, {12, 21}, {30, 35}}; + const std::vector> sets = + SplitEventsInIndendentSets(absl::MakeSpan(events)); + EXPECT_EQ(sets.size(), 2); + EXPECT_EQ(sets[0].size(), 3); + 
EXPECT_EQ(ConvexHull(sets[0]), SimpleEvent({0, 12})); + EXPECT_EQ(sets[1].size(), 2); + EXPECT_EQ(ConvexHull(sets[1]), SimpleEvent({12, 21})); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/util/bitset.h b/ortools/util/bitset.h index 35f3f47e2f..909e0a5551 100644 --- a/ortools/util/bitset.h +++ b/ortools/util/bitset.h @@ -884,6 +884,9 @@ class SparseBitset { // A bit hacky for really hot loop. typename Bitset64::View BitsetView() { return bitset_.view(); } + typename Bitset64::ConstView BitsetConstView() { + return bitset_.const_view(); + } void SetUnsafe(typename Bitset64::View view, IntegerType index) { view.Set(index); to_clear_.push_back(index); From e76ecdf237c29031becf075f4bf0b92254ce53ca Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 17 Jun 2025 12:48:42 +0200 Subject: [PATCH 100/509] minor cleaning --- ortools/pdlp/BUILD.bazel | 1 + ortools/pdlp/primal_dual_hybrid_gradient.cc | 97 +++++++++++++++------ 2 files changed, 71 insertions(+), 27 deletions(-) diff --git a/ortools/pdlp/BUILD.bazel b/ortools/pdlp/BUILD.bazel index 7a19ebc39a..0059d6d122 100644 --- a/ortools/pdlp/BUILD.bazel +++ b/ortools/pdlp/BUILD.bazel @@ -144,6 +144,7 @@ cc_library( "//ortools/lp_data:proto_utils", "//ortools/util:logging", "@abseil-cpp//absl/algorithm:container", + "@abseil-cpp//absl/base:nullability", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", diff --git a/ortools/pdlp/primal_dual_hybrid_gradient.cc b/ortools/pdlp/primal_dual_hybrid_gradient.cc index d166ef7700..b86f3e9f1c 100644 --- a/ortools/pdlp/primal_dual_hybrid_gradient.cc +++ b/ortools/pdlp/primal_dual_hybrid_gradient.cc @@ -53,6 +53,7 @@ #include "Eigen/Core" #include "Eigen/SparseCore" #include "absl/algorithm/container.h" +#include "absl/base/nullability.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" @@ -619,7 +620,8 @@ class Solver { NextSolutionAndDelta 
ComputeNextDualSolution( double dual_step_size, double extrapolation_factor, - const NextSolutionAndDelta& next_primal) const; + const NextSolutionAndDelta& next_primal_solution, + const VectorXd* absl_nullable next_primal_product = nullptr) const; std::pair ComputeMovementTerms( const VectorXd& delta_primal, const VectorXd& delta_dual) const; @@ -630,6 +632,10 @@ class Solver { double ComputeNonlinearity(const VectorXd& delta_primal, const VectorXd& next_dual_product) const; + // Sets current_primal_product_ and current_dual_product_ based on + // current_primal_solution_ and current_dual_solution_ respectively. + void SetCurrentPrimalAndDualProducts(); + // Creates all the simple-to-compute statistics in stats. IterationStats CreateSimpleIterationStats(RestartChoice restart_used) const; @@ -734,6 +740,9 @@ class Solver { WallTimer timer_; int iterations_completed_; int num_rejected_steps_; + // A cache of `constraint_matrix * current_primal_solution_`. + // Malitsky-Pock linesearch only. + std::optional current_primal_product_; // A cache of `constraint_matrix.transpose() * current_dual_solution_`. 
VectorXd current_dual_product_; // The primal point at which the algorithm was last restarted from, or @@ -1870,31 +1879,41 @@ Solver::NextSolutionAndDelta Solver::ComputeNextPrimalSolution( Solver::NextSolutionAndDelta Solver::ComputeNextDualSolution( double dual_step_size, double extrapolation_factor, - const NextSolutionAndDelta& next_primal_solution) const { + const NextSolutionAndDelta& next_primal_solution, + const VectorXd* absl_nullable next_primal_product) const { const int64_t dual_size = ShardedWorkingQp().DualSize(); NextSolutionAndDelta result = { .value = VectorXd(dual_size), .delta = VectorXd(dual_size), }; const QuadraticProgram& qp = WorkingQp(); - VectorXd extrapolated_primal(ShardedWorkingQp().PrimalSize()); - ShardedWorkingQp().PrimalSharder().ParallelForEachShard( - [&](const Sharder::Shard& shard) { - shard(extrapolated_primal) = - (shard(next_primal_solution.value) + - extrapolation_factor * shard(next_primal_solution.delta)); - }); - // TODO(user): Refactor this multiplication so that we only do one matrix - // vector multiply for the primal variable. This only applies to Malitsky and - // Pock and not to the adaptive step size rule. 
+ std::optional extrapolated_primal; + if (!next_primal_product) { + extrapolated_primal.emplace(ShardedWorkingQp().PrimalSize()); + ShardedWorkingQp().PrimalSharder().ParallelForEachShard( + [&](const Sharder::Shard& shard) { + shard(*extrapolated_primal) = + (shard(next_primal_solution.value) + + extrapolation_factor * shard(next_primal_solution.delta)); + }); + } ShardedWorkingQp().TransposedConstraintMatrixSharder().ParallelForEachShard( [&](const Sharder::Shard& shard) { - VectorXd temp = - shard(current_dual_solution_) - - dual_step_size * - shard(ShardedWorkingQp().TransposedConstraintMatrix()) - .transpose() * - extrapolated_primal; + VectorXd temp; + if (next_primal_product) { + CHECK(current_primal_product_.has_value()); + temp = shard(current_dual_solution_) - + dual_step_size * + (-extrapolation_factor * shard(*current_primal_product_) + + (extrapolation_factor + 1) * shard(*next_primal_product)); + } else { + temp = shard(current_dual_solution_) - + dual_step_size * + shard(ShardedWorkingQp().TransposedConstraintMatrix()) + .transpose() * + extrapolated_primal.value(); + } + // Each element of the argument of `.cwiseMin()` is the critical point // of the respective 1D minimization problem if it's negative. 
// Likewise the argument to the `.cwiseMax()` is the critical point if @@ -1937,6 +1956,21 @@ double Solver::ComputeNonlinearity(const VectorXd& delta_primal, }); } +void Solver::SetCurrentPrimalAndDualProducts() { + if (params_.linesearch_rule() == + PrimalDualHybridGradientParams::MALITSKY_POCK_LINESEARCH_RULE) { + current_primal_product_ = TransposedMatrixVectorProduct( + ShardedWorkingQp().TransposedConstraintMatrix(), + current_primal_solution_, + ShardedWorkingQp().TransposedConstraintMatrixSharder()); + } else { + current_primal_product_.reset(); + } + current_dual_product_ = TransposedMatrixVectorProduct( + WorkingQp().constraint_matrix, current_dual_solution_, + ShardedWorkingQp().ConstraintMatrixSharder()); +} + IterationStats Solver::CreateSimpleIterationStats( RestartChoice restart_used) const { IterationStats stats; @@ -1977,8 +2011,10 @@ LocalizedLagrangianBounds Solver::ComputeLocalizedBoundsAtCurrent() const { ShardedWorkingQp(), current_primal_solution_, current_dual_solution_, PrimalDualNorm::kEuclideanNorm, primal_weight_, distance_traveled_by_current, - /*primal_product=*/nullptr, ¤t_dual_product_, - params_.use_diagonal_qp_trust_region_solver(), + /*primal_product=*/current_primal_product_.has_value() + ? 
¤t_primal_product_.value() + : nullptr, + ¤t_dual_product_, params_.use_diagonal_qp_trust_region_solver(), params_.diagonal_qp_trust_region_solver_tolerance()); } @@ -2225,9 +2261,7 @@ void Solver::ApplyRestartChoice(const RestartChoice restart_to_apply) { } current_primal_solution_ = primal_average_.ComputeAverage(); current_dual_solution_ = dual_average_.ComputeAverage(); - current_dual_product_ = TransposedMatrixVectorProduct( - WorkingQp().constraint_matrix, current_dual_solution_, - ShardedWorkingQp().ConstraintMatrixSharder()); + SetCurrentPrimalAndDualProducts(); break; } primal_weight_ = ComputeNewPrimalWeight(); @@ -2443,6 +2477,14 @@ InnerStepOutcome Solver::TakeMalitskyPockStep() { params_.malitsky_pock_parameters().linesearch_contraction_factor(); const double dual_weight = primal_weight_ * primal_weight_; int inner_iterations = 0; + VectorXd next_primal_product(current_dual_solution_.size()); + ShardedWorkingQp().TransposedConstraintMatrixSharder().ParallelForEachShard( + [&](const Sharder::Shard& shard) { + shard(next_primal_product) = + shard(ShardedWorkingQp().TransposedConstraintMatrix()).transpose() * + next_primal_solution.value; + }); + for (bool accepted_step = false; !accepted_step; ++inner_iterations) { if (inner_iterations >= 60) { LogInnerIterationLimitHit(); @@ -2454,7 +2496,7 @@ InnerStepOutcome Solver::TakeMalitskyPockStep() { new_primal_step_size / primal_step_size; NextSolutionAndDelta next_dual_solution = ComputeNextDualSolution( dual_weight * new_primal_step_size, new_last_two_step_sizes_ratio, - next_primal_solution); + next_primal_solution, &next_primal_product); VectorXd next_dual_product = TransposedMatrixVectorProduct( WorkingQp().constraint_matrix, next_dual_solution.value, @@ -2482,6 +2524,7 @@ InnerStepOutcome Solver::TakeMalitskyPockStep() { current_primal_solution_ = std::move(next_primal_solution.value); current_dual_solution_ = std::move(next_dual_solution.value); current_dual_product_ = std::move(next_dual_product); + 
current_primal_product_ = std::move(next_primal_product); primal_average_.Add(current_primal_solution_, /*weight=*/new_primal_step_size); dual_average_.Add(current_dual_solution_, @@ -2555,6 +2598,7 @@ InnerStepOutcome Solver::TakeAdaptiveStep() { current_primal_solution_ = std::move(next_primal_solution.value); current_dual_solution_ = std::move(next_dual_solution.value); current_dual_product_ = std::move(next_dual_product); + current_primal_product_.reset(); current_primal_delta_ = std::move(next_primal_solution.delta); current_dual_delta_ = std::move(next_dual_solution.delta); primal_average_.Add(current_primal_solution_, /*weight=*/step_size_); @@ -2620,6 +2664,7 @@ InnerStepOutcome Solver::TakeConstantSizeStep() { current_primal_solution_ = std::move(next_primal_solution.value); current_dual_solution_ = std::move(next_dual_solution.value); current_dual_product_ = std::move(next_dual_product); + current_primal_product_.reset(); current_primal_delta_ = std::move(next_primal_solution.delta); current_dual_delta_ = std::move(next_dual_solution.delta); primal_average_.Add(current_primal_solution_, /*weight=*/step_size_); @@ -2980,9 +3025,7 @@ SolverResult Solver::Solve(const IterationType iteration_type, // restart. ratio_last_two_step_sizes_ = 1; - current_dual_product_ = TransposedMatrixVectorProduct( - WorkingQp().constraint_matrix, current_dual_solution_, - ShardedWorkingQp().ConstraintMatrixSharder()); + SetCurrentPrimalAndDualProducts(); // This is set to true if we can't proceed any more because of numerical // issues. We may or may not have found the optimal solution. 
From e995b1ad7a6b4a176d1f2065d2210c5b3bb4bd9f Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 17 Jun 2025 13:41:54 +0200 Subject: [PATCH 101/509] remove model_builder support for xpress as it is not implemented; move gurobi_util to linear_solver --- ortools/gurobi/BUILD.bazel | 11 --------- ortools/linear_solver/BUILD.bazel | 5 ++-- ortools/linear_solver/gurobi_interface.cc | 2 +- .../{gurobi => linear_solver}/gurobi_util.cc | 2 +- .../{gurobi => linear_solver}/gurobi_util.h | 6 ++--- .../linear_solver/proto_solver/BUILD.bazel | 23 ------------------- .../linear_solver/samples/clone_model_mb.py | 1 + .../samples/integer_programming_example.py | 1 + .../samples/linear_programming_example.py | 1 + .../linear_solver/samples/mip_var_array.py | 2 ++ .../samples/multiple_knapsack_mip.py | 1 + .../samples/simple_lp_program.py | 1 + .../samples/simple_lp_program_mb.py | 1 + .../samples/simple_mip_program.py | 1 + .../samples/simple_mip_program_mb.py | 1 + ortools/linear_solver/samples/stigler_diet.py | 1 + ortools/linear_solver/wrappers/BUILD.bazel | 2 -- .../wrappers/model_builder_helper.cc | 13 ----------- 18 files changed, 19 insertions(+), 56 deletions(-) rename ortools/{gurobi => linear_solver}/gurobi_util.cc (98%) rename ortools/{gurobi => linear_solver}/gurobi_util.h (88%) diff --git a/ortools/gurobi/BUILD.bazel b/ortools/gurobi/BUILD.bazel index f136987107..a64e5c070b 100644 --- a/ortools/gurobi/BUILD.bazel +++ b/ortools/gurobi/BUILD.bazel @@ -35,17 +35,6 @@ cc_library( ], ) -cc_library( - name = "gurobi_util", - srcs = ["gurobi_util.cc"], - hdrs = ["gurobi_util.h"], - deps = [ - ":environment", - "@abseil-cpp//absl/strings", - "@abseil-cpp//absl/strings:str_format", - ], -) - cc_library( name = "gurobi_stdout_matchers", testonly = True, diff --git a/ortools/linear_solver/BUILD.bazel b/ortools/linear_solver/BUILD.bazel index d574cc3343..bbf790962b 100644 --- a/ortools/linear_solver/BUILD.bazel +++ b/ortools/linear_solver/BUILD.bazel @@ -12,7 +12,7 @@ # 
limitations under the License. load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") -load("@bazel_skylib//rules:copy_file.bzl", "copy_file") +# load("@bazel_skylib//rules:copy_file.bzl", "copy_file") load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@protobuf//bazel:proto_library.bzl", "proto_library") load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") @@ -171,6 +171,7 @@ cc_library( name = "linear_solver", srcs = [ "gurobi_interface.cc", + "gurobi_util.cc", "linear_expr.cc", "linear_solver.cc", "linear_solver_callback.cc", @@ -211,6 +212,7 @@ cc_library( "//conditions:default": [], }), hdrs = [ + "gurobi_util.h", "linear_expr.h", "linear_solver.h", "linear_solver_callback.h", @@ -266,7 +268,6 @@ cc_library( "//ortools/base:stl_util", "//ortools/base:timer", "//ortools/gurobi:environment", - "//ortools/gurobi:gurobi_util", "//ortools/linear_solver/proto_solver:gurobi_proto_solver", "//ortools/linear_solver/proto_solver:sat_proto_solver", "//ortools/port:file", diff --git a/ortools/linear_solver/gurobi_interface.cc b/ortools/linear_solver/gurobi_interface.cc index 2efe7f6635..610267832d 100644 --- a/ortools/linear_solver/gurobi_interface.cc +++ b/ortools/linear_solver/gurobi_interface.cc @@ -67,7 +67,7 @@ #include "ortools/base/logging.h" #include "ortools/base/timer.h" #include "ortools/gurobi/environment.h" -#include "ortools/gurobi/gurobi_util.h" +#include "ortools/linear_solver/gurobi_util.h" #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver_callback.h" #include "ortools/linear_solver/proto_solver/gurobi_proto_solver.h" diff --git a/ortools/gurobi/gurobi_util.cc b/ortools/linear_solver/gurobi_util.cc similarity index 98% rename from ortools/gurobi/gurobi_util.cc rename to ortools/linear_solver/gurobi_util.cc index f26c094d75..2f163a4aab 100644 --- a/ortools/gurobi/gurobi_util.cc +++ b/ortools/linear_solver/gurobi_util.cc @@ -11,7 +11,7 @@ // See the License for the 
specific language governing permissions and // limitations under the License. -#include "ortools/gurobi/gurobi_util.h" +#include "ortools/linear_solver/gurobi_util.h" #include #include diff --git a/ortools/gurobi/gurobi_util.h b/ortools/linear_solver/gurobi_util.h similarity index 88% rename from ortools/gurobi/gurobi_util.h rename to ortools/linear_solver/gurobi_util.h index 85c79bbdaf..812d822df2 100644 --- a/ortools/gurobi/gurobi_util.h +++ b/ortools/linear_solver/gurobi_util.h @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef OR_TOOLS_GUROBI_GUROBI_UTIL_H_ -#define OR_TOOLS_GUROBI_GUROBI_UTIL_H_ +#ifndef OR_TOOLS_LINEAR_SOLVER_GUROBI_UTIL_H_ +#define OR_TOOLS_LINEAR_SOLVER_GUROBI_UTIL_H_ #include @@ -28,4 +28,4 @@ std::string GurobiParamInfoForLogging(GRBenv* grb, bool one_liner_output = false); } // namespace operations_research -#endif // OR_TOOLS_GUROBI_GUROBI_UTIL_H_ +#endif // OR_TOOLS_LINEAR_SOLVER_GUROBI_UTIL_H_ diff --git a/ortools/linear_solver/proto_solver/BUILD.bazel b/ortools/linear_solver/proto_solver/BUILD.bazel index 1e8952014d..04986ea7fd 100644 --- a/ortools/linear_solver/proto_solver/BUILD.bazel +++ b/ortools/linear_solver/proto_solver/BUILD.bazel @@ -194,26 +194,3 @@ cc_library( "@highs", ], ) - -cc_library( - name = "xpress_proto_solver", - srcs = ["xpress_proto_solver.cc"], - hdrs = ["xpress_proto_solver.h"], - deps = [ - "//ortools/base:timer", - "//ortools/linear_solver:linear_solver_cc_proto", - "//ortools/linear_solver:model_validator", - "//ortools/util:lazy_mutable_copy", - "//ortools/xpress:environment", - "@abseil-cpp//absl/base:core_headers", - "@abseil-cpp//absl/cleanup", - "@abseil-cpp//absl/log", - "@abseil-cpp//absl/log:check", - "@abseil-cpp//absl/status", - "@abseil-cpp//absl/status:statusor", - "@abseil-cpp//absl/strings", - "@abseil-cpp//absl/strings:str_format", - "@abseil-cpp//absl/time", - "@abseil-cpp//absl/types:optional", - ], -) diff 
--git a/ortools/linear_solver/samples/clone_model_mb.py b/ortools/linear_solver/samples/clone_model_mb.py index ce27b6e1b9..a178a35349 100644 --- a/ortools/linear_solver/samples/clone_model_mb.py +++ b/ortools/linear_solver/samples/clone_model_mb.py @@ -18,6 +18,7 @@ import math from ortools.linear_solver.python import model_builder + # [END import] diff --git a/ortools/linear_solver/samples/integer_programming_example.py b/ortools/linear_solver/samples/integer_programming_example.py index 35a9264b84..1f7d4f4ad1 100644 --- a/ortools/linear_solver/samples/integer_programming_example.py +++ b/ortools/linear_solver/samples/integer_programming_example.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/linear_programming_example.py b/ortools/linear_solver/samples/linear_programming_example.py index a4bbd1b254..3b492cb769 100644 --- a/ortools/linear_solver/samples/linear_programming_example.py +++ b/ortools/linear_solver/samples/linear_programming_example.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/mip_var_array.py b/ortools/linear_solver/samples/mip_var_array.py index 2fc61626fa..c64e77ba85 100644 --- a/ortools/linear_solver/samples/mip_var_array.py +++ b/ortools/linear_solver/samples/mip_var_array.py @@ -17,6 +17,7 @@ # [START program] # [START import] from ortools.linear_solver import pywraplp + # [END import] @@ -37,6 +38,7 @@ def create_data_model(): data["num_constraints"] = 4 return data + # [END data_model] diff --git a/ortools/linear_solver/samples/multiple_knapsack_mip.py b/ortools/linear_solver/samples/multiple_knapsack_mip.py index 29dd59155c..dbc2f7b51e 100644 --- a/ortools/linear_solver/samples/multiple_knapsack_mip.py +++ b/ortools/linear_solver/samples/multiple_knapsack_mip.py @@ -16,6 +16,7 @@ """Solve a multiple knapsack problem using 
a MIP solver.""" # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/simple_lp_program.py b/ortools/linear_solver/samples/simple_lp_program.py index 2612581fe7..3832104809 100644 --- a/ortools/linear_solver/samples/simple_lp_program.py +++ b/ortools/linear_solver/samples/simple_lp_program.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/simple_lp_program_mb.py b/ortools/linear_solver/samples/simple_lp_program_mb.py index d69e27395b..6291e7f088 100644 --- a/ortools/linear_solver/samples/simple_lp_program_mb.py +++ b/ortools/linear_solver/samples/simple_lp_program_mb.py @@ -18,6 +18,7 @@ import math from ortools.linear_solver.python import model_builder + # [END import] diff --git a/ortools/linear_solver/samples/simple_mip_program.py b/ortools/linear_solver/samples/simple_mip_program.py index 8b37801434..916a998155 100644 --- a/ortools/linear_solver/samples/simple_mip_program.py +++ b/ortools/linear_solver/samples/simple_mip_program.py @@ -16,6 +16,7 @@ # [START program] # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/samples/simple_mip_program_mb.py b/ortools/linear_solver/samples/simple_mip_program_mb.py index 61f487b955..cb3ac8222a 100644 --- a/ortools/linear_solver/samples/simple_mip_program_mb.py +++ b/ortools/linear_solver/samples/simple_mip_program_mb.py @@ -18,6 +18,7 @@ import math from ortools.linear_solver.python import model_builder + # [END import] diff --git a/ortools/linear_solver/samples/stigler_diet.py b/ortools/linear_solver/samples/stigler_diet.py index 15bdc2c4c3..ed3741de64 100755 --- a/ortools/linear_solver/samples/stigler_diet.py +++ b/ortools/linear_solver/samples/stigler_diet.py @@ -20,6 +20,7 @@ https://en.wikipedia.org/wiki/Stigler_diet. 
""" # [START import] from ortools.linear_solver import pywraplp + # [END import] diff --git a/ortools/linear_solver/wrappers/BUILD.bazel b/ortools/linear_solver/wrappers/BUILD.bazel index eb3bb8db25..3d7a471396 100644 --- a/ortools/linear_solver/wrappers/BUILD.bazel +++ b/ortools/linear_solver/wrappers/BUILD.bazel @@ -46,10 +46,8 @@ cc_library( "//ortools/linear_solver/proto_solver:pdlp_proto_solver", "//ortools/linear_solver/proto_solver:sat_proto_solver", "//ortools/linear_solver/proto_solver:scip_proto_solver", - "//ortools/linear_solver/proto_solver:xpress_proto_solver", "//ortools/lp_data:lp_parser", "//ortools/lp_data:mps_reader", "//ortools/util:logging", - "//ortools/xpress:environment", ], ) diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index bc5bcdf5eb..020e9d3a10 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -37,7 +37,6 @@ #include "ortools/linear_solver/proto_solver/glop_proto_solver.h" #include "ortools/linear_solver/proto_solver/gurobi_proto_solver.h" #include "ortools/linear_solver/proto_solver/sat_proto_solver.h" -#include "ortools/linear_solver/proto_solver/xpress_proto_solver.h" #include "ortools/linear_solver/solve_mp_model.h" #if defined(USE_SCIP) #include "ortools/linear_solver/proto_solver/scip_proto_solver.h" @@ -50,7 +49,6 @@ #endif // defined(USE_PDLP) #include "ortools/lp_data/lp_parser.h" #include "ortools/lp_data/mps_reader.h" -#include "ortools/xpress/environment.h" namespace operations_research { namespace mb { @@ -557,11 +555,6 @@ bool ModelSolverHelper::SolverIsSupported() const { solver_type_.value() == MPModelRequest::GUROBI_LINEAR_PROGRAMMING) { return GurobiIsCorrectlyInstalled(); } - if (solver_type_.value() == - MPModelRequest::XPRESS_MIXED_INTEGER_PROGRAMMING || - solver_type_.value() == MPModelRequest::XPRESS_LINEAR_PROGRAMMING) { - return 
XpressIsCorrectlyInstalled(); - } return false; } @@ -635,12 +628,6 @@ void ModelSolverHelper::Solve(const ModelBuilderHelper& model) { break; } #endif // defined(USE_HIGHS) - case MPModelRequest:: - XPRESS_LINEAR_PROGRAMMING: // ABSL_FALLTHROUGH_INTENDED - case MPModelRequest::XPRESS_MIXED_INTEGER_PROGRAMMING: { - response_ = XPressSolveProto(request); - break; - } default: { response_->set_status( MPSolverResponseStatus::MPSOLVER_SOLVER_TYPE_UNAVAILABLE); From 966a59755de17cd98bfd59caf8a482ee49016510 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Wed, 18 Jun 2025 17:22:11 +0200 Subject: [PATCH 102/509] graph: fix iterator compatibility since C++17 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add missing iterator typedefs to custom iterator classes when std::iterator inheritance is deprecated since C++17. Signed-off-by: Clément Péron --- ortools/base/proto_enum_utils.h | 13 ++++++++++++- ortools/graph/graph.h | 7 ++++++- ortools/graph/iterators.h | 14 ++++++++++++-- 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/ortools/base/proto_enum_utils.h b/ortools/base/proto_enum_utils.h index a78dd61a72..bdf0331056 100644 --- a/ortools/base/proto_enum_utils.h +++ b/ortools/base/proto_enum_utils.h @@ -175,8 +175,19 @@ namespace internal { template class RepeatedEnumView { public: - class Iterator : public std::iterator { + class Iterator +#if __cplusplus < 201703L + : public std::iterator +#endif + { public: + using difference_type = ptrdiff_t; + using value_type = E; +#if __cplusplus >= 201703L + using iterator_category = std::input_iterator_tag; + using pointer = E*; + using reference = E&; +#endif explicit Iterator(RepeatedField::const_iterator ptr) : ptr_(ptr) {} bool operator==(const Iterator& it) const { return ptr_ == it.ptr_; } bool operator!=(const Iterator& it) const { return ptr_ != it.ptr_; } diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index 
c8b7ef0b83..6df4c13379 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -315,7 +315,7 @@ class BaseGraph { template class ArcPropertyIterator -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { @@ -324,6 +324,11 @@ class ArcPropertyIterator // TODO(b/385094969): This should be `NodeIndex` for integers, // `NodeIndex::value_type` for strong signed integer types. using difference_type = std::ptrdiff_t; +#if __cplusplus >= 201703L + using iterator_category = std::input_iterator_tag; + using pointer = PropertyT*; + using reference = PropertyT&; +#endif ArcPropertyIterator() = default; diff --git a/ortools/graph/iterators.h b/ortools/graph/iterators.h index 73f67a07bd..2506c0478b 100644 --- a/ortools/graph/iterators.h +++ b/ortools/graph/iterators.h @@ -124,13 +124,18 @@ class IntegerRangeIterator // TODO(b/385094969): In C++17, `std::iterator_traits` required // explicitly specifying the iterator category. Remove this when backwards // compatibility with C++17 is no longer needed. -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { public: using difference_type = ptrdiff_t; using value_type = IntegerType; +#if __cplusplus >= 201703L + using iterator_category = std::input_iterator_tag; + using pointer = IntegerType*; + using reference = IntegerType&; +#endif IntegerRangeIterator() : index_{} {} @@ -243,13 +248,18 @@ class IntegerRange : public BeginEndWrapper> { // different iterators with the same index type and sentinel. 
template class ChasingIterator -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { public: using difference_type = ptrdiff_t; using value_type = IndexT; +#if __cplusplus >= 201703L + using iterator_category = std::input_iterator_tag; + using pointer = IndexT*; + using reference = IndexT&; +#endif ChasingIterator() : index_(sentinel), next_(nullptr) {} From c4b01c129496383bc73da1f573b2f139db63a946 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 18 Jun 2025 18:20:30 +0200 Subject: [PATCH 103/509] reorganize gurobi and xpress side loading --- ortools/base/BUILD.bazel | 16 - ortools/base/dynamic_library.h | 120 --- ortools/gurobi/BUILD.bazel | 22 - ortools/gurobi/environment.cc | 469 --------- ortools/gurobi/environment.h | 740 ------------- ortools/gurobi/isv_public/BUILD.bazel | 2 +- ortools/gurobi/isv_public/gurobi_isv.h | 2 +- ortools/init/BUILD.bazel | 2 +- ortools/init/init.cc | 2 +- ortools/linear_solver/BUILD.bazel | 21 +- ortools/linear_solver/gurobi_interface.cc | 9 +- ortools/linear_solver/gurobi_util.cc | 33 +- ortools/linear_solver/gurobi_util.h | 11 +- .../linear_solver/proto_solver/BUILD.bazel | 3 +- .../proto_solver/gurobi_proto_solver.cc | 4 +- .../proto_solver/gurobi_proto_solver.h | 4 +- .../proto_solver/xpress_proto_solver.cc | 970 ------------------ .../proto_solver/xpress_proto_solver.h | 27 - .../wrappers/model_builder_helper.cc | 2 +- ortools/linear_solver/xpress_interface.cc | 2 +- .../linear_solver/xpress_interface_test.cc | 2 +- ortools/math_opt/solvers/BUILD.bazel | 8 +- ortools/math_opt/solvers/gurobi/BUILD.bazel | 2 +- ortools/math_opt/solvers/gurobi/g_gurobi.cc | 8 +- ortools/math_opt/solvers/gurobi/g_gurobi.h | 2 +- ortools/math_opt/solvers/gurobi_callback.cc | 3 +- ortools/math_opt/solvers/gurobi_callback.h | 2 +- ortools/math_opt/solvers/gurobi_solver.cc | 3 - ortools/math_opt/solvers/gurobi_solver.h | 3 +- ortools/math_opt/solvers/xpress/BUILD.bazel | 2 +- 
ortools/math_opt/solvers/xpress/g_xpress.cc | 2 +- ortools/math_opt/solvers/xpress/g_xpress.h | 2 +- ortools/math_opt/solvers/xpress_solver.cc | 2 +- ortools/math_opt/solvers/xpress_solver.h | 2 +- .../math_opt/solvers/xpress_solver_test.cc | 2 +- ortools/xpress/BUILD.bazel | 35 - ortools/xpress/CMakeLists.txt | 38 - ortools/xpress/environment.cc | 390 ------- ortools/xpress/environment.h | 551 ---------- 39 files changed, 102 insertions(+), 3418 deletions(-) delete mode 100644 ortools/base/dynamic_library.h delete mode 100644 ortools/gurobi/environment.cc delete mode 100644 ortools/gurobi/environment.h delete mode 100644 ortools/linear_solver/proto_solver/xpress_proto_solver.cc delete mode 100644 ortools/linear_solver/proto_solver/xpress_proto_solver.h delete mode 100644 ortools/xpress/BUILD.bazel delete mode 100644 ortools/xpress/CMakeLists.txt delete mode 100644 ortools/xpress/environment.cc delete mode 100644 ortools/xpress/environment.h diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index e86da36068..568647b0e2 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -208,22 +208,6 @@ cc_test( ], ) -cc_library( - name = "dynamic_library", - hdrs = ["dynamic_library.h"], - linkopts = select({ - "on_linux": ["-Wl,--no-as-needed -ldl"], - "on_macos": [], - "on_windows": [], - "//conditions:default": [], - }), - deps = [ - ":base", - ":logging", - "@abseil-cpp//absl/strings", - ], -) - cc_library( name = "encodingutils", hdrs = ["encodingutils.h"], diff --git a/ortools/base/dynamic_library.h b/ortools/base/dynamic_library.h deleted file mode 100644 index c47a7e7130..0000000000 --- a/ortools/base/dynamic_library.h +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2010-2025 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef OR_TOOLS_BASE_DYNAMIC_LIBRARY_H_ -#define OR_TOOLS_BASE_DYNAMIC_LIBRARY_H_ - -#include -#include - -#include "absl/strings/string_view.h" -#include "ortools/base/logging.h" - -#if defined(_MSC_VER) -#define WIN32_LEAN_AND_MEAN // disables several conflicting macros -#include -#elif defined(__MINGW32__) || defined(__MINGW64__) -#include -#elif defined(__GNUC__) -#include -#endif - -class DynamicLibrary { - public: - DynamicLibrary() : library_handle_(nullptr) {} - - ~DynamicLibrary() { - if (library_handle_ == nullptr) { - return; - } - -#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) - FreeLibrary(static_cast(library_handle_)); -#elif defined(__GNUC__) - dlclose(library_handle_); -#endif - } - - bool TryToLoad(const absl::string_view library_name) { - library_name_ = library_name; -#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) - library_handle_ = static_cast(LoadLibraryA(library_name_.c_str())); -#elif defined(__GNUC__) - library_handle_ = dlopen(library_name_.c_str(), RTLD_NOW); -#endif - return library_handle_ != nullptr; - } - - bool LibraryIsLoaded() const { return library_handle_ != nullptr; } - - template - std::function GetFunction(const char* function_name) { -#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) - // On Windows, avoid casting to void*: not supported by MinGW. - FARPROC function_address = - GetProcAddress(static_cast(library_handle_), function_name); -#else // Not Windows. 
- const void* function_address = dlsym(library_handle_, function_name); -#endif // MinGW. - - CHECK(function_address) - << "Error: could not find function " << std::string(function_name) - << " in " << library_name_; - - return TypeParser::CreateFunction(function_address); - } - - template - std::function GetFunction(const std::string& function_name) { - return GetFunction(function_name.c_str()); - } - - template - void GetFunction(std::function* function, const char* function_name) { - *function = GetFunction(function_name); - } - - template - void GetFunction(std::function* function, - const std::string function_name) { - GetFunction(function, function_name.c_str()); - } - - private: - void* library_handle_ = nullptr; - std::string library_name_; - - template - struct TypeParser {}; - - template - struct TypeParser { -#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) - // Windows: take a FARPROC as argument. - static std::function CreateFunction( - const FARPROC function_address) { - return std::function( - reinterpret_cast(function_address)); - } -#else - // Not Windows: take a void* as argument. 
- static std::function CreateFunction( - const void* function_address) { - return std::function(reinterpret_cast( - const_cast(function_address))); - } -#endif - }; -}; - -#endif // OR_TOOLS_BASE_DYNAMIC_LIBRARY_H_ diff --git a/ortools/gurobi/BUILD.bazel b/ortools/gurobi/BUILD.bazel index a64e5c070b..07070d46d5 100644 --- a/ortools/gurobi/BUILD.bazel +++ b/ortools/gurobi/BUILD.bazel @@ -13,28 +13,6 @@ package(default_visibility = ["//visibility:public"]) -cc_library( - name = "environment", - srcs = [ - "environment.cc", - ], - hdrs = [ - "environment.h", - ], - deps = [ - "//ortools/base", - "//ortools/base:dynamic_library", - "//ortools/base:file", - "//ortools/base:status_macros", - "@abseil-cpp//absl/flags:flag", - "@abseil-cpp//absl/status", - "@abseil-cpp//absl/status:statusor", - "@abseil-cpp//absl/strings", - "@abseil-cpp//absl/synchronization", - "@abseil-cpp//absl/types:optional", - ], -) - cc_library( name = "gurobi_stdout_matchers", testonly = True, diff --git a/ortools/gurobi/environment.cc b/ortools/gurobi/environment.cc deleted file mode 100644 index 65485c7e27..0000000000 --- a/ortools/gurobi/environment.cc +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright 2010-2025 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "ortools/gurobi/environment.h" - -#include -#include - -#include "absl/flags/flag.h" -#include "absl/status/status.h" -#include "absl/status/statusor.h" -#include "absl/strings/match.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_format.h" -#include "absl/strings/str_join.h" -#include "absl/synchronization/mutex.h" -#include "ortools/base/file.h" -#include "ortools/base/logging.h" -#include "ortools/base/status_macros.h" - -namespace operations_research { - -bool GurobiIsCorrectlyInstalled() { - absl::StatusOr status = GetGurobiEnv(); - if (!status.ok() || status.value() == nullptr) { - LOG(WARNING) << status.status(); - return false; - } - - GRBfreeenv(status.value()); - - return true; -} - -// This was generated with the parse_header.py script. -// See the comment at the top of the script. - -// This is the 'define' section. -std::function GRBisattravailable = - nullptr; -std::function - GRBgetintattr = nullptr; -std::function - GRBsetintattr = nullptr; -std::function - GRBgetintattrelement = nullptr; -std::function - GRBsetintattrelement = nullptr; -std::function - GRBgetintattrarray = nullptr; -std::function - GRBsetintattrarray = nullptr; -std::function - GRBsetintattrlist = nullptr; -std::function - GRBgetcharattrelement = nullptr; -std::function - GRBsetcharattrelement = nullptr; -std::function - GRBgetcharattrarray = nullptr; -std::function - GRBsetcharattrarray = nullptr; -std::function - GRBsetcharattrlist = nullptr; -std::function - GRBgetdblattr = nullptr; -std::function - GRBsetdblattr = nullptr; -std::function - GRBgetdblattrelement = nullptr; -std::function - GRBsetdblattrelement = nullptr; -std::function - GRBgetdblattrarray = nullptr; -std::function - GRBsetdblattrarray = nullptr; -std::function - GRBsetdblattrlist = nullptr; -std::function - GRBgetstrattr = nullptr; -std::function - GRBsetstrattr = nullptr; -std::function - GRBsetcallbackfunc = nullptr; -std::function GRBcbget = - nullptr; -std::function - 
GRBcbsolution = nullptr; -std::function - GRBcbcut = nullptr; -std::function - GRBcblazy = nullptr; -std::function - GRBgetvars = nullptr; -std::function GRBoptimize = nullptr; -std::function GRBcomputeIIS = nullptr; -std::function GRBwrite = nullptr; -std::function - GRBnewmodel = nullptr; -std::function - GRBaddvar = nullptr; -std::function - GRBaddvars = nullptr; -std::function - GRBaddconstr = nullptr; -std::function - GRBaddconstrs = nullptr; -std::function - GRBaddrangeconstr = nullptr; -std::function - GRBaddsos = nullptr; -std::function - GRBaddgenconstrMax = nullptr; -std::function - GRBaddgenconstrMin = nullptr; -std::function - GRBaddgenconstrAbs = nullptr; -std::function - GRBaddgenconstrAnd = nullptr; -std::function - GRBaddgenconstrOr = nullptr; -std::function - GRBaddgenconstrIndicator = nullptr; -std::function - GRBaddqconstr = nullptr; -std::function - GRBaddqpterms = nullptr; -std::function GRBdelvars = nullptr; -std::function GRBdelconstrs = nullptr; -std::function GRBdelsos = nullptr; -std::function GRBdelgenconstrs = - nullptr; -std::function GRBdelqconstrs = nullptr; -std::function GRBdelq = nullptr; -std::function - GRBchgcoeffs = nullptr; -std::function GRBupdatemodel = nullptr; -std::function GRBfreemodel = nullptr; -std::function GRBterminate = nullptr; -std::function - GRBsetobjectiven = nullptr; -std::function - GRBgetintparam = nullptr; -std::function - GRBgetdblparam = nullptr; -std::function - GRBgetstrparam = nullptr; -std::function - GRBgetintparaminfo = nullptr; -std::function - GRBgetdblparaminfo = nullptr; -std::function - GRBgetstrparaminfo = nullptr; -std::function GRBgetparamtype = - nullptr; -std::function GRBgetparamname = - nullptr; -std::function - GRBsetparam = nullptr; -std::function - GRBsetintparam = nullptr; -std::function - GRBsetdblparam = nullptr; -std::function - GRBsetstrparam = nullptr; -std::function GRBresetparams = nullptr; -std::function GRBcopyparams = nullptr; -std::function GRBgetnumparams = nullptr; 
-std::function GRBemptyenv = nullptr; -std::function GRBloadenv = nullptr; -std::function GRBstartenv = nullptr; -std::function GRBgetenv = nullptr; -std::function GRBgetmultiobjenv = nullptr; -std::function GRBdiscardmultiobjenvs = nullptr; -std::function GRBfreeenv = nullptr; -std::function GRBgeterrormsg = nullptr; -std::function GRBversion = - nullptr; -std::function GRBplatform = nullptr; - -void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { - // This was generated with the parse_header.py script. - // See the comment at the top of the script. - - // This is the 'assign' section. - gurobi_dynamic_library->GetFunction(&GRBisattravailable, - "GRBisattravailable"); - gurobi_dynamic_library->GetFunction(&GRBgetintattr, "GRBgetintattr"); - gurobi_dynamic_library->GetFunction(&GRBsetintattr, "GRBsetintattr"); - gurobi_dynamic_library->GetFunction(&GRBgetintattrelement, - "GRBgetintattrelement"); - gurobi_dynamic_library->GetFunction(&GRBsetintattrelement, - "GRBsetintattrelement"); - gurobi_dynamic_library->GetFunction(&GRBgetintattrarray, - "GRBgetintattrarray"); - gurobi_dynamic_library->GetFunction(&GRBsetintattrarray, - "GRBsetintattrarray"); - gurobi_dynamic_library->GetFunction(&GRBsetintattrlist, "GRBsetintattrlist"); - gurobi_dynamic_library->GetFunction(&GRBgetcharattrelement, - "GRBgetcharattrelement"); - gurobi_dynamic_library->GetFunction(&GRBsetcharattrelement, - "GRBsetcharattrelement"); - gurobi_dynamic_library->GetFunction(&GRBgetcharattrarray, - "GRBgetcharattrarray"); - gurobi_dynamic_library->GetFunction(&GRBsetcharattrarray, - "GRBsetcharattrarray"); - gurobi_dynamic_library->GetFunction(&GRBsetcharattrlist, - "GRBsetcharattrlist"); - gurobi_dynamic_library->GetFunction(&GRBgetdblattr, "GRBgetdblattr"); - gurobi_dynamic_library->GetFunction(&GRBsetdblattr, "GRBsetdblattr"); - gurobi_dynamic_library->GetFunction(&GRBgetdblattrelement, - "GRBgetdblattrelement"); - gurobi_dynamic_library->GetFunction(&GRBsetdblattrelement, - 
"GRBsetdblattrelement"); - gurobi_dynamic_library->GetFunction(&GRBgetdblattrarray, - "GRBgetdblattrarray"); - gurobi_dynamic_library->GetFunction(&GRBsetdblattrarray, - "GRBsetdblattrarray"); - gurobi_dynamic_library->GetFunction(&GRBsetdblattrlist, "GRBsetdblattrlist"); - gurobi_dynamic_library->GetFunction(&GRBgetstrattr, "GRBgetstrattr"); - gurobi_dynamic_library->GetFunction(&GRBsetstrattr, "GRBsetstrattr"); - gurobi_dynamic_library->GetFunction(&GRBsetcallbackfunc, - "GRBsetcallbackfunc"); - gurobi_dynamic_library->GetFunction(&GRBcbget, "GRBcbget"); - gurobi_dynamic_library->GetFunction(&GRBcbsolution, "GRBcbsolution"); - gurobi_dynamic_library->GetFunction(&GRBcbcut, "GRBcbcut"); - gurobi_dynamic_library->GetFunction(&GRBcblazy, "GRBcblazy"); - gurobi_dynamic_library->GetFunction(&GRBgetvars, "GRBgetvars"); - gurobi_dynamic_library->GetFunction(&GRBoptimize, "GRBoptimize"); - gurobi_dynamic_library->GetFunction(&GRBcomputeIIS, "GRBcomputeIIS"); - gurobi_dynamic_library->GetFunction(&GRBwrite, "GRBwrite"); - gurobi_dynamic_library->GetFunction(&GRBnewmodel, "GRBnewmodel"); - gurobi_dynamic_library->GetFunction(&GRBaddvar, "GRBaddvar"); - gurobi_dynamic_library->GetFunction(&GRBaddvars, "GRBaddvars"); - gurobi_dynamic_library->GetFunction(&GRBaddconstr, "GRBaddconstr"); - gurobi_dynamic_library->GetFunction(&GRBaddconstrs, "GRBaddconstrs"); - gurobi_dynamic_library->GetFunction(&GRBaddrangeconstr, "GRBaddrangeconstr"); - gurobi_dynamic_library->GetFunction(&GRBaddsos, "GRBaddsos"); - gurobi_dynamic_library->GetFunction(&GRBaddgenconstrMax, - "GRBaddgenconstrMax"); - gurobi_dynamic_library->GetFunction(&GRBaddgenconstrMin, - "GRBaddgenconstrMin"); - gurobi_dynamic_library->GetFunction(&GRBaddgenconstrAbs, - "GRBaddgenconstrAbs"); - gurobi_dynamic_library->GetFunction(&GRBaddgenconstrAnd, - "GRBaddgenconstrAnd"); - gurobi_dynamic_library->GetFunction(&GRBaddgenconstrOr, "GRBaddgenconstrOr"); - gurobi_dynamic_library->GetFunction(&GRBaddgenconstrIndicator, - 
"GRBaddgenconstrIndicator"); - gurobi_dynamic_library->GetFunction(&GRBaddqconstr, "GRBaddqconstr"); - gurobi_dynamic_library->GetFunction(&GRBaddqpterms, "GRBaddqpterms"); - gurobi_dynamic_library->GetFunction(&GRBdelvars, "GRBdelvars"); - gurobi_dynamic_library->GetFunction(&GRBdelconstrs, "GRBdelconstrs"); - gurobi_dynamic_library->GetFunction(&GRBdelsos, "GRBdelsos"); - gurobi_dynamic_library->GetFunction(&GRBdelgenconstrs, "GRBdelgenconstrs"); - gurobi_dynamic_library->GetFunction(&GRBdelqconstrs, "GRBdelqconstrs"); - gurobi_dynamic_library->GetFunction(&GRBdelq, "GRBdelq"); - gurobi_dynamic_library->GetFunction(&GRBchgcoeffs, "GRBchgcoeffs"); - gurobi_dynamic_library->GetFunction(&GRBupdatemodel, "GRBupdatemodel"); - gurobi_dynamic_library->GetFunction(&GRBfreemodel, "GRBfreemodel"); - gurobi_dynamic_library->GetFunction(&GRBterminate, "GRBterminate"); - gurobi_dynamic_library->GetFunction(&GRBsetobjectiven, "GRBsetobjectiven"); - gurobi_dynamic_library->GetFunction(&GRBgetintparam, "GRBgetintparam"); - gurobi_dynamic_library->GetFunction(&GRBgetdblparam, "GRBgetdblparam"); - gurobi_dynamic_library->GetFunction(&GRBgetstrparam, "GRBgetstrparam"); - gurobi_dynamic_library->GetFunction(&GRBsetparam, "GRBsetparam"); - gurobi_dynamic_library->GetFunction(&GRBsetintparam, "GRBsetintparam"); - gurobi_dynamic_library->GetFunction(&GRBsetdblparam, "GRBsetdblparam"); - gurobi_dynamic_library->GetFunction(&GRBsetstrparam, "GRBsetstrparam"); - gurobi_dynamic_library->GetFunction(&GRBresetparams, "GRBresetparams"); - gurobi_dynamic_library->GetFunction(&GRBcopyparams, "GRBcopyparams"); - gurobi_dynamic_library->GetFunction(&GRBloadenv, "GRBloadenv"); - gurobi_dynamic_library->GetFunction(&GRBstartenv, "GRBstartenv"); - gurobi_dynamic_library->GetFunction(&GRBemptyenv, "GRBemptyenv"); - gurobi_dynamic_library->GetFunction(&GRBgetnumparams, "GRBgetnumparams"); - gurobi_dynamic_library->GetFunction(&GRBgetparamname, "GRBgetparamname"); - 
gurobi_dynamic_library->GetFunction(&GRBgetparamtype, "GRBgetparamtype"); - gurobi_dynamic_library->GetFunction(&GRBgetintparaminfo, - "GRBgetintparaminfo"); - gurobi_dynamic_library->GetFunction(&GRBgetdblparaminfo, - "GRBgetdblparaminfo"); - gurobi_dynamic_library->GetFunction(&GRBgetstrparaminfo, - "GRBgetstrparaminfo"); - gurobi_dynamic_library->GetFunction(&GRBgetenv, "GRBgetenv"); - gurobi_dynamic_library->GetFunction(&GRBgetmultiobjenv, "GRBgetmultiobjenv"); - gurobi_dynamic_library->GetFunction(&GRBdiscardmultiobjenvs, - "GRBdiscardmultiobjenvs"); - gurobi_dynamic_library->GetFunction(&GRBfreeenv, "GRBfreeenv"); - gurobi_dynamic_library->GetFunction(&GRBgeterrormsg, "GRBgeterrormsg"); - gurobi_dynamic_library->GetFunction(&GRBversion, "GRBversion"); - gurobi_dynamic_library->GetFunction(&GRBplatform, "GRBplatform"); -} - -std::vector GurobiDynamicLibraryPotentialPaths() { - std::vector potential_paths; - const std::vector kGurobiVersions = { - "1202", "1201", "1200", "1103", "1102", "1101", "1100", - "1003", "1002", "1001", "1000", "952", "951", "950", - "911", "910", "903", "902", "811", "801", "752"}; - potential_paths.reserve(kGurobiVersions.size() * 3); - - // Look for libraries pointed by GUROBI_HOME first. 
- const char* gurobi_home_from_env = getenv("GUROBI_HOME"); - if (gurobi_home_from_env != nullptr) { - for (const absl::string_view version : kGurobiVersions) { - const absl::string_view lib = version.substr(0, version.size() - 1); -#if defined(_MSC_VER) // Windows - potential_paths.push_back( - absl::StrCat(gurobi_home_from_env, "\\bin\\gurobi", lib, ".dll")); -#elif defined(__APPLE__) // OS X - potential_paths.push_back( - absl::StrCat(gurobi_home_from_env, "/lib/libgurobi", lib, ".dylib")); -#elif defined(__GNUC__) // Linux - potential_paths.push_back( - absl::StrCat(gurobi_home_from_env, "/lib/libgurobi", lib, ".so")); - potential_paths.push_back( - absl::StrCat(gurobi_home_from_env, "/lib64/libgurobi", lib, ".so")); -#else - LOG(ERROR) << "OS Not recognized by gurobi/environment.cc." - << " You won't be able to use Gurobi."; -#endif - } - } - - // Search for canonical places. - for (const absl::string_view version : kGurobiVersions) { - const absl::string_view lib = version.substr(0, version.size() - 1); -#if defined(_MSC_VER) // Windows - potential_paths.push_back(absl::StrCat("C:\\Program Files\\gurobi", version, - "\\win64\\bin\\gurobi", lib, - ".dll")); - potential_paths.push_back(absl::StrCat( - "C:\\gurobi", version, "\\win64\\bin\\gurobi", lib, ".dll")); - potential_paths.push_back(absl::StrCat("gurobi", lib, ".dll")); -#elif defined(__APPLE__) // OS X - potential_paths.push_back(absl::StrCat( - "/Library/gurobi", version, "/mac64/lib/libgurobi", lib, ".dylib")); - potential_paths.push_back(absl::StrCat("/Library/gurobi", version, - "/macos_universal2/lib/libgurobi", - lib, ".dylib")); -#elif defined(__GNUC__) // Linux - potential_paths.push_back(absl::StrCat( - "/opt/gurobi", version, "/linux64/lib/libgurobi", lib, ".so")); - potential_paths.push_back(absl::StrCat( - "/opt/gurobi", version, "/linux64/lib64/libgurobi", lib, ".so")); - potential_paths.push_back( - absl::StrCat("/opt/gurobi/linux64/lib/libgurobi", lib, ".so")); - 
potential_paths.push_back( - absl::StrCat("/opt/gurobi/linux64/lib64/libgurobi", lib, ".so")); -#else - LOG(ERROR) << "OS Not recognized by gurobi/environment.cc." - << " You won't be able to use Gurobi."; -#endif - } - -#if defined(__GNUC__) // path in linux64 gurobi/optimizer docker image. - for (const absl::string_view version : - {"12.0.2", "12.0.1", "12.0.0", "11.0.3", "11.0.2", "11.0.1", "11.0.0", - "10.0.3", "10.0.2", "10.0.1", "10.0.0", "9.5.2", "9.5.1", "9.5.0"}) { - potential_paths.push_back( - absl::StrCat("/opt/gurobi/linux64/lib/libgurobi.so.", version)); - } -#endif - return potential_paths; -} - -absl::Status LoadGurobiDynamicLibrary( - std::vector potential_paths) { - static std::once_flag gurobi_loading_done; - static absl::Status gurobi_load_status; - static DynamicLibrary gurobi_library; - static absl::Mutex mutex; - - absl::MutexLock lock(&mutex); - - std::call_once(gurobi_loading_done, [&potential_paths]() { - const std::vector canonical_paths = - GurobiDynamicLibraryPotentialPaths(); - potential_paths.insert(potential_paths.end(), canonical_paths.begin(), - canonical_paths.end()); - for (const absl::string_view path : potential_paths) { - if (gurobi_library.TryToLoad(path)) { - LOG(INFO) << "Found the Gurobi library in '" << path << "."; - break; - } - } - - if (gurobi_library.LibraryIsLoaded()) { - LoadGurobiFunctions(&gurobi_library); - gurobi_load_status = absl::OkStatus(); - } else { - gurobi_load_status = absl::NotFoundError(absl::StrCat( - "Could not find the Gurobi shared library. Looked in: [", - absl::StrJoin(potential_paths, "', '"), - "]. 
If you know where it" - " is, pass the full path to 'LoadGurobiDynamicLibrary()'.")); - } - }); - return gurobi_load_status; -} - -absl::StatusOr GetGurobiEnv() { - RETURN_IF_ERROR(LoadGurobiDynamicLibrary({})); - - GRBenv* env = nullptr; - - if (GRBloadenv(&env, nullptr) != 0 || env == nullptr) { - return absl::FailedPreconditionError( - absl::StrCat("Found the Gurobi shared library, but could not create " - "Gurobi environment: is Gurobi licensed on this machine?", - GRBgeterrormsg(env))); - } - return env; -} - -} // namespace operations_research diff --git a/ortools/gurobi/environment.h b/ortools/gurobi/environment.h deleted file mode 100644 index ab3bd46e76..0000000000 --- a/ortools/gurobi/environment.h +++ /dev/null @@ -1,740 +0,0 @@ -// Copyright 2010-2025 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#ifndef OR_TOOLS_GUROBI_ENVIRONMENT_H_ -#define OR_TOOLS_GUROBI_ENVIRONMENT_H_ - -#include "absl/flags/declare.h" -#include "absl/flags/flag.h" -#include "absl/status/status.h" -#include "absl/status/statusor.h" -#include "absl/strings/string_view.h" -#include "ortools/base/dynamic_library.h" -#include "ortools/base/logging.h" - -#if defined(_MSC_VER) -#define GUROBI_STDCALL __stdcall -#else -#define GUROBI_STDCALL -#endif - -extern "C" { -typedef struct _GRBmodel GRBmodel; -typedef struct _GRBenv GRBenv; -typedef struct _GRBsvec { - int len; - int* ind; - double* val; -} GRBsvec; -} - -namespace operations_research { - -absl::StatusOr GetGurobiEnv(); - -// This returns true if the Gurobi shared library is properly loaded (otherwise, -// tries to find it and load it) and if a Gurobi license can be obtained (it -// does that by trying to grab a license and then release it). -bool GurobiIsCorrectlyInstalled(); - -// clang-format off -// Force the loading of the gurobi dynamic library. It returns true if the -// library was successfully loaded. This method can only be called once. -// Successive calls are no-op. -// -// Note that it does not check if a token license can be grabbed. -absl::Status LoadGurobiDynamicLibrary(std::vector potential_paths); - -// The list of #define and extern std::function<> below is generated directly -// from gurobi_c.h via parse_header.py -// See the top comment on the parse_header.py file. 
-// This is the header section - -#define GRB_VERSION_MAJOR 10 -#define GRB_VERSION_MINOR 0 -#define GRB_VERSION_TECHNICAL 0 -#define DEFAULT_CS_PRIORITY 0 -#define MAX_CS_PRIORITY 100 -#define DEFAULT_CS_PORT 61000 -#define DEFAULT_CS_HANGUP 60 -#define GRB_ERROR_OUT_OF_MEMORY 10001 -#define GRB_ERROR_NULL_ARGUMENT 10002 -#define GRB_ERROR_INVALID_ARGUMENT 10003 -#define GRB_ERROR_UNKNOWN_ATTRIBUTE 10004 -#define GRB_ERROR_DATA_NOT_AVAILABLE 10005 -#define GRB_ERROR_INDEX_OUT_OF_RANGE 10006 -#define GRB_ERROR_UNKNOWN_PARAMETER 10007 -#define GRB_ERROR_VALUE_OUT_OF_RANGE 10008 -#define GRB_ERROR_NO_LICENSE 10009 -#define GRB_ERROR_SIZE_LIMIT_EXCEEDED 10010 -#define GRB_ERROR_CALLBACK 10011 -#define GRB_ERROR_FILE_READ 10012 -#define GRB_ERROR_FILE_WRITE 10013 -#define GRB_ERROR_NUMERIC 10014 -#define GRB_ERROR_IIS_NOT_INFEASIBLE 10015 -#define GRB_ERROR_NOT_FOR_MIP 10016 -#define GRB_ERROR_OPTIMIZATION_IN_PROGRESS 10017 -#define GRB_ERROR_DUPLICATES 10018 -#define GRB_ERROR_NODEFILE 10019 -#define GRB_ERROR_Q_NOT_PSD 10020 -#define GRB_ERROR_QCP_EQUALITY_CONSTRAINT 10021 -#define GRB_ERROR_NETWORK 10022 -#define GRB_ERROR_JOB_REJECTED 10023 -#define GRB_ERROR_NOT_SUPPORTED 10024 -#define GRB_ERROR_EXCEED_2B_NONZEROS 10025 -#define GRB_ERROR_INVALID_PIECEWISE_OBJ 10026 -#define GRB_ERROR_UPDATEMODE_CHANGE 10027 -#define GRB_ERROR_CLOUD 10028 -#define GRB_ERROR_MODEL_MODIFICATION 10029 -#define GRB_ERROR_CSWORKER 10030 -#define GRB_ERROR_TUNE_MODEL_TYPES 10031 -#define GRB_ERROR_SECURITY 10032 -#define GRB_LESS_EQUAL '<' -#define GRB_GREATER_EQUAL '>' -#define GRB_EQUAL '=' -#define GRB_CONTINUOUS 'C' -#define GRB_BINARY 'B' -#define GRB_INTEGER 'I' -#define GRB_SEMICONT 'S' -#define GRB_SEMIINT 'N' -#define GRB_MINIMIZE 1 -#define GRB_MAXIMIZE -1 -#define GRB_SOS_TYPE1 1 -#define GRB_SOS_TYPE2 2 -#define GRB_INFINITY 1e100 -#define GRB_UNDEFINED 1e101 -#define GRB_MAXINT 2000000000 -#define GRB_MAX_NAMELEN 255 -#define GRB_MAX_STRLEN 512 -#define GRB_MAX_TAGLEN 
10240 -#define GRB_MAX_CONCURRENT 64 -#define CB_ARGS GRBmodel *model, void *cbdata, int where, void *usrdata -#define LOGCB_ARGS char *msg, void *logdata -extern std::function GRBisattravailable; -extern std::function GRBgetintattr; -extern std::function GRBsetintattr; -extern std::function GRBgetintattrelement; -extern std::function GRBsetintattrelement; -extern std::function GRBgetintattrarray; -extern std::function GRBsetintattrarray; -extern std::function GRBsetintattrlist; -extern std::function GRBgetcharattrelement; -extern std::function GRBsetcharattrelement; -extern std::function GRBgetcharattrarray; -extern std::function GRBsetcharattrarray; -extern std::function GRBsetcharattrlist; -extern std::function GRBgetdblattr; -extern std::function GRBsetdblattr; -extern std::function GRBgetdblattrelement; -extern std::function GRBsetdblattrelement; -extern std::function GRBgetdblattrarray; -extern std::function GRBsetdblattrarray; -extern std::function GRBsetdblattrlist; -extern std::function GRBgetstrattr; -extern std::function GRBsetstrattr; -extern std::function GRBsetcallbackfunc; -extern std::function GRBcbget; -extern std::function GRBcbsolution; -extern std::function GRBcbcut; -extern std::function GRBcblazy; -#define GRB_INT_ATTR_NUMCONSTRS "NumConstrs" -#define GRB_INT_ATTR_NUMVARS "NumVars" -#define GRB_INT_ATTR_NUMSOS "NumSOS" -#define GRB_INT_ATTR_NUMQCONSTRS "NumQConstrs" -#define GRB_INT_ATTR_NUMGENCONSTRS "NumGenConstrs" -#define GRB_INT_ATTR_NUMNZS "NumNZs" -#define GRB_DBL_ATTR_DNUMNZS "DNumNZs" -#define GRB_INT_ATTR_NUMQNZS "NumQNZs" -#define GRB_INT_ATTR_NUMQCNZS "NumQCNZs" -#define GRB_INT_ATTR_NUMINTVARS "NumIntVars" -#define GRB_INT_ATTR_NUMBINVARS "NumBinVars" -#define GRB_INT_ATTR_NUMPWLOBJVARS "NumPWLObjVars" -#define GRB_STR_ATTR_MODELNAME "ModelName" -#define GRB_INT_ATTR_MODELSENSE "ModelSense" -#define GRB_DBL_ATTR_OBJCON "ObjCon" -#define GRB_INT_ATTR_IS_MIP "IsMIP" -#define GRB_INT_ATTR_IS_QP "IsQP" -#define GRB_INT_ATTR_IS_QCP 
"IsQCP" -#define GRB_INT_ATTR_IS_MULTIOBJ "IsMultiObj" -#define GRB_INT_ATTR_LICENSE_EXPIRATION "LicenseExpiration" -#define GRB_INT_ATTR_NUMTAGGED "NumTagged" -#define GRB_INT_ATTR_FINGERPRINT "Fingerprint" -#define GRB_INT_ATTR_BATCHERRORCODE "BatchErrorCode" -#define GRB_STR_ATTR_BATCHERRORMESSAGE "BatchErrorMessage" -#define GRB_STR_ATTR_BATCHID "BatchID" -#define GRB_INT_ATTR_BATCHSTATUS "BatchStatus" -#define GRB_DBL_ATTR_LB "LB" -#define GRB_DBL_ATTR_UB "UB" -#define GRB_DBL_ATTR_OBJ "Obj" -#define GRB_CHAR_ATTR_VTYPE "VType" -#define GRB_DBL_ATTR_START "Start" -#define GRB_DBL_ATTR_PSTART "PStart" -#define GRB_INT_ATTR_BRANCHPRIORITY "BranchPriority" -#define GRB_STR_ATTR_VARNAME "VarName" -#define GRB_INT_ATTR_PWLOBJCVX "PWLObjCvx" -#define GRB_DBL_ATTR_VARHINTVAL "VarHintVal" -#define GRB_INT_ATTR_VARHINTPRI "VarHintPri" -#define GRB_INT_ATTR_PARTITION "Partition" -#define GRB_INT_ATTR_POOLIGNORE "PoolIgnore" -#define GRB_STR_ATTR_VTAG "VTag" -#define GRB_STR_ATTR_CTAG "CTag" -#define GRB_DBL_ATTR_RHS "RHS" -#define GRB_DBL_ATTR_DSTART "DStart" -#define GRB_CHAR_ATTR_SENSE "Sense" -#define GRB_STR_ATTR_CONSTRNAME "ConstrName" -#define GRB_INT_ATTR_LAZY "Lazy" -#define GRB_STR_ATTR_QCTAG "QCTag" -#define GRB_DBL_ATTR_QCRHS "QCRHS" -#define GRB_CHAR_ATTR_QCSENSE "QCSense" -#define GRB_STR_ATTR_QCNAME "QCName" -#define GRB_INT_ATTR_GENCONSTRTYPE "GenConstrType" -#define GRB_STR_ATTR_GENCONSTRNAME "GenConstrName" -#define GRB_INT_ATTR_FUNCPIECES "FuncPieces" -#define GRB_DBL_ATTR_FUNCPIECEERROR "FuncPieceError" -#define GRB_DBL_ATTR_FUNCPIECELENGTH "FuncPieceLength" -#define GRB_DBL_ATTR_FUNCPIECERATIO "FuncPieceRatio" -#define GRB_DBL_ATTR_MAX_COEFF "MaxCoeff" -#define GRB_DBL_ATTR_MIN_COEFF "MinCoeff" -#define GRB_DBL_ATTR_MAX_BOUND "MaxBound" -#define GRB_DBL_ATTR_MIN_BOUND "MinBound" -#define GRB_DBL_ATTR_MAX_OBJ_COEFF "MaxObjCoeff" -#define GRB_DBL_ATTR_MIN_OBJ_COEFF "MinObjCoeff" -#define GRB_DBL_ATTR_MAX_RHS "MaxRHS" -#define GRB_DBL_ATTR_MIN_RHS 
"MinRHS" -#define GRB_DBL_ATTR_MAX_QCCOEFF "MaxQCCoeff" -#define GRB_DBL_ATTR_MIN_QCCOEFF "MinQCCoeff" -#define GRB_DBL_ATTR_MAX_QOBJ_COEFF "MaxQObjCoeff" -#define GRB_DBL_ATTR_MIN_QOBJ_COEFF "MinQObjCoeff" -#define GRB_DBL_ATTR_MAX_QCLCOEFF "MaxQCLCoeff" -#define GRB_DBL_ATTR_MIN_QCLCOEFF "MinQCLCoeff" -#define GRB_DBL_ATTR_MAX_QCRHS "MaxQCRHS" -#define GRB_DBL_ATTR_MIN_QCRHS "MinQCRHS" -#define GRB_DBL_ATTR_RUNTIME "Runtime" -#define GRB_DBL_ATTR_WORK "Work" -#define GRB_INT_ATTR_STATUS "Status" -#define GRB_DBL_ATTR_OBJVAL "ObjVal" -#define GRB_DBL_ATTR_OBJBOUND "ObjBound" -#define GRB_DBL_ATTR_OBJBOUNDC "ObjBoundC" -#define GRB_DBL_ATTR_POOLOBJBOUND "PoolObjBound" -#define GRB_DBL_ATTR_POOLOBJVAL "PoolObjVal" -#define GRB_DBL_ATTR_MIPGAP "MIPGap" -#define GRB_INT_ATTR_SOLCOUNT "SolCount" -#define GRB_DBL_ATTR_ITERCOUNT "IterCount" -#define GRB_INT_ATTR_BARITERCOUNT "BarIterCount" -#define GRB_DBL_ATTR_NODECOUNT "NodeCount" -#define GRB_DBL_ATTR_OPENNODECOUNT "OpenNodeCount" -#define GRB_INT_ATTR_HASDUALNORM "HasDualNorm" -#define GRB_INT_ATTR_CONCURRENTWINMETHOD "ConcurrentWinMethod" -#define GRB_DBL_ATTR_X "X" -#define GRB_DBL_ATTR_XN "Xn" -#define GRB_DBL_ATTR_BARX "BarX" -#define GRB_DBL_ATTR_RC "RC" -#define GRB_DBL_ATTR_VDUALNORM "VDualNorm" -#define GRB_INT_ATTR_VBASIS "VBasis" -#define GRB_DBL_ATTR_PI "Pi" -#define GRB_DBL_ATTR_QCPI "QCPi" -#define GRB_DBL_ATTR_SLACK "Slack" -#define GRB_DBL_ATTR_QCSLACK "QCSlack" -#define GRB_DBL_ATTR_CDUALNORM "CDualNorm" -#define GRB_INT_ATTR_CBASIS "CBasis" -#define GRB_DBL_ATTR_MAX_VIO "MaxVio" -#define GRB_DBL_ATTR_BOUND_VIO "BoundVio" -#define GRB_DBL_ATTR_BOUND_SVIO "BoundSVio" -#define GRB_INT_ATTR_BOUND_VIO_INDEX "BoundVioIndex" -#define GRB_INT_ATTR_BOUND_SVIO_INDEX "BoundSVioIndex" -#define GRB_DBL_ATTR_BOUND_VIO_SUM "BoundVioSum" -#define GRB_DBL_ATTR_BOUND_SVIO_SUM "BoundSVioSum" -#define GRB_DBL_ATTR_CONSTR_VIO "ConstrVio" -#define GRB_DBL_ATTR_CONSTR_SVIO "ConstrSVio" -#define 
GRB_INT_ATTR_CONSTR_VIO_INDEX "ConstrVioIndex" -#define GRB_INT_ATTR_CONSTR_SVIO_INDEX "ConstrSVioIndex" -#define GRB_DBL_ATTR_CONSTR_VIO_SUM "ConstrVioSum" -#define GRB_DBL_ATTR_CONSTR_SVIO_SUM "ConstrSVioSum" -#define GRB_DBL_ATTR_CONSTR_RESIDUAL "ConstrResidual" -#define GRB_DBL_ATTR_CONSTR_SRESIDUAL "ConstrSResidual" -#define GRB_INT_ATTR_CONSTR_RESIDUAL_INDEX "ConstrResidualIndex" -#define GRB_INT_ATTR_CONSTR_SRESIDUAL_INDEX "ConstrSResidualIndex" -#define GRB_DBL_ATTR_CONSTR_RESIDUAL_SUM "ConstrResidualSum" -#define GRB_DBL_ATTR_CONSTR_SRESIDUAL_SUM "ConstrSResidualSum" -#define GRB_DBL_ATTR_DUAL_VIO "DualVio" -#define GRB_DBL_ATTR_DUAL_SVIO "DualSVio" -#define GRB_INT_ATTR_DUAL_VIO_INDEX "DualVioIndex" -#define GRB_INT_ATTR_DUAL_SVIO_INDEX "DualSVioIndex" -#define GRB_DBL_ATTR_DUAL_VIO_SUM "DualVioSum" -#define GRB_DBL_ATTR_DUAL_SVIO_SUM "DualSVioSum" -#define GRB_DBL_ATTR_DUAL_RESIDUAL "DualResidual" -#define GRB_DBL_ATTR_DUAL_SRESIDUAL "DualSResidual" -#define GRB_INT_ATTR_DUAL_RESIDUAL_INDEX "DualResidualIndex" -#define GRB_INT_ATTR_DUAL_SRESIDUAL_INDEX "DualSResidualIndex" -#define GRB_DBL_ATTR_DUAL_RESIDUAL_SUM "DualResidualSum" -#define GRB_DBL_ATTR_DUAL_SRESIDUAL_SUM "DualSResidualSum" -#define GRB_DBL_ATTR_INT_VIO "IntVio" -#define GRB_INT_ATTR_INT_VIO_INDEX "IntVioIndex" -#define GRB_DBL_ATTR_INT_VIO_SUM "IntVioSum" -#define GRB_DBL_ATTR_COMPL_VIO "ComplVio" -#define GRB_INT_ATTR_COMPL_VIO_INDEX "ComplVioIndex" -#define GRB_DBL_ATTR_COMPL_VIO_SUM "ComplVioSum" -#define GRB_DBL_ATTR_KAPPA "Kappa" -#define GRB_DBL_ATTR_KAPPA_EXACT "KappaExact" -#define GRB_DBL_ATTR_N2KAPPA "N2Kappa" -#define GRB_DBL_ATTR_SA_OBJLOW "SAObjLow" -#define GRB_DBL_ATTR_SA_OBJUP "SAObjUp" -#define GRB_DBL_ATTR_SA_LBLOW "SALBLow" -#define GRB_DBL_ATTR_SA_LBUP "SALBUp" -#define GRB_DBL_ATTR_SA_UBLOW "SAUBLow" -#define GRB_DBL_ATTR_SA_UBUP "SAUBUp" -#define GRB_DBL_ATTR_SA_RHSLOW "SARHSLow" -#define GRB_DBL_ATTR_SA_RHSUP "SARHSUp" -#define GRB_INT_ATTR_IIS_MINIMAL "IISMinimal" 
-#define GRB_INT_ATTR_IIS_LB "IISLB" -#define GRB_INT_ATTR_IIS_UB "IISUB" -#define GRB_INT_ATTR_IIS_CONSTR "IISConstr" -#define GRB_INT_ATTR_IIS_SOS "IISSOS" -#define GRB_INT_ATTR_IIS_QCONSTR "IISQConstr" -#define GRB_INT_ATTR_IIS_GENCONSTR "IISGenConstr" -#define GRB_INT_ATTR_IIS_LBFORCE "IISLBForce" -#define GRB_INT_ATTR_IIS_UBFORCE "IISUBForce" -#define GRB_INT_ATTR_IIS_CONSTRFORCE "IISConstrForce" -#define GRB_INT_ATTR_IIS_SOSFORCE "IISSOSForce" -#define GRB_INT_ATTR_IIS_QCONSTRFORCE "IISQConstrForce" -#define GRB_INT_ATTR_IIS_GENCONSTRFORCE "IISGenConstrForce" -#define GRB_INT_ATTR_TUNE_RESULTCOUNT "TuneResultCount" -#define GRB_DBL_ATTR_FARKASDUAL "FarkasDual" -#define GRB_DBL_ATTR_FARKASPROOF "FarkasProof" -#define GRB_DBL_ATTR_UNBDRAY "UnbdRay" -#define GRB_INT_ATTR_INFEASVAR "InfeasVar" -#define GRB_INT_ATTR_UNBDVAR "UnbdVar" -#define GRB_INT_ATTR_VARPRESTAT "VarPreStat" -#define GRB_DBL_ATTR_PREFIXVAL "PreFixVal" -#define GRB_DBL_ATTR_OBJN "ObjN" -#define GRB_DBL_ATTR_OBJNVAL "ObjNVal" -#define GRB_DBL_ATTR_OBJNCON "ObjNCon" -#define GRB_DBL_ATTR_OBJNWEIGHT "ObjNWeight" -#define GRB_INT_ATTR_OBJNPRIORITY "ObjNPriority" -#define GRB_DBL_ATTR_OBJNRELTOL "ObjNRelTol" -#define GRB_DBL_ATTR_OBJNABSTOL "ObjNAbsTol" -#define GRB_STR_ATTR_OBJNNAME "ObjNName" -#define GRB_DBL_ATTR_SCENNLB "ScenNLB" -#define GRB_DBL_ATTR_SCENNUB "ScenNUB" -#define GRB_DBL_ATTR_SCENNOBJ "ScenNObj" -#define GRB_DBL_ATTR_SCENNRHS "ScenNRHS" -#define GRB_STR_ATTR_SCENNNAME "ScenNName" -#define GRB_DBL_ATTR_SCENNX "ScenNX" -#define GRB_DBL_ATTR_SCENNOBJBOUND "ScenNObjBound" -#define GRB_DBL_ATTR_SCENNOBJVAL "ScenNObjVal" -#define GRB_INT_ATTR_NUMOBJ "NumObj" -#define GRB_INT_ATTR_NUMSCENARIOS "NumScenarios" -#define GRB_INT_ATTR_NUMSTART "NumStart" -#define GRB_GENCONSTR_MAX 0 -#define GRB_GENCONSTR_MIN 1 -#define GRB_GENCONSTR_ABS 2 -#define GRB_GENCONSTR_AND 3 -#define GRB_GENCONSTR_OR 4 -#define GRB_GENCONSTR_NORM 5 -#define GRB_GENCONSTR_INDICATOR 6 -#define GRB_GENCONSTR_PWL 7 
-#define GRB_GENCONSTR_POLY 8 -#define GRB_GENCONSTR_EXP 9 -#define GRB_GENCONSTR_EXPA 10 -#define GRB_GENCONSTR_LOG 11 -#define GRB_GENCONSTR_LOGA 12 -#define GRB_GENCONSTR_POW 13 -#define GRB_GENCONSTR_SIN 14 -#define GRB_GENCONSTR_COS 15 -#define GRB_GENCONSTR_TAN 16 -#define GRB_GENCONSTR_LOGISTIC 17 -#define GRB_CB_POLLING 0 -#define GRB_CB_PRESOLVE 1 -#define GRB_CB_SIMPLEX 2 -#define GRB_CB_MIP 3 -#define GRB_CB_MIPSOL 4 -#define GRB_CB_MIPNODE 5 -#define GRB_CB_MESSAGE 6 -#define GRB_CB_BARRIER 7 -#define GRB_CB_MULTIOBJ 8 -#define GRB_CB_IIS 9 -#define GRB_CB_PRE_COLDEL 1000 -#define GRB_CB_PRE_ROWDEL 1001 -#define GRB_CB_PRE_SENCHG 1002 -#define GRB_CB_PRE_BNDCHG 1003 -#define GRB_CB_PRE_COECHG 1004 -#define GRB_CB_SPX_ITRCNT 2000 -#define GRB_CB_SPX_OBJVAL 2001 -#define GRB_CB_SPX_PRIMINF 2002 -#define GRB_CB_SPX_DUALINF 2003 -#define GRB_CB_SPX_ISPERT 2004 -#define GRB_CB_MIP_OBJBST 3000 -#define GRB_CB_MIP_OBJBND 3001 -#define GRB_CB_MIP_NODCNT 3002 -#define GRB_CB_MIP_SOLCNT 3003 -#define GRB_CB_MIP_CUTCNT 3004 -#define GRB_CB_MIP_NODLFT 3005 -#define GRB_CB_MIP_ITRCNT 3006 -#define GRB_CB_MIP_OPENSCENARIOS 3007 -#define GRB_CB_MIP_PHASE 3008 -#define GRB_CB_MIPSOL_SOL 4001 -#define GRB_CB_MIPSOL_OBJ 4002 -#define GRB_CB_MIPSOL_OBJBST 4003 -#define GRB_CB_MIPSOL_OBJBND 4004 -#define GRB_CB_MIPSOL_NODCNT 4005 -#define GRB_CB_MIPSOL_SOLCNT 4006 -#define GRB_CB_MIPSOL_OPENSCENARIOS 4007 -#define GRB_CB_MIPSOL_PHASE 4008 -#define GRB_CB_MIPNODE_STATUS 5001 -#define GRB_CB_MIPNODE_REL 5002 -#define GRB_CB_MIPNODE_OBJBST 5003 -#define GRB_CB_MIPNODE_OBJBND 5004 -#define GRB_CB_MIPNODE_NODCNT 5005 -#define GRB_CB_MIPNODE_SOLCNT 5006 -#define GRB_CB_MIPNODE_BRVAR 5007 -#define GRB_CB_MIPNODE_OPENSCENARIOS 5008 -#define GRB_CB_MIPNODE_PHASE 5009 -#define GRB_CB_MSG_STRING 6001 -#define GRB_CB_RUNTIME 6002 -#define GRB_CB_WORK 6003 -#define GRB_CB_BARRIER_ITRCNT 7001 -#define GRB_CB_BARRIER_PRIMOBJ 7002 -#define GRB_CB_BARRIER_DUALOBJ 7003 -#define 
GRB_CB_BARRIER_PRIMINF 7004 -#define GRB_CB_BARRIER_DUALINF 7005 -#define GRB_CB_BARRIER_COMPL 7006 -#define GRB_CB_MULTIOBJ_OBJCNT 8001 -#define GRB_CB_MULTIOBJ_SOLCNT 8002 -#define GRB_CB_MULTIOBJ_SOL 8003 -#define GRB_CB_IIS_CONSTRMIN 9001 -#define GRB_CB_IIS_CONSTRMAX 9002 -#define GRB_CB_IIS_CONSTRGUESS 9003 -#define GRB_CB_IIS_BOUNDMIN 9004 -#define GRB_CB_IIS_BOUNDMAX 9005 -#define GRB_CB_IIS_BOUNDGUESS 9006 -#define GRB_FEASRELAX_LINEAR 0 -#define GRB_FEASRELAX_QUADRATIC 1 -#define GRB_FEASRELAX_CARDINALITY 2 -extern std::function GRBgetvars; -extern std::function GRBoptimize; -extern std::function GRBcomputeIIS; -#define MALLOCCB_ARGS size_t size, void *syscbusrdata -#define CALLOCCB_ARGS size_t nmemb, size_t size, void *syscbusrdata -#define REALLOCCB_ARGS void *ptr, size_t size, void *syscbusrdata -#define FREECB_ARGS void *ptr, void *syscbusrdata -#define THREADCREATECB_ARGS void **threadP, void (*start_routine)(void *), void *arg, void *syscbusrdata -#define THREADJOINCB_ARGS void *thread, void *syscbusrdata -extern std::function GRBwrite; -extern std::function GRBnewmodel; -extern std::function GRBaddvar; -extern std::function GRBaddvars; -extern std::function GRBaddconstr; -extern std::function GRBaddconstrs; -extern std::function GRBaddrangeconstr; -extern std::function GRBaddsos; -extern std::function GRBaddgenconstrMax; -extern std::function GRBaddgenconstrMin; -extern std::function GRBaddgenconstrAbs; -extern std::function GRBaddgenconstrAnd; -extern std::function GRBaddgenconstrOr; -extern std::function GRBaddgenconstrIndicator; -extern std::function GRBaddqconstr; -extern std::function GRBaddqpterms; -extern std::function GRBdelvars; -extern std::function GRBdelconstrs; -extern std::function GRBdelsos; -extern std::function GRBdelgenconstrs; -extern std::function GRBdelqconstrs; -extern std::function GRBdelq; -extern std::function GRBchgcoeffs; -extern std::function GRBupdatemodel; -extern std::function GRBfreemodel; -#define GRB_LOADED 1 
-#define GRB_OPTIMAL 2 -#define GRB_INFEASIBLE 3 -#define GRB_INF_OR_UNBD 4 -#define GRB_UNBOUNDED 5 -#define GRB_CUTOFF 6 -#define GRB_ITERATION_LIMIT 7 -#define GRB_NODE_LIMIT 8 -#define GRB_TIME_LIMIT 9 -#define GRB_SOLUTION_LIMIT 10 -#define GRB_INTERRUPTED 11 -#define GRB_NUMERIC 12 -#define GRB_SUBOPTIMAL 13 -#define GRB_INPROGRESS 14 -#define GRB_USER_OBJ_LIMIT 15 -#define GRB_WORK_LIMIT 16 -#define GRB_MEM_LIMIT 17 -#define GRB_BASIC 0 -#define GRB_NONBASIC_LOWER -1 -#define GRB_NONBASIC_UPPER -2 -#define GRB_SUPERBASIC -3 -#define GRB_INT_PAR_BARITERLIMIT "BarIterLimit" -#define GRB_DBL_PAR_CUTOFF "Cutoff" -#define GRB_DBL_PAR_ITERATIONLIMIT "IterationLimit" -#define GRB_DBL_PAR_NODELIMIT "NodeLimit" -#define GRB_INT_PAR_SOLUTIONLIMIT "SolutionLimit" -#define GRB_DBL_PAR_TIMELIMIT "TimeLimit" -#define GRB_DBL_PAR_WORKLIMIT "WorkLimit" -#define GRB_DBL_PAR_MEMLIMIT "MemLimit" -#define GRB_DBL_PAR_SOFTMEMLIMIT "SoftMemLimit" -#define GRB_DBL_PAR_BESTOBJSTOP "BestObjStop" -#define GRB_DBL_PAR_BESTBDSTOP "BestBdStop" -#define GRB_DBL_PAR_FEASIBILITYTOL "FeasibilityTol" -#define GRB_DBL_PAR_INTFEASTOL "IntFeasTol" -#define GRB_DBL_PAR_MARKOWITZTOL "MarkowitzTol" -#define GRB_DBL_PAR_MIPGAP "MIPGap" -#define GRB_DBL_PAR_MIPGAPABS "MIPGapAbs" -#define GRB_DBL_PAR_OPTIMALITYTOL "OptimalityTol" -#define GRB_DBL_PAR_PSDTOL "PSDTol" -#define GRB_INT_PAR_METHOD "Method" -#define GRB_DBL_PAR_PERTURBVALUE "PerturbValue" -#define GRB_DBL_PAR_OBJSCALE "ObjScale" -#define GRB_INT_PAR_SCALEFLAG "ScaleFlag" -#define GRB_INT_PAR_SIMPLEXPRICING "SimplexPricing" -#define GRB_INT_PAR_QUAD "Quad" -#define GRB_INT_PAR_NORMADJUST "NormAdjust" -#define GRB_INT_PAR_SIFTING "Sifting" -#define GRB_INT_PAR_SIFTMETHOD "SiftMethod" -#define GRB_INT_PAR_LPWARMSTART "LPWarmStart" -#define GRB_INT_PAR_NETWORKALG "NetworkAlg" -#define GRB_DBL_PAR_BARCONVTOL "BarConvTol" -#define GRB_INT_PAR_BARCORRECTORS "BarCorrectors" -#define GRB_INT_PAR_BARHOMOGENEOUS "BarHomogeneous" -#define 
GRB_INT_PAR_BARORDER "BarOrder" -#define GRB_DBL_PAR_BARQCPCONVTOL "BarQCPConvTol" -#define GRB_INT_PAR_CROSSOVER "Crossover" -#define GRB_INT_PAR_CROSSOVERBASIS "CrossoverBasis" -#define GRB_INT_PAR_BRANCHDIR "BranchDir" -#define GRB_INT_PAR_DEGENMOVES "DegenMoves" -#define GRB_INT_PAR_DISCONNECTED "Disconnected" -#define GRB_DBL_PAR_HEURISTICS "Heuristics" -#define GRB_DBL_PAR_IMPROVESTARTGAP "ImproveStartGap" -#define GRB_DBL_PAR_IMPROVESTARTTIME "ImproveStartTime" -#define GRB_DBL_PAR_IMPROVESTARTNODES "ImproveStartNodes" -#define GRB_INT_PAR_INTEGRALITYFOCUS "IntegralityFocus" -#define GRB_INT_PAR_MINRELNODES "MinRelNodes" -#define GRB_INT_PAR_MIPFOCUS "MIPFocus" -#define GRB_INT_PAR_NLPHEUR "NLPHeur" -#define GRB_STR_PAR_NODEFILEDIR "NodefileDir" -#define GRB_DBL_PAR_NODEFILESTART "NodefileStart" -#define GRB_INT_PAR_NODEMETHOD "NodeMethod" -#define GRB_DBL_PAR_NORELHEURTIME "NoRelHeurTime" -#define GRB_DBL_PAR_NORELHEURWORK "NoRelHeurWork" -#define GRB_INT_PAR_OBBT "OBBT" -#define GRB_INT_PAR_PUMPPASSES "PumpPasses" -#define GRB_INT_PAR_RINS "RINS" -#define GRB_STR_PAR_SOLFILES "SolFiles" -#define GRB_INT_PAR_STARTNODELIMIT "StartNodeLimit" -#define GRB_INT_PAR_SUBMIPNODES "SubMIPNodes" -#define GRB_INT_PAR_SYMMETRY "Symmetry" -#define GRB_INT_PAR_VARBRANCH "VarBranch" -#define GRB_INT_PAR_SOLUTIONNUMBER "SolutionNumber" -#define GRB_INT_PAR_ZEROOBJNODES "ZeroObjNodes" -#define GRB_INT_PAR_CUTS "Cuts" -#define GRB_INT_PAR_CLIQUECUTS "CliqueCuts" -#define GRB_INT_PAR_COVERCUTS "CoverCuts" -#define GRB_INT_PAR_FLOWCOVERCUTS "FlowCoverCuts" -#define GRB_INT_PAR_FLOWPATHCUTS "FlowPathCuts" -#define GRB_INT_PAR_GUBCOVERCUTS "GUBCoverCuts" -#define GRB_INT_PAR_IMPLIEDCUTS "ImpliedCuts" -#define GRB_INT_PAR_PROJIMPLIEDCUTS "ProjImpliedCuts" -#define GRB_INT_PAR_MIPSEPCUTS "MIPSepCuts" -#define GRB_INT_PAR_MIRCUTS "MIRCuts" -#define GRB_INT_PAR_STRONGCGCUTS "StrongCGCuts" -#define GRB_INT_PAR_MODKCUTS "ModKCuts" -#define GRB_INT_PAR_ZEROHALFCUTS "ZeroHalfCuts" 
-#define GRB_INT_PAR_NETWORKCUTS "NetworkCuts" -#define GRB_INT_PAR_SUBMIPCUTS "SubMIPCuts" -#define GRB_INT_PAR_INFPROOFCUTS "InfProofCuts" -#define GRB_INT_PAR_RLTCUTS "RLTCuts" -#define GRB_INT_PAR_RELAXLIFTCUTS "RelaxLiftCuts" -#define GRB_INT_PAR_BQPCUTS "BQPCuts" -#define GRB_INT_PAR_PSDCUTS "PSDCuts" -#define GRB_INT_PAR_LIFTPROJECTCUTS "LiftProjectCuts" -#define GRB_INT_PAR_CUTAGGPASSES "CutAggPasses" -#define GRB_INT_PAR_CUTPASSES "CutPasses" -#define GRB_INT_PAR_GOMORYPASSES "GomoryPasses" -#define GRB_STR_PAR_WORKERPOOL "WorkerPool" -#define GRB_STR_PAR_WORKERPASSWORD "WorkerPassword" -#define GRB_STR_PAR_COMPUTESERVER "ComputeServer" -#define GRB_STR_PAR_TOKENSERVER "TokenServer" -#define GRB_STR_PAR_SERVERPASSWORD "ServerPassword" -#define GRB_INT_PAR_SERVERTIMEOUT "ServerTimeout" -#define GRB_STR_PAR_CSROUTER "CSRouter" -#define GRB_STR_PAR_CSGROUP "CSGroup" -#define GRB_DBL_PAR_CSQUEUETIMEOUT "CSQueueTimeout" -#define GRB_INT_PAR_CSPRIORITY "CSPriority" -#define GRB_INT_PAR_CSIDLETIMEOUT "CSIdleTimeout" -#define GRB_INT_PAR_CSTLSINSECURE "CSTLSInsecure" -#define GRB_INT_PAR_TSPORT "TSPort" -#define GRB_STR_PAR_CLOUDACCESSID "CloudAccessID" -#define GRB_STR_PAR_CLOUDSECRETKEY "CloudSecretKey" -#define GRB_STR_PAR_CLOUDPOOL "CloudPool" -#define GRB_STR_PAR_CLOUDHOST "CloudHost" -#define GRB_STR_PAR_CSMANAGER "CSManager" -#define GRB_STR_PAR_CSAUTHTOKEN "CSAuthToken" -#define GRB_STR_PAR_CSAPIACCESSID "CSAPIAccessID" -#define GRB_STR_PAR_CSAPISECRET "CSAPISecret" -#define GRB_INT_PAR_CSBATCHMODE "CSBatchMode" -#define GRB_STR_PAR_USERNAME "Username" -#define GRB_STR_PAR_CSAPPNAME "CSAppName" -#define GRB_INT_PAR_CSCLIENTLOG "CSClientLog" -#define GRB_STR_PAR_WLSACCESSID "WLSAccessID" -#define GRB_STR_PAR_WLSSECRET "WLSSecret" -#define GRB_INT_PAR_WLSTOKENDURATION "WLSTokenDuration" -#define GRB_DBL_PAR_WLSTOKENREFRESH "WLSTokenRefresh" -#define GRB_STR_PAR_WLSTOKEN "WLSToken" -#define GRB_INT_PAR_LICENSEID "LicenseID" -#define GRB_INT_PAR_AGGREGATE 
"Aggregate" -#define GRB_INT_PAR_AGGFILL "AggFill" -#define GRB_INT_PAR_CONCURRENTMIP "ConcurrentMIP" -#define GRB_INT_PAR_CONCURRENTJOBS "ConcurrentJobs" -#define GRB_INT_PAR_DISPLAYINTERVAL "DisplayInterval" -#define GRB_INT_PAR_DISTRIBUTEDMIPJOBS "DistributedMIPJobs" -#define GRB_INT_PAR_DUALREDUCTIONS "DualReductions" -#define GRB_DBL_PAR_FEASRELAXBIGM "FeasRelaxBigM" -#define GRB_INT_PAR_IISMETHOD "IISMethod" -#define GRB_INT_PAR_INFUNBDINFO "InfUnbdInfo" -#define GRB_INT_PAR_JSONSOLDETAIL "JSONSolDetail" -#define GRB_INT_PAR_LAZYCONSTRAINTS "LazyConstraints" -#define GRB_STR_PAR_LOGFILE "LogFile" -#define GRB_INT_PAR_LOGTOCONSOLE "LogToConsole" -#define GRB_INT_PAR_MIQCPMETHOD "MIQCPMethod" -#define GRB_INT_PAR_NONCONVEX "NonConvex" -#define GRB_INT_PAR_NUMERICFOCUS "NumericFocus" -#define GRB_INT_PAR_OUTPUTFLAG "OutputFlag" -#define GRB_INT_PAR_PRECRUSH "PreCrush" -#define GRB_INT_PAR_PREDEPROW "PreDepRow" -#define GRB_INT_PAR_PREDUAL "PreDual" -#define GRB_INT_PAR_PREPASSES "PrePasses" -#define GRB_INT_PAR_PREQLINEARIZE "PreQLinearize" -#define GRB_INT_PAR_PRESOLVE "Presolve" -#define GRB_DBL_PAR_PRESOS1BIGM "PreSOS1BigM" -#define GRB_DBL_PAR_PRESOS2BIGM "PreSOS2BigM" -#define GRB_INT_PAR_PRESOS1ENCODING "PreSOS1Encoding" -#define GRB_INT_PAR_PRESOS2ENCODING "PreSOS2Encoding" -#define GRB_INT_PAR_PRESPARSIFY "PreSparsify" -#define GRB_INT_PAR_PREMIQCPFORM "PreMIQCPForm" -#define GRB_INT_PAR_QCPDUAL "QCPDual" -#define GRB_INT_PAR_RECORD "Record" -#define GRB_STR_PAR_RESULTFILE "ResultFile" -#define GRB_INT_PAR_SEED "Seed" -#define GRB_INT_PAR_SOLUTIONTARGET "SolutionTarget" -#define GRB_INT_PAR_THREADS "Threads" -#define GRB_DBL_PAR_TUNETIMELIMIT "TuneTimeLimit" -#define GRB_INT_PAR_TUNERESULTS "TuneResults" -#define GRB_INT_PAR_TUNECRITERION "TuneCriterion" -#define GRB_INT_PAR_TUNETRIALS "TuneTrials" -#define GRB_INT_PAR_TUNEOUTPUT "TuneOutput" -#define GRB_INT_PAR_TUNEJOBS "TuneJobs" -#define GRB_DBL_PAR_TUNECLEANUP "TuneCleanup" -#define 
GRB_DBL_PAR_TUNETARGETMIPGAP "TuneTargetMIPGap" -#define GRB_DBL_PAR_TUNETARGETTIME "TuneTargetTime" -#define GRB_INT_PAR_TUNEMETRIC "TuneMetric" -#define GRB_INT_PAR_UPDATEMODE "UpdateMode" -#define GRB_INT_PAR_OBJNUMBER "ObjNumber" -#define GRB_INT_PAR_MULTIOBJMETHOD "MultiObjMethod" -#define GRB_INT_PAR_MULTIOBJPRE "MultiObjPre" -#define GRB_INT_PAR_SCENARIONUMBER "ScenarioNumber" -#define GRB_INT_PAR_POOLSOLUTIONS "PoolSolutions" -#define GRB_DBL_PAR_POOLGAP "PoolGap" -#define GRB_DBL_PAR_POOLGAPABS "PoolGapAbs" -#define GRB_INT_PAR_POOLSEARCHMODE "PoolSearchMode" -#define GRB_INT_PAR_IGNORENAMES "IgnoreNames" -#define GRB_INT_PAR_STARTNUMBER "StartNumber" -#define GRB_INT_PAR_PARTITIONPLACE "PartitionPlace" -#define GRB_INT_PAR_FUNCPIECES "FuncPieces" -#define GRB_DBL_PAR_FUNCPIECELENGTH "FuncPieceLength" -#define GRB_DBL_PAR_FUNCPIECEERROR "FuncPieceError" -#define GRB_DBL_PAR_FUNCPIECERATIO "FuncPieceRatio" -#define GRB_DBL_PAR_FUNCMAXVAL "FuncMaxVal" -#define GRB_STR_PAR_DUMMY "Dummy" -#define GRB_STR_PAR_JOBID "JobID" -#define GRB_CUTS_AUTO -1 -#define GRB_CUTS_OFF 0 -#define GRB_CUTS_CONSERVATIVE 1 -#define GRB_CUTS_AGGRESSIVE 2 -#define GRB_CUTS_VERYAGGRESSIVE 3 -#define GRB_PRESOLVE_AUTO -1 -#define GRB_PRESOLVE_OFF 0 -#define GRB_PRESOLVE_CONSERVATIVE 1 -#define GRB_PRESOLVE_AGGRESSIVE 2 -#define GRB_METHOD_NONE -1 -#define GRB_METHOD_AUTO -1 -#define GRB_METHOD_PRIMAL 0 -#define GRB_METHOD_DUAL 1 -#define GRB_METHOD_BARRIER 2 -#define GRB_METHOD_CONCURRENT 3 -#define GRB_METHOD_DETERMINISTIC_CONCURRENT 4 -#define GRB_METHOD_DETERMINISTIC_CONCURRENT_SIMPLEX 5 -#define GRB_BARHOMOGENEOUS_AUTO -1 -#define GRB_BARHOMOGENEOUS_OFF 0 -#define GRB_BARHOMOGENEOUS_ON 1 -#define GRB_MIPFOCUS_BALANCED 0 -#define GRB_MIPFOCUS_FEASIBILITY 1 -#define GRB_MIPFOCUS_OPTIMALITY 2 -#define GRB_MIPFOCUS_BESTBOUND 3 -#define GRB_BARORDER_AUTOMATIC -1 -#define GRB_BARORDER_AMD 0 -#define GRB_BARORDER_NESTEDDISSECTION 1 -#define GRB_SIMPLEXPRICING_AUTO -1 -#define 
GRB_SIMPLEXPRICING_PARTIAL 0 -#define GRB_SIMPLEXPRICING_STEEPEST_EDGE 1 -#define GRB_SIMPLEXPRICING_DEVEX 2 -#define GRB_SIMPLEXPRICING_STEEPEST_QUICK 3 -#define GRB_VARBRANCH_AUTO -1 -#define GRB_VARBRANCH_PSEUDO_REDUCED 0 -#define GRB_VARBRANCH_PSEUDO_SHADOW 1 -#define GRB_VARBRANCH_MAX_INFEAS 2 -#define GRB_VARBRANCH_STRONG 3 -#define GRB_PARTITION_EARLY 16 -#define GRB_PARTITION_ROOTSTART 8 -#define GRB_PARTITION_ROOTEND 4 -#define GRB_PARTITION_NODES 2 -#define GRB_PARTITION_CLEANUP 1 -#define GRB_PHASE_MIP_NOREL 0 -#define GRB_PHASE_MIP_SEARCH 1 -#define GRB_PHASE_MIP_IMPROVE 2 -extern std::function GRBterminate; -extern std::function GRBsetobjectiven; -extern std::function GRBgetintparam; -extern std::function GRBgetdblparam; -extern std::function GRBgetstrparam; -extern std::function GRBsetparam; -extern std::function GRBsetintparam; -extern std::function GRBsetdblparam; -extern std::function GRBsetstrparam; -extern std::function GRBresetparams; -extern std::function GRBcopyparams; -extern std::function GRBloadenv; -extern std::function GRBstartenv; -extern std::function GRBemptyenv; -extern std::function GRBgetnumparams; -extern std::function GRBgetparamname; -extern std::function GRBgetparamtype; -extern std::function GRBgetintparaminfo; -extern std::function GRBgetdblparaminfo; -extern std::function GRBgetstrparaminfo; -extern std::function GRBgetenv; -extern std::function GRBgetmultiobjenv; -extern std::function GRBdiscardmultiobjenvs; -extern std::function GRBfreeenv; -extern std::function GRBgeterrormsg; -extern std::function GRBversion; -extern std::function GRBplatform; -#define GRB_BATCH_STATUS_UNKNOWN 0 -#define GRB_BATCH_CREATED 1 -#define GRB_BATCH_SUBMITTED 2 -#define GRB_BATCH_ABORTED 3 -#define GRB_BATCH_FAILED 4 -#define GRB_BATCH_COMPLETED 5 -} // namespace operations_research - -#endif // OR_TOOLS_GUROBI_ENVIRONMENT_H_ diff --git a/ortools/gurobi/isv_public/BUILD.bazel b/ortools/gurobi/isv_public/BUILD.bazel index d50111b386..cabab7cc8b 
100644 --- a/ortools/gurobi/isv_public/BUILD.bazel +++ b/ortools/gurobi/isv_public/BUILD.bazel @@ -18,8 +18,8 @@ cc_library( srcs = ["gurobi_isv.cc"], hdrs = ["gurobi_isv.h"], deps = [ - "//ortools/gurobi:environment", "//ortools/math_opt/solvers:gurobi_cc_proto", + "//ortools/third_party_solvers:gurobi_environment", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", diff --git a/ortools/gurobi/isv_public/gurobi_isv.h b/ortools/gurobi/isv_public/gurobi_isv.h index 1850e91418..ce6946db2b 100644 --- a/ortools/gurobi/isv_public/gurobi_isv.h +++ b/ortools/gurobi/isv_public/gurobi_isv.h @@ -18,7 +18,7 @@ #include #include "absl/status/statusor.h" -#include "ortools/gurobi/environment.h" +#include "ortools/third_party_solvers/gurobi_environment.h" namespace operations_research::math_opt { diff --git a/ortools/init/BUILD.bazel b/ortools/init/BUILD.bazel index a6121f8f76..c3f0cc4290 100644 --- a/ortools/init/BUILD.bazel +++ b/ortools/init/BUILD.bazel @@ -19,9 +19,9 @@ cc_library( hdrs = ["init.h"], deps = [ "//ortools/base", - "//ortools/gurobi:environment", "//ortools/sat:cp_model_solver", "//ortools/sat:cp_model_solver_helpers", + "//ortools/third_party_solvers:gurobi_environment", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:globals", diff --git a/ortools/init/init.cc b/ortools/init/init.cc index aefe9ccfbe..58b1f73ead 100644 --- a/ortools/init/init.cc +++ b/ortools/init/init.cc @@ -20,9 +20,9 @@ #include "absl/log/globals.h" #include "absl/strings/string_view.h" #include "ortools/base/init_google.h" -#include "ortools/gurobi/environment.h" #include "ortools/sat/cp_model_solver.h" #include "ortools/sat/cp_model_solver_helpers.h" +#include "ortools/third_party_solvers/gurobi_environment.h" namespace operations_research { void CppBridge::InitLogging(absl::string_view usage) { diff --git a/ortools/linear_solver/BUILD.bazel b/ortools/linear_solver/BUILD.bazel index bbf790962b..e8103e9968 100644 
--- a/ortools/linear_solver/BUILD.bazel +++ b/ortools/linear_solver/BUILD.bazel @@ -12,7 +12,6 @@ # limitations under the License. load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") -# load("@bazel_skylib//rules:copy_file.bzl", "copy_file") load("@protobuf//bazel:cc_proto_library.bzl", "cc_proto_library") load("@protobuf//bazel:proto_library.bzl", "proto_library") load("@protobuf//bazel:py_proto_library.bzl", "py_proto_library") @@ -260,14 +259,12 @@ cc_library( ":model_validator", "//ortools/base", "//ortools/base:accurate_sum", - "//ortools/base:dynamic_library", "//ortools/base:hash", "//ortools/base:logging", "//ortools/base:map_util", "//ortools/base:status_macros", "//ortools/base:stl_util", "//ortools/base:timer", - "//ortools/gurobi:environment", "//ortools/linear_solver/proto_solver:gurobi_proto_solver", "//ortools/linear_solver/proto_solver:sat_proto_solver", "//ortools/port:file", @@ -275,9 +272,10 @@ cc_library( "//ortools/sat:cp_model_cc_proto", "//ortools/sat:cp_model_solver", "//ortools/sat:lp_utils", + "//ortools/third_party_solvers:gurobi_environment", + "//ortools/third_party_solvers:xpress_environment", "//ortools/util:fp_utils", "//ortools/util:lazy_mutable_copy", - "//ortools/xpress:environment", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", @@ -344,6 +342,21 @@ cc_library( ], ) +cc_library( + name = "gurobi_util", + srcs = ["gurobi_util.cc"], + hdrs = ["gurobi_util.h"], + deps = [ + "//ortools/third_party_solvers:gurobi_environment", + "@abseil-cpp//absl/status", + "@abseil-cpp//absl/strings", + "@abseil-cpp//absl/strings:str_format", + "@abseil-cpp//absl/flags:flag", + "@abseil-cpp//absl/log", + "@abseil-cpp//absl/status:statusor", + ], +) + cc_library( name = "scip_helper_macros", hdrs = ["scip_helper_macros.h"], diff --git a/ortools/linear_solver/gurobi_interface.cc b/ortools/linear_solver/gurobi_interface.cc index 610267832d..35d5ace103 100644 --- 
a/ortools/linear_solver/gurobi_interface.cc +++ b/ortools/linear_solver/gurobi_interface.cc @@ -66,12 +66,12 @@ #include "absl/time/time.h" #include "ortools/base/logging.h" #include "ortools/base/timer.h" -#include "ortools/gurobi/environment.h" #include "ortools/linear_solver/gurobi_util.h" #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver_callback.h" #include "ortools/linear_solver/proto_solver/gurobi_proto_solver.h" #include "ortools/linear_solver/proto_solver/proto_utils.h" +#include "ortools/third_party_solvers/gurobi_environment.h" #include "ortools/util/lazy_mutable_copy.h" #include "ortools/util/time_limit.h" @@ -543,6 +543,13 @@ struct MPCallbackWithGurobiContext { // NOTE(user): This function must have this exact API, because we are passing // it to Gurobi as a callback. + +#if defined(_MSC_VER) +#define GUROBI_STDCALL __stdcall +#else +#define GUROBI_STDCALL +#endif + int GUROBI_STDCALL CallbackImpl(GRBmodel* model, void* gurobi_internal_callback_data, int where, void* raw_model_and_callback) { diff --git a/ortools/linear_solver/gurobi_util.cc b/ortools/linear_solver/gurobi_util.cc index 2f163a4aab..0fc5ba6a59 100644 --- a/ortools/linear_solver/gurobi_util.cc +++ b/ortools/linear_solver/gurobi_util.cc @@ -17,13 +17,44 @@ #include #include +#include "absl/log/log.h" +#include "absl/status/status.h" +#include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" -#include "ortools/gurobi/environment.h" +#include "ortools/base/status_macros.h" +#include "ortools/third_party_solvers/gurobi_environment.h" namespace operations_research { +bool GurobiIsCorrectlyInstalled() { + absl::StatusOr status = GetGurobiEnv(); + if (!status.ok() || status.value() == nullptr) { + LOG(WARNING) << status.status(); + return false; + } + + GRBfreeenv(status.value()); + + return true; +} + +absl::StatusOr GetGurobiEnv() { + GRBenv* env = nullptr; + + 
RETURN_IF_ERROR(LoadGurobiDynamicLibrary({})); + + if (GRBloadenv(&env, nullptr) != 0 || env == nullptr) { + return absl::FailedPreconditionError( + absl::StrCat("Found the Gurobi shared library, but could not create " + "Gurobi environment: is Gurobi licensed on this machine?", + GRBgeterrormsg(env))); + } + + return env; +} + std::string GurobiParamInfoForLogging(GRBenv* grb, bool one_liner_output) { const absl::ParsedFormat<'s', 's', 's'> kExtendedFormat( " Parameter: '%s' value: %s default: %s"); diff --git a/ortools/linear_solver/gurobi_util.h b/ortools/linear_solver/gurobi_util.h index 812d822df2..2a258b2696 100644 --- a/ortools/linear_solver/gurobi_util.h +++ b/ortools/linear_solver/gurobi_util.h @@ -16,10 +16,19 @@ #include -#include "ortools/gurobi/environment.h" +#include "absl/flags/declare.h" +#include "absl/status/statusor.h" +#include "ortools/third_party_solvers/gurobi_environment.h" namespace operations_research { +absl::StatusOr GetGurobiEnv(); + +// This returns true if the Gurobi shared library is properly loaded (otherwise, +// tries to find it and load it) and if a Gurobi license can be obtained (it +// does that by trying to grab a license and then release it). +bool GurobiIsCorrectlyInstalled(); + // Returns a human-readable listing of all gurobi parameters that are set to // non-default values, and their current value in the given environment. If all // parameters are at their default value, returns the empty string. 
diff --git a/ortools/linear_solver/proto_solver/BUILD.bazel b/ortools/linear_solver/proto_solver/BUILD.bazel index 04986ea7fd..eea9aa0f12 100644 --- a/ortools/linear_solver/proto_solver/BUILD.bazel +++ b/ortools/linear_solver/proto_solver/BUILD.bazel @@ -159,9 +159,10 @@ cc_library( hdrs = ["gurobi_proto_solver.h"], deps = [ "//ortools/base:timer", - "//ortools/gurobi:environment", + "//ortools/linear_solver:gurobi_util", "//ortools/linear_solver:linear_solver_cc_proto", "//ortools/linear_solver:model_validator", + "//ortools/third_party_solvers:gurobi_environment", "//ortools/util:lazy_mutable_copy", "@abseil-cpp//absl/base:core_headers", "@abseil-cpp//absl/cleanup", diff --git a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc index aba0d9ded5..4e58dcdd80 100644 --- a/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc +++ b/ortools/linear_solver/proto_solver/gurobi_proto_solver.cc @@ -36,12 +36,12 @@ #include "absl/strings/string_view.h" #include "absl/time/clock.h" #include "absl/time/time.h" -#include "absl/types/optional.h" #include "ortools/base/status_macros.h" #include "ortools/base/timer.h" -#include "ortools/gurobi/environment.h" +#include "ortools/linear_solver/gurobi_util.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/model_validator.h" +#include "ortools/third_party_solvers/gurobi_environment.h" #include "ortools/util/lazy_mutable_copy.h" namespace operations_research { diff --git a/ortools/linear_solver/proto_solver/gurobi_proto_solver.h b/ortools/linear_solver/proto_solver/gurobi_proto_solver.h index e9aae72af3..4ad96852cb 100644 --- a/ortools/linear_solver/proto_solver/gurobi_proto_solver.h +++ b/ortools/linear_solver/proto_solver/gurobi_proto_solver.h @@ -14,13 +14,11 @@ #ifndef OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_GUROBI_PROTO_SOLVER_H_ #define OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_GUROBI_PROTO_SOLVER_H_ -#include - #include 
"absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" -#include "ortools/gurobi/environment.h" #include "ortools/linear_solver/linear_solver.pb.h" +#include "ortools/third_party_solvers/gurobi_environment.h" #include "ortools/util/lazy_mutable_copy.h" namespace operations_research { diff --git a/ortools/linear_solver/proto_solver/xpress_proto_solver.cc b/ortools/linear_solver/proto_solver/xpress_proto_solver.cc deleted file mode 100644 index 2e22b17bb2..0000000000 --- a/ortools/linear_solver/proto_solver/xpress_proto_solver.cc +++ /dev/null @@ -1,970 +0,0 @@ -// Copyright 2010-2025 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "ortools/linear_solver/proto_solver/xpress_proto_solver.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "absl/base/attributes.h" -#include "absl/cleanup/cleanup.h" -#include "absl/log/check.h" -#include "absl/status/status.h" -#include "absl/status/statusor.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_format.h" -#include "absl/strings/str_join.h" -#include "absl/strings/str_split.h" -#include "absl/strings/string_view.h" -#include "absl/time/clock.h" -#include "absl/time/time.h" -#include "absl/types/optional.h" -#include "ortools/base/logging.h" -#include "ortools/base/status_macros.h" -#include "ortools/base/timer.h" -#include "ortools/linear_solver/linear_solver.pb.h" -#include "ortools/linear_solver/model_validator.h" -#include "ortools/util/lazy_mutable_copy.h" -#include "ortools/xpress/environment.h" - -namespace operations_research { - -// namespace { -// constexpr int XPRS_OK = 0; - -// bool XPressCodeToInvalidResponse(int error_code, const char* source_file, -// int source_line, const char* statement, -// XPRSprob prob, MPSolutionResponse* response) -// { -// if (error_code == XPRS_OK) return true; -// response->set_status(); -// response->set_status_message(absl::StrFormat( -// "XPress error code %d (file '%s', line %d) on '%s': %s", error_code, -// source_file, source_line, statement, XPRSgeterrormsg(prob))); -// return false; -// } - -// int AddIndicatorConstraint(const MPGeneralConstraintProto& gen_cst, -// XPRSprob xpress_model, -// std::vector* tmp_variables, -// std::vector* tmp_coefficients) { -// CHECK(xpress_model != nullptr); -// CHECK(tmp_variables != nullptr); -// CHECK(tmp_coefficients != nullptr); - -// const auto& ind_cst = gen_cst.indicator_constraint(); -// MPConstraintProto cst = ind_cst.constraint(); -// if (cst.lower_bound() > -std::numeric_limits::infinity()) { -// int status = XPRSaddgenconstrIndicator( -// xpress_model, gen_cst.name().c_str(), 
ind_cst.var_index(), -// ind_cst.var_value(), cst.var_index_size(), -// cst.mutable_var_index()->mutable_data(), -// cst.mutable_coefficient()->mutable_data(), -// cst.upper_bound() == cst.lower_bound() ? XPRS_EQUAL -// : XPRS_GREATER_EQUAL, -// cst.lower_bound()); -// if (status != XPRS_OK) return status; -// } -// if (cst.upper_bound() < std::numeric_limits::infinity() && -// cst.lower_bound() != cst.upper_bound()) { -// return XPRSaddgenconstrIndicator(xpress_model, gen_cst.name().c_str(), -// ind_cst.var_index(), -// ind_cst.var_value(), -// cst.var_index_size(), -// cst.mutable_var_index()->mutable_data(), -// cst.mutable_coefficient()->mutable_data(), -// XPRS_LESS_EQUAL, cst.upper_bound()); -// } - -// return XPRS_OK; -// } - -// int AddSosConstraint(const MPSosConstraint& sos_cst, XPRSprob xpress_model, -// std::vector* tmp_variables, -// std::vector* tmp_weights) { -// CHECK(xpress_model != nullptr); -// CHECK(tmp_variables != nullptr); -// CHECK(tmp_weights != nullptr); - -// tmp_variables->resize(sos_cst.var_index_size(), 0); -// for (int v = 0; v < sos_cst.var_index_size(); ++v) { -// (*tmp_variables)[v] = sos_cst.var_index(v); -// } -// tmp_weights->resize(sos_cst.var_index_size(), 0); -// if (sos_cst.weight_size() == sos_cst.var_index_size()) { -// for (int w = 0; w < sos_cst.weight_size(); ++w) { -// (*tmp_weights)[w] = sos_cst.weight(w); -// } -// } else { -// DCHECK_EQ(sos_cst.weight_size(), 0); -// // XPress requires variable weights in their SOS constraints. -// std::iota(tmp_weights->begin(), tmp_weights->end(), 1); -// } - -// std::vector types = {sos_cst.type() == MPSosConstraint::SOS1_DEFAULT -// ? 
XPRS_SOS_TYPE1 -// : XPRS_SOS_TYPE2}; -// std::vector begins = {0}; -// return XPRSaddsos(xpress_model, /*numsos=*/1, -// /*nummembers=*/sos_cst.var_index_size(), -// /*types=*/types.data(), -// /*beg=*/begins.data(), /*ind=*/tmp_variables->data(), -// /*weight*/ tmp_weights->data()); -// } - -// int AddQuadraticConstraint(const MPGeneralConstraintProto& gen_cst, -// XPRSprob xpress_model) { -// CHECK(xpress_model != nullptr); -// constexpr double kInfinity = std::numeric_limits::infinity(); - -// CHECK(gen_cst.has_quadratic_constraint()); -// const MPQuadraticConstraint& quad_cst = gen_cst.quadratic_constraint(); - -// auto addqconstr = [](XPRSprob xpress_model, MPQuadraticConstraint quad_cst, -// char sense, double rhs, const std::string& name) { -// return XPRSaddqconstr( -// xpress_model, -// /*numlnz=*/quad_cst.var_index_size(), -// /*lind=*/quad_cst.mutable_var_index()->mutable_data(), -// /*lval=*/quad_cst.mutable_coefficient()->mutable_data(), -// /*numqnz=*/quad_cst.qvar1_index_size(), -// /*qrow=*/quad_cst.mutable_qvar1_index()->mutable_data(), -// /*qcol=*/quad_cst.mutable_qvar2_index()->mutable_data(), -// /*qval=*/quad_cst.mutable_qcoefficient()->mutable_data(), -// /*sense=*/sense, -// /*rhs=*/rhs, -// /*QCname=*/name.c_str()); -// }; - -// if (quad_cst.has_lower_bound() && quad_cst.lower_bound() > -kInfinity) { -// const int xprs_status = -// addqconstr(xpress_model, gen_cst.quadratic_constraint(), -// XPRS_GREATER_EQUAL, quad_cst.lower_bound(), -// gen_cst.has_name() ? gen_cst.name() + "_lb" : ""); -// if (xprs_status != XPRS_OK) return xprs_status; -// } -// if (quad_cst.has_upper_bound() && quad_cst.upper_bound() < kInfinity) { -// const int xprs_status = -// addqconstr(xpress_model, gen_cst.quadratic_constraint(), -// XPRS_LESS_EQUAL, quad_cst.upper_bound(), -// gen_cst.has_name() ? 
gen_cst.name() + "_ub" : ""); -// if (xprs_status != XPRS_OK) return xprs_status; -// } - -// return XPRS_OK; -// } - -// int AddAndConstraint(const MPGeneralConstraintProto& gen_cst, -// XPRSprob xpress_model, std::vector* tmp_variables) -// { -// CHECK(xpress_model != nullptr); -// CHECK(tmp_variables != nullptr); - -// auto and_cst = gen_cst.and_constraint(); -// return XPRSaddgenconstrAnd( -// xpress_model, -// /*name=*/gen_cst.name().c_str(), -// /*resvar=*/and_cst.resultant_var_index(), -// /*nvars=*/and_cst.var_index_size(), -// /*vars=*/and_cst.mutable_var_index()->mutable_data()); -// } - -// int AddOrConstraint(const MPGeneralConstraintProto& gen_cst, -// XPRSprob xpress_model, std::vector* tmp_variables) { -// CHECK(xpress_model != nullptr); -// CHECK(tmp_variables != nullptr); - -// auto or_cst = gen_cst.or_constraint(); -// return XPRSaddgenconstrOr( -// xpress_model, -// /*name=*/gen_cst.name().c_str(), -// /*resvar=*/or_cst.resultant_var_index(), -// /*nvars=*/or_cst.var_index_size(), -// /*vars=*/or_cst.mutable_var_index()->mutable_data()); -// } - -// int AddMinConstraint(const MPGeneralConstraintProto& gen_cst, -// XPRSprob xpress_model, std::vector* tmp_variables) -// { -// CHECK(xpress_model != nullptr); -// CHECK(tmp_variables != nullptr); - -// auto min_cst = gen_cst.min_constraint(); -// return XPRSaddgenconstrMin( -// xpress_model, -// /*name=*/gen_cst.name().c_str(), -// /*resvar=*/min_cst.resultant_var_index(), -// /*nvars=*/min_cst.var_index_size(), -// /*vars=*/min_cst.mutable_var_index()->mutable_data(), -// /*constant=*/min_cst.has_constant() -// ? 
min_cst.constant() -// : std::numeric_limits::infinity()); -// } - -// int AddMaxConstraint(const MPGeneralConstraintProto& gen_cst, -// XPRSprob xpress_model, std::vector* tmp_variables) -// { -// CHECK(xpress_model != nullptr); -// CHECK(tmp_variables != nullptr); - -// auto max_cst = gen_cst.max_constraint(); -// return XPRSaddgenconstrMax( -// xpress_model, -// /*name=*/gen_cst.name().c_str(), -// /*resvar=*/max_cst.resultant_var_index(), -// /*nvars=*/max_cst.var_index_size(), -// /*vars=*/max_cst.mutable_var_index()->mutable_data(), -// /*constant=*/max_cst.has_constant() -// ? max_cst.constant() -// : -std::numeric_limits::infinity()); -// } -// } // namespace - -// std::string SetSolverSpecificParameters(absl::string_view parameters, -// XPRSprob xpress) { -// if (parameters.empty()) return absl::OkStatus(); -// std::vector error_messages; -// for (absl::string_view line : absl::StrSplit(parameters, '\n')) { -// // Empty lines are simply ignored. -// if (line.empty()) continue; -// // Comment tokens end at the next new-line, or the end of the string. -// // The first character must be '#' -// if (line[0] == '#') continue; -// for (absl::string_view token : -// absl::StrSplit(line, ',', absl::SkipWhitespace())) { -// if (token.empty()) continue; -// std::vector key_value = -// absl::StrSplit(token, absl::ByAnyChar(" ="), -// absl::SkipWhitespace()); -// // If one parameter fails, we keep processing the list of parameters. -// if (key_value.size() != 2) { -// const std::string current_message = -// absl::StrCat("Cannot parse parameter '", token, -// "'. 
Expected format is 'ParameterName value' or " -// "'ParameterName=value'"); -// error_messages.push_back(current_message); -// continue; -// } -// const int xpress_code = -// XPRSsetparam(xpress, key_value[0].c_str(), key_value[1].c_str()); -// if (xpress_code != XPRS_OK) { -// const std::string current_message = absl::StrCat( -// "Error setting parameter '", key_value[0], "' to value '", -// key_value[1], "': ", XPRSgeterrormsg(xpress)); -// error_messages.push_back(current_message); -// continue; -// } -// VLOG(2) << absl::StrCat("Set parameter '", key_value[0], "' to value -// '", -// key_value[1]); -// } -// } - -// if (error_messages.empty()) return ""; -// return absl::StrJoin(error_messages, "\n"); -// } - -MPSolutionResponse XPressSolveProto(LazyMutableCopy request) { - MPSolutionResponse response; - response.set_status(MPSolverResponseStatus::MPSOLVER_SOLVER_TYPE_UNAVAILABLE); - - // const absl::optional> optional_model = - // ExtractValidMPModelOrPopulateResponseStatus(request, &response); - // if (!optional_model) return response; - // const MPModelProto& model = optional_model->get(); - - // // We set `xpress_env` to point to a new environment if no existing one - // is - // // provided. We must make sure that we free this environment when we exit - // this - // // function. 
- // bool xpress_env_was_created = false; - // auto xpress_env_deleter = absl::MakeCleanup([&]() { - // if (xpress_env_was_created && xpress_env != nullptr) { - // XPRSfreeenv(xpress_env); - // } - // }); - // if (xpress_env == nullptr) { - // ASSIGN_OR_RETURN(xpress_env, GetXPressEnv()); - // xpress_env_was_created = true; - // } - - // XPRSprob xpress_model = nullptr; - // auto xpress_model_deleter = absl::MakeCleanup([&]() { - // const int error_code = XPRSfreemodel(xpress_model); - // LOG_IF(DFATAL, error_code != XPRS_OK) - // << "XPRSfreemodel failed with error " << error_code << ": " - // << XPRSgeterrormsg(xpress_env); - // }); - - // // `xpress_env` references ther XPRSenv argument. - // #define RETURN_IF_XPRESS_ERROR(x) \ -// RETURN_IF_ERROR( \ -// if (!XPressCodeToInvalidResponse(x, __FILE__, __LINE__, #x, xpress, - // &response)) { \ -// return response; \ -// }) - - // RETURN_IF_XPRESS_ERROR(XPRSnewmodel(xpress_env, &xpress_model, - // model.name().c_str(), - // /*numvars=*/0, - // /*obj=*/nullptr, - // /*lb=*/nullptr, - // /*ub=*/nullptr, - // /*vtype=*/nullptr, - // /*varnames=*/nullptr)); - // XPRSprob const model_env = XPRSgetenv(xpress_model); - - // if (request.has_solver_specific_parameters()) { - // const auto parameters_status = SetSolverSpecificParameters( - // request.solver_specific_parameters(), model_env); - // if (!parameters_status.ok()) { - // response.set_status(MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); - // response.set_status_str( - // std::string(parameters_status.message())); // NOLINT - // return response; - // } - // } - // if (request.solver_time_limit_seconds() > 0) { - // RETURN_IF_XPRESS_ERROR( - // XPRSsetdblparam(model_env, XPRS_DBL_PAR_TIMELIMIT, - // request.solver_time_limit_seconds())); - // } - // RETURN_IF_XPRESS_ERROR( - // XPRSsetintparam(model_env, XPRS_INT_PAR_OUTPUTFLAG, - // request.enable_internal_solver_output())); - - // const int variable_size = model.variable_size(); - // bool has_integer_variables = 
false; - // { - // std::vector obj_coeffs(variable_size, 0); - // std::vector lb(variable_size); - // std::vector ub(variable_size); - // std::vector ctype(variable_size); - // std::vector varnames(variable_size); - // for (int v = 0; v < variable_size; ++v) { - // const MPVariableProto& variable = model.variable(v); - // obj_coeffs[v] = variable.objective_coefficient(); - // lb[v] = variable.lower_bound(); - // ub[v] = variable.upper_bound(); - // ctype[v] = variable.is_integer() && - // request.solver_type() ==SolutionRes - // : XPRS_CONTINUOUS; - // if (variable.is_integer()) has_integer_variables = true; - // if (!variable.name().empty()) varnames[v] = variable.name().c_str(); - // } - - // RETURN_IF_XPRESS_ERROR( - // XPRSaddvars(xpress_model, variable_size, 0, nullptr, nullptr, - // nullptr, - // /*obj=*/obj_coeffs.data(), - // /*lb=*/lb.data(), /*ub=*/ub.data(), - // /*vtype=*/ctype.data(), - // /*varnames=*/const_cast(varnames.data()))); - - // // Set solution hints if any. - // for (int i = 0; i < model.solution_hint().var_index_size(); ++i) { - // RETURN_IF_XPRESS_ERROR(XPRSsetdblattrelement( - // xpress_model, XPRS_DBL_ATTR_START, model.solution_hint().var_inde - // const absl::optional> - // optional_model = - // ExtractValidMPModelOrPopulateResponseStatus(request, &response); - // if (!optional_model) return response; - // const MPModelProto& model = optional_model->get(); - - // // We set `xpress_env` to point to a new environment if no existing one - // is - // // provided. We must make sure that we free this environment when we exit - // this - // // function. 
- // bool xpress_env_was_created = false; - // auto xpress_env_deleter = absl::MakeCleanup([&]() { - // if (xpress_env_was_created && xpress_env != nullptr) { - // XPRSfreeenv(xpress_env); - // } - // }); - // if (xpress_env == nullptr) { - // ASSIGN_OR_RETURN(xpress_env, GetXPressEnv()); - // xpress_env_was_created = true; - // } - - // XPRSprob xpress_model = nullptr; - // auto xpress_model_deleter = absl::MakeCleanup([&]() { - // const int error_code = XPRSfreemodel(xpress_model); - // LOG_IF(DFATAL, error_code != XPRS_OK) - // << "XPRSfreemodel failed with error " << error_code << ": " - // << XPRSgeterrormsg(xpress_env); - // }); - - // // `xpress_env` references ther XPRSenv argument. - // #define RETURN_IF_XPRESS_ERROR(x) \ -// RETURN_IF_ERROR( \ -// XPressCodeToUtilStatus(x, __FILE__, __LINE__, #x, xpress_env)); - - // RETURN_IF_XPRESS_ERROR(XPRSnewmodel(xpress_env, &xpress_model, - // model.name().c_str(), - // /*numvars=*/0, - // /*obj=*/nullptr, - // /*lb=*/nullptr, - // /*ub=*/nullptr, - // /*vtype=*/nullptr, - // /*varnames=*/nullptr)); - // XPRSprob const model_env = XPRSgetenv(xpress_model); - - // if (request.has_solver_specific_parameters()) { - // const auto parameters_status = SetSolverSpecificParameters( - // request.solver_specific_parameters(), model_env); - // if (!parameters_status.ok()) { - // response.set_status(MPSOLVER_MODEL_INVALID_SOLVER_PARAMETERS); - // response.set_status_str( - // std::string(parameters_status.message())); // NOLINT - // return response; - // } - // } - // if (request.solver_time_limit_seconds() > 0) { - // RETURN_IF_XPRESS_ERROR( - // XPRSsetdblparam(model_env, XPRS_DBL_PAR_TIMELIMIT, - // request.solver_time_limit_seconds())); - // } - // RETURN_IF_XPRESS_ERROR( - // XPRSsetintparam(model_env, XPRS_INT_PAR_OUTPUTFLAG, - // request.enable_internal_solver_output())); - - // const int variable_size = model.variable_size(); - // bool has_integer_variables = false; - // { - // std::vector obj_coeffs(variable_size, 0); 
- // std::vector lb(variable_size); - // std::vector ub(variable_size); - // std::vector ctype(variable_size); - // std::vector varnames(variable_size); - // for (int v = 0; v < variable_size; ++v) { - // const MPVariableProto& variable = model.variable(v); - // obj_coeffs[v] = variable.objective_coefficient(); - // lb[v] = variable.lower_bound(); - // ub[v] = variable.upper_bound(); - // ctype[v] = variable.is_integer() && - // request.solver_type() == - // MPModelRequest::XPRESS_MIXED_INTEGER_PROGRAMMING - // ? XPRS_INTEGER - // : XPRS_CONTINUOUS; - // if (variable.is_integer()) has_integer_variables = true; - // if (!variable.name().empty()) varnames[v] = variable.name().c_str(); - // } - - // RETURN_IF_XPRESS_ERROR( - // XPRSaddvars(xpress_model, variable_size, 0, nullptr, nullptr, - // nullptr, - // /*obj=*/obj_coeffs.data(), - // /*lb=*/lb.data(), /*ub=*/ub.data(), - // /*vtype=*/ctype.data(), - // /*varnames=*/const_cast(varnames.data()))); - - // // Set solution hints if any. - // for (int i = 0; i < model.solution_hint().var_index_size(); ++i) { - // RETURN_IF_XPRESS_ERROR(XPRSsetdblattrelement( - // xpress_model, XPRS_DBL_ATTR_START, - // model.solution_hint().var_index(i), - // model.solution_hint().var_value(i))); - // } - // } - - // { - // std::vector ct_variables; - // std::vector ct_coefficients; - // for (int c = 0; c < model.constraint_size(); ++c) { - // const MPConstraintProto& constraint = model.constraint(c); - // const int size = constraint.var_index_size(); - // ct_variables.resize(size, 0); - // ct_coefficients.resize(size, 0); - // for (int i = 0; i < size; ++i) { - // ct_variables[i] = constraint.var_index(i); - // ct_coefficients[i] = constraint.coefficient(i); - // } - // // Using XPRSaddrangeconstr for constraints that don't require it - // adds - // // a slack which is not always removed by presolve. 
- // if (constraint.lower_bound() == constraint.upper_bound()) { - // RETURN_IF_XPRESS_ERROR(XPRSaddconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*sense=*/XPRS_EQUAL, /*rhs=*/constraint.lower_bound(), - // /*constrname=*/constraint.name().c_str())); - // } else if (constraint.lower_bound() == - // -std::numeric_limits::infinity()) { - // RETURN_IF_XPRESS_ERROR(XPRSaddconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*sense=*/XPRS_LESS_EQUAL, /*rhs=*/constraint.upper_bound(), - // /*constrname=*/constraint.name().c_str())); - // } else if (constraint.upper_bound() == - // std::numeric_limits::infinity()) { - // RETURN_IF_XPRESS_ERROR(XPRSaddconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*sense=*/XPRS_GREATER_EQUAL, /*rhs=*/constraint.lower_bound(), - // /*constrname=*/constraint.name().c_str())); - // } else { - // RETURN_IF_XPRESS_ERROR(XPRSaddrangeconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*lower=*/constraint.lower_bound(), - // /*upper=*/constraint.upper_bound(), - // /*constrname=*/constraint.name().c_str())); - // } - // } - - // for (const auto& gen_cst : model.general_constraint()) { - // switch (gen_cst.general_constraint_case()) { - // case MPGeneralConstraintProto::kIndicatorConstraint: { - // RETURN_IF_XPRESS_ERROR(AddIndicatorConstraint( - // gen_cst, xpress_model, &ct_variables, &ct_coefficients)); - // break; - // } - // case MPGeneralConstraintProto::kSosConstraint: { - // RETURN_IF_XPRESS_ERROR(AddSosConstraint(gen_cst.sos_constraint(), - // xpress_model, - // &ct_variables, - // &ct_coefficients)); - // break; - // } - // case MPGeneralConstraintProto::kQuadraticConstraint: { - // RETURN_IF_XPRESS_ERROR(AddQuadraticConstraint(gen_cst, - // xpress_model)); break; - // } - 
// case MPGeneralConstraintProto::kAbsConstraint: { - // RETURN_IF_XPRESS_ERROR(XPRSaddgenconstrAbs( - // xpress_model, - // /*name=*/gen_cst.name().c_str(), - // /*resvar=*/gen_cst.abs_constraint().resultant_var_index(), - // /*argvar=*/gen_cst.abs_constraint().var_index())); - // break; - // } - // case MPGeneralConstraintProto::kAndConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddAndConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // case MPGeneralConstraintProto::kOrConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddOrConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // case MPGeneralConstraintProto::kMinConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddMinConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // case MPGeneralConstraintProto::kMaxConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddMaxConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // default: - // return absl::UnimplementedError( - // absl::StrFormat("General constraints of type %i not - // supported.", - // gen_cst.general_constraint_case())); - // } - // } - // } - - // RETURN_IF_XPRESS_ERROR(XPRSsetintattr(xpress_model, - // XPRS_INT_ATTR_MODELSENSE, - // model.maximize() ? 
-1 : 1)); - // RETURN_IF_XPRESS_ERROR(XPRSsetdblattr(xpress_model, XPRS_DBL_ATTR_OBJCON, - // model.objective_offset())); - // if (model.has_quadratic_objective()) { - // MPQuadraticObjective qobj = model.quadratic_objective(); - // if (qobj.coefficient_size() > 0) { - // RETURN_IF_XPRESS_ERROR( - // XPRSaddqpterms(xpress_model, /*numqnz=*/qobj.coefficient_size(), - // /*qrow=*/qobj.mutable_qvar1_index()->mutable_data(), - // /*qcol=*/qobj.mutable_qvar2_index()->mutable_data(), - // /*qval=*/qobj.mutable_coefficient()->mutable_data())); - // } - // } - - // RETURN_IF_XPRESS_ERROR(XPRSupdatemodel(xpress_model)); - - // const absl::Time time_before = absl::Now(); - // UserTimer user_timer; - // user_timer.Start(); - - // RETURN_IF_XPRESS_ERROR(XPRSoptimize(xpress_model)); - - // const absl::Duration solving_duration = absl::Now() - time_before; - // user_timer.Stop(); - // VLOG(1) << "Finished solving in XPressSolveProto(), walltime = " - // << solving_duration << ", usertime = " << - // user_timer.GetDuration(); - // response.mutable_solve_info()->set_solve_wall_time_seconds( - // absl::ToDoubleSeconds(solving_duration)); - // response.mutable_solve_info()->set_solve_user_time_seconds( - // absl::ToDoubleSeconds(user_timer.GetDuration())); - - // int optimization_status = 0; - // RETURN_IF_XPRESS_ERROR( - // XPRSgetintattr(xpress_model, XPRS_INT_ATTR_STATUS, - // &optimization_status)); - // int solution_count = 0; - // RETURN_IF_XPRESS_ERROR( - // XPRSgetintattr(xpress_model, XPRS_INT_ATTR_SOLCOUNT, - // &solution_count)); - // switch (optimization_status) { - // case XPRS_OPTIMAL: - // response.set_status(MPSOLVER_OPTIMAL); - // break; - // case XPRS_INF_OR_UNBD: - // DLOG(INFO) << "XPress solve returned XPRS_INF_OR_UNBD, which we treat - // as " - // "INFEASIBLE even though it may mean UNBOUNDED."; - // response.set_status_str( - // "The model may actually be unbounded: XPress returned " - // "XPRS_INF_OR_UNBD"); - // ABSL_FALLTHROUGH_INTENDED; - // case 
XPRS_INFEASIBLE: - // response.set_status(MPSOLVER_INFEASIBLE); - // break; - // case XPRS_UNBOUNDED: - // response.set_status(MPSOLVER_UNBOUNDED); - // break; - // default: { - // if (solution_count > 0) { - // response.set_status(MPSOLVER_FEASIBLE); - // } else { - // response.set_status(MPSOLVER_NOT_SOLVED); - // response.set_status_str( - // absl::StrFormat("XPress status code %d", optimization_status)); - // } - // break; - // } - // } - - // if (solution_count > 0 && (response.status() == MPSOLVER_FEASIBLE || - // response.status() == MPSOLVER_OPTIMAL)) { - // double objective_value = 0; - // RETURN_IF_XPRESS_ERROR( - // XPRSgetdblattr(xpress_model, XPRS_DBL_ATTR_OBJVAL, - // &objective_value)); - // response.set_objective_value(objective_value); - // double best_objective_bound = 0; - // const int error = XPRSgetdblattr(xpress_model, XPRS_DBL_ATTR_OBJBOUND, - // &best_objective_bound); - // if (response.status() == MPSOLVER_OPTIMAL && - // error == XPRS_ERROR_DATA_NOT_AVAILABLE) { - // // If the presolve deletes all variables, there's no best bound. - // response.set_best_objective_bound(objective_value); - // } else { - // RETURN_IF_XPRESS_ERROR(error); - // response.set_best_objective_bound(best_objective_bound); - // } - - // response.mutable_variable_value()->Resize(variable_size, 0); - // RETURN_IF_XPRESS_ERROR( - // XPRSgetdblattrarray(xpress_model, XPRS_DBL_ATTR_X, 0, - // variable_size, - // response.mutable_variable_value()->mutable_data())); - // // NOTE, XPressSolveProto() is exposed to external clients via MPSolver - // API, - // // which assumes the solution values of integer variables are rounded - // to - // // integer values. 
- // auto round_values_of_integer_variables_fn = - // [&](google::protobuf::RepeatedField* values) { - // for (int v = 0; v < variable_size; ++v) { - // if (model.variable(v).is_integer()) { - // (*values)[v] = std::round((*values)[v]); - // } - // } - // }; - // round_values_of_integer_variables_fn(response.mutable_variable_value()); - // if (!has_integer_variables && model.general_constraint_size() == 0) { - // response.mutable_dual_value()->Resize(model.constraint_size(), 0); - // RETURN_IF_XPRESS_ERROR(XPRSgetdblattrarray( - // xpress_model, XPRS_DBL_ATTR_PI, 0, model.constraint_size(), - // response.mutable_dual_value()->mutable_data())); - // } - // const int additional_solutions = std::min( - // solution_count, - // std::min(request.populate_additional_solutions_up_to(), - // std::numeric_limits::max() - 1) + - // 1); - // for (int i = 1; i < additional_solutions; ++i) { - // RETURN_IF_XPRESS_ERROR( - // XPRSsetintparam(model_env, XPRS_INT_PAR_SOLUTIONNUMBER, i)); - // MPSolution* solution = response.add_additional_solutions(); - // solution->mutable_variable_value()->Resize(variable_size, 0); - // double objective_value = 0; - // RETURN_IF_XPRESS_ERROR(XPRSgetdblattr( - // xpress_model, XPRS_DBL_ATTR_POOLOBJVAL, &objective_value)); - // solution->set_objective_value(objective_value); - // RETURN_IF_XPRESS_ERROR(XPRSgetdblattrarray( - // xpress_model, XPRS_DBL_ATTR_XN, 0, variable_size, - // solution->mutable_variable_value()->mutable_data())); - // round_values_of_integer_variables_fn(solution->mutable_variable_value()); - // } - // } - // #undef RETURN_IF_XPRESS_ERRORx(i), - // model.solution_hint().var_value(i))); - // } - // } - - // { - // std::vector ct_variables; - // std::vector ct_coefficients; - // for (int c = 0; c < model.constraint_size(); ++c) { - // const MPConstraintProto& constraint = model.constraint(c); - // const int size = constraint.var_index_size(); - // ct_variables.resize(size, 0); - // ct_coefficients.resize(size, 0); - // for (int 
i = 0; i < size; ++i) { - // ct_variables[i] = constraint.var_index(i); - // ct_coefficients[i] = constraint.coefficient(i); - // } - // // Using XPRSaddrangeconstr for constraints that don't require it - // adds - // // a slack which is not always removed by presolve. - // if (constraint.lower_bound() == constraint.upper_bound()) { - // RETURN_IF_XPRESS_ERROR(XPRSaddconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*sense=*/XPRS_EQUAL, /*rhs=*/constraint.lower_bound(), - // /*constrname=*/constraint.name().c_str())); - // } else if (constraint.lower_bound() == - // -std::numeric_limits::infinity()) { - // RETURN_IF_XPRESS_ERROR(XPRSaddconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*sense=*/XPRS_LESS_EQUAL, /*rhs=*/constraint.upper_bound(), - // /*constrname=*/constraint.name().c_str())); - // } else if (constraint.upper_bound() == - // std::numeric_limits::infinity()) { - // RETURN_IF_XPRESS_ERROR(XPRSaddconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*sense=*/XPRS_GREATER_EQUAL, /*rhs=*/constraint.lower_bound(), - // /*constrname=*/constraint.name().c_str())); - // } else { - // RETURN_IF_XPRESS_ERROR(XPRSaddrangeconstr( - // xpress_model, /*numnz=*/size, /*cind=*/ct_variables.data(), - // /*cval=*/ct_coefficients.data(), - // /*lower=*/constraint.lower_bound(), - // /*upper=*/constraint.upper_bound(), - // /*constrname=*/constraint.name().c_str())); - // } - // } - - // for (const auto& gen_cst : model.general_constraint()) { - // switch (gen_cst.general_constraint_case()) { - // case MPGeneralConstraintProto::kIndicatorConstraint: { - // RETURN_IF_XPRESS_ERROR(AddIndicatorConstraint( - // gen_cst, xpress_model, &ct_variables, &ct_coefficients)); - // break; - // } - // case MPGeneralConstraintProto::kSosConstraint: { - // 
RETURN_IF_XPRESS_ERROR(AddSosConstraint(gen_cst.sos_constraint(), - // xpress_model, - // &ct_variables, - // &ct_coefficients)); - // break; - // } - // case MPGeneralConstraintProto::kQuadraticConstraint: { - // RETURN_IF_XPRESS_ERROR(AddQuadraticConstraint(gen_cst, - // xpress_model)); break; - // } - // case MPGeneralConstraintProto::kAbsConstraint: { - // RETURN_IF_XPRESS_ERROR(XPRSaddgenconstrAbs( - // xpress_model, - // /*name=*/gen_cst.name().c_str(), - // /*resvar=*/gen_cst.abs_constraint().resultant_var_index(), - // /*argvar=*/gen_cst.abs_constraint().var_index())); - // break; - // } - // case MPGeneralConstraintProto::kAndConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddAndConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // case MPGeneralConstraintProto::kOrConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddOrConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // case MPGeneralConstraintProto::kMinConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddMinConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // case MPGeneralConstraintProto::kMaxConstraint: { - // RETURN_IF_XPRESS_ERROR( - // AddMaxConstraint(gen_cst, xpress_model, &ct_variables)); - // break; - // } - // default: - // return absl::UnimplementedError( - // absl::StrFormat("General constraints of type %i not - // supported.", - // gen_cst.general_constraint_case())); - // } - // } - // } - - // RETURN_IF_XPRESS_ERROR(XPRSsetintattr(xpress_model, - // XPRS_INT_ATTR_MODELSENSE, - // model.maximize() ? 
-1 : 1)); - // RETURN_IF_XPRESS_ERROR(XPRSsetdblattr(xpress_model, XPRS_DBL_ATTR_OBJCON, - // model.objective_offset())); - // if (model.has_quadratic_objective()) { - // MPQuadraticObjective qobj = model.quadratic_objective(); - // if (qobj.coefficient_size() > 0) { - // RETURN_IF_XPRESS_ERROR( - // XPRSaddqpterms(xpress_model, /*numqnz=*/qobj.coefficient_size(), - // /*qrow=*/qobj.mutable_qvar1_index()->mutable_data(), - // /*qcol=*/qobj.mutable_qvar2_index()->mutable_data(), - // /*qval=*/qobj.mutable_coefficient()->mutable_data())); - // } - // } - - // RETURN_IF_XPRESS_ERROR(XPRSupdatemodel(xpress_model)); - - // const absl::Time time_before = absl::Now(); - // UserTimer user_timer; - // user_timer.Start(); - - // RETURN_IF_XPRESS_ERROR(XPRSoptimize(xpress_model)); - - // const absl::Duration solving_duration = absl::Now() - time_before; - // user_timer.Stop(); - // VLOG(1) << "Finished solving in XPressSolveProto(), walltime = " - // << solving_duration << ", usertime = " << - // user_timer.GetDuration(); - // response.mutable_solve_info()->set_solve_wall_time_seconds( - // absl::ToDoubleSeconds(solving_duration)); - // response.mutable_solve_info()->set_solve_user_time_seconds( - // absl::ToDoubleSeconds(user_timer.GetDuration())); - - // int optimization_status = 0; - // RETURN_IF_XPRESS_ERROR( - // XPRSgetintattr(xpress_model, XPRS_INT_ATTR_STATUS, - // &optimization_status)); - // int solution_count = 0; - // RETURN_IF_XPRESS_ERROR( - // XPRSgetintattr(xpress_model, XPRS_INT_ATTR_SOLCOUNT, - // &solution_count)); - // switch (optimization_status) { - // case XPRS_OPTIMAL: - // response.set_status(MPSOLVER_OPTIMAL); - // break; - // case XPRS_INF_OR_UNBD: - // DLOG(INFO) << "XPress solve returned XPRS_INF_OR_UNBD, which we treat - // as " - // "INFEASIBLE even though it may mean UNBOUNDED."; - // response.set_status_str( - // "The model may actually be unbounded: XPress returned " - // "XPRS_INF_OR_UNBD"); - // ABSL_FALLTHROUGH_INTENDED; - // case 
XPRS_INFEASIBLE: - // response.set_status(MPSOLVER_INFEASIBLE); - // break; - // case XPRS_UNBOUNDED: - // response.set_status(MPSOLVER_UNBOUNDED); - // break; - // default: { - // if (solution_count > 0) { - // response.set_status(MPSOLVER_FEASIBLE); - // } else { - // response.set_status(MPSOLVER_NOT_SOLVED); - // response.set_status_str( - // absl::StrFormat("XPress status code %d", optimization_status)); - // } - // break; - // } - // } - - // if (solution_count > 0 && (response.status() == MPSOLVER_FEASIBLE || - // response.status() == MPSOLVER_OPTIMAL)) { - // double objective_value = 0; - // RETURN_IF_XPRESS_ERROR( - // XPRSgetdblattr(xpress_model, XPRS_DBL_ATTR_OBJVAL, - // &objective_value)); - // response.set_objective_value(objective_value); - // double best_objective_bound = 0; - // const int error = XPRSgetdblattr(xpress_model, XPRS_DBL_ATTR_OBJBOUND, - // &best_objective_bound); - // if (response.status() == MPSOLVER_OPTIMAL && - // error == XPRS_ERROR_DATA_NOT_AVAILABLE) { - // // If the presolve deletes all variables, there's no best bound. - // response.set_best_objective_bound(objective_value); - // } else { - // RETURN_IF_XPRESS_ERROR(error); - // response.set_best_objective_bound(best_objective_bound); - // } - - // response.mutable_variable_value()->Resize(variable_size, 0); - // RETURN_IF_XPRESS_ERROR( - // XPRSgetdblattrarray(xpress_model, XPRS_DBL_ATTR_X, 0, - // variable_size, - // response.mutable_variable_value()->mutable_data())); - // // NOTE, XPressSolveProto() is exposed to external clients via MPSolver - // API, - // // which assumes the solution values of integer variables are rounded - // to - // // integer values. 
- // auto round_values_of_integer_variables_fn = - // [&](google::protobuf::RepeatedField* values) { - // for (int v = 0; v < variable_size; ++v) { - // if (model.variable(v).is_integer()) { - // (*values)[v] = std::round((*values)[v]); - // } - // } - // }; - // round_values_of_integer_variables_fn(response.mutable_variable_value()); - // if (!has_integer_variables && model.general_constraint_size() == 0) { - // response.mutable_dual_value()->Resize(model.constraint_size(), 0); - // RETURN_IF_XPRESS_ERROR(XPRSgetdblattrarray( - // xpress_model, XPRS_DBL_ATTR_PI, 0, model.constraint_size(), - // response.mutable_dual_value()->mutable_data())); - // } - // const int additional_solutions = std::min( - // solution_count, - // std::min(request.populate_additional_solutions_up_to(), - // std::numeric_limits::max() - 1) + - // 1); - // for (int i = 1; i < additional_solutions; ++i) { - // RETURN_IF_XPRESS_ERROR( - // XPRSsetintparam(model_env, XPRS_INT_PAR_SOLUTIONNUMBER, i)); - // MPSolution* solution = response.add_additional_solutions(); - // solution->mutable_variable_value()->Resize(variable_size, 0); - // double objective_value = 0; - // RETURN_IF_XPRESS_ERROR(XPRSgetdblattr( - // xpress_model, XPRS_DBL_ATTR_POOLOBJVAL, &objective_value)); - // solution->set_objective_value(objective_value); - // RETURN_IF_XPRESS_ERROR(XPRSgetdblattrarray( - // xpress_model, XPRS_DBL_ATTR_XN, 0, variable_size, - // solution->mutable_variable_value()->mutable_data())); - // round_values_of_integer_variables_fn(solution->mutable_variable_value()); - // } - // } - // #undef RETURN_IF_XPRESS_ERROR - - return response; -} - -} // namespace operations_research diff --git a/ortools/linear_solver/proto_solver/xpress_proto_solver.h b/ortools/linear_solver/proto_solver/xpress_proto_solver.h deleted file mode 100644 index c56e5acc88..0000000000 --- a/ortools/linear_solver/proto_solver/xpress_proto_solver.h +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2010-2025 Google LLC -// Licensed under 
the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#ifndef OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_XPRESS_PROTO_SOLVER_H_ -#define OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_XPRESS_PROTO_SOLVER_H_ - -#include "ortools/linear_solver/linear_solver.pb.h" -#include "ortools/util/lazy_mutable_copy.h" - -namespace operations_research { - -// Solves the input request. -MPSolutionResponse XPressSolveProto(LazyMutableCopy request); - -} // namespace operations_research - -#endif // OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_XPRESS_PROTO_SOLVER_H_ diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index 020e9d3a10..64e810c118 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -30,7 +30,7 @@ #include "absl/strings/str_join.h" #include "ortools/base/helpers.h" #include "ortools/base/options.h" -#include "ortools/gurobi/environment.h" +#include "ortools/linear_solver/gurobi_util.h" #include "ortools/linear_solver/linear_solver.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/model_exporter.h" diff --git a/ortools/linear_solver/xpress_interface.cc b/ortools/linear_solver/xpress_interface.cc index 89f6d4653f..c93f5e8caa 100644 --- a/ortools/linear_solver/xpress_interface.cc +++ b/ortools/linear_solver/xpress_interface.cc @@ -28,7 +28,7 @@ #include "ortools/base/logging.h" #include "ortools/base/timer.h" #include 
"ortools/linear_solver/linear_solver.h" -#include "ortools/xpress/environment.h" +#include "ortools/third_party_solvers/xpress_environment.h" #define XPRS_INTEGER 'I' #define XPRS_CONTINUOUS 'C' diff --git a/ortools/linear_solver/xpress_interface_test.cc b/ortools/linear_solver/xpress_interface_test.cc index 35c0cfcaf0..4aa21c2363 100644 --- a/ortools/linear_solver/xpress_interface_test.cc +++ b/ortools/linear_solver/xpress_interface_test.cc @@ -22,7 +22,7 @@ #include "gtest/gtest.h" #include "ortools/base/init_google.h" #include "ortools/linear_solver/linear_solver.h" -#include "ortools/xpress/environment.h" +#include "ortools/third_party_solvers/xpress_environment.h" #define XPRS_NAMELENGTH 1028 namespace operations_research { diff --git a/ortools/math_opt/solvers/BUILD.bazel b/ortools/math_opt/solvers/BUILD.bazel index 1c4a899094..ec2c9e34b7 100644 --- a/ortools/math_opt/solvers/BUILD.bazel +++ b/ortools/math_opt/solvers/BUILD.bazel @@ -115,7 +115,6 @@ cc_library( "//ortools/base:logging", "//ortools/base:protoutil", "//ortools/base:status_macros", - "//ortools/gurobi:environment", "//ortools/math_opt:callback_cc_proto", "//ortools/math_opt:solution_cc_proto", "//ortools/math_opt:sparse_containers_cc_proto", @@ -123,6 +122,7 @@ cc_library( "//ortools/math_opt/core:solver_interface", "//ortools/math_opt/core:sparse_vector_view", "//ortools/math_opt/solvers/gurobi:g_gurobi", + "//ortools/third_party_solvers:gurobi_environment", "//ortools/util:solve_interrupter", "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/status", @@ -152,7 +152,6 @@ cc_library( "//ortools/base:map_util", "//ortools/base:protoutil", "//ortools/base:status_macros", - "//ortools/gurobi:environment", "//ortools/gurobi/isv_public:gurobi_isv", "//ortools/math_opt:callback_cc_proto", "//ortools/math_opt:infeasible_subsystem_cc_proto", @@ -173,6 +172,7 @@ cc_library( "//ortools/math_opt/solvers/gurobi:g_gurobi", "//ortools/math_opt/validators:callback_validator", 
"//ortools/port:proto_utils", + "//ortools/third_party_solvers:gurobi_environment", "//ortools/util:solve_interrupter", "//ortools/util:testing_utils", "@abseil-cpp//absl/algorithm:container", @@ -710,8 +710,8 @@ cc_library( "//ortools/math_opt/solvers/xpress:g_xpress", "//ortools/math_opt/validators:callback_validator", "//ortools/port:proto_utils", + "//ortools/third_party_solvers:xpress_environment", "//ortools/util:solve_interrupter", - "//ortools/xpress:environment", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/memory", "@abseil-cpp//absl/status", @@ -744,7 +744,7 @@ cc_test( "//ortools/math_opt/solver_tests:qp_tests", "//ortools/math_opt/solver_tests:second_order_cone_tests", "//ortools/math_opt/solver_tests:status_tests", - "//ortools/xpress:environment", + "//ortools/third_party_solvers:xpress_environment", "@abseil-cpp//absl/log", ], ) diff --git a/ortools/math_opt/solvers/gurobi/BUILD.bazel b/ortools/math_opt/solvers/gurobi/BUILD.bazel index f70c68d9ab..5a35646323 100644 --- a/ortools/math_opt/solvers/gurobi/BUILD.bazel +++ b/ortools/math_opt/solvers/gurobi/BUILD.bazel @@ -29,9 +29,9 @@ cc_library( "//ortools/base:logging", "//ortools/base:source_location", "//ortools/base:status_macros", - "//ortools/gurobi:environment", "//ortools/gurobi/isv_public:gurobi_isv", "//ortools/math_opt/solvers:gurobi_cc_proto", + "//ortools/third_party_solvers:gurobi_environment", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:die_if_null", diff --git a/ortools/math_opt/solvers/gurobi/g_gurobi.cc b/ortools/math_opt/solvers/gurobi/g_gurobi.cc index 76eadc1cdb..128badd522 100644 --- a/ortools/math_opt/solvers/gurobi/g_gurobi.cc +++ b/ortools/math_opt/solvers/gurobi/g_gurobi.cc @@ -32,9 +32,9 @@ #include "ortools/base/source_location.h" #include "ortools/base/status_builder.h" #include "ortools/base/status_macros.h" -#include "ortools/gurobi/environment.h" #include "ortools/gurobi/isv_public/gurobi_isv.h" #include 
"ortools/math_opt/solvers/gurobi.pb.h" +#include "ortools/third_party_solvers/gurobi_environment.h" namespace operations_research::math_opt { @@ -47,6 +47,12 @@ struct UserCallbackData { Gurobi* gurobi = nullptr; }; +#if defined(_MSC_VER) +#define GUROBI_STDCALL __stdcall +#else +#define GUROBI_STDCALL +#endif + int GUROBI_STDCALL GurobiCallback(GRBmodel* const model, void* const cbdata, const int where, void* const usrdata) { CHECK(usrdata != nullptr); diff --git a/ortools/math_opt/solvers/gurobi/g_gurobi.h b/ortools/math_opt/solvers/gurobi/g_gurobi.h index ed4e9d222e..c6bd7c98df 100644 --- a/ortools/math_opt/solvers/gurobi/g_gurobi.h +++ b/ortools/math_opt/solvers/gurobi/g_gurobi.h @@ -43,8 +43,8 @@ #include "absl/status/statusor.h" #include "absl/types/span.h" #include "ortools/base/source_location.h" -#include "ortools/gurobi/environment.h" #include "ortools/gurobi/isv_public/gurobi_isv.h" +#include "ortools/third_party_solvers/gurobi_environment.h" namespace operations_research::math_opt { diff --git a/ortools/math_opt/solvers/gurobi_callback.cc b/ortools/math_opt/solvers/gurobi_callback.cc index 50f11f293f..2e678cb080 100644 --- a/ortools/math_opt/solvers/gurobi_callback.cc +++ b/ortools/math_opt/solvers/gurobi_callback.cc @@ -14,7 +14,6 @@ #include "ortools/math_opt/solvers/gurobi_callback.h" #include -#include #include #include #include @@ -32,7 +31,6 @@ #include "ortools/base/logging.h" #include "ortools/base/protoutil.h" #include "ortools/base/status_macros.h" -#include "ortools/gurobi/environment.h" #include "ortools/math_opt/callback.pb.h" #include "ortools/math_opt/core/math_opt_proto_utils.h" #include "ortools/math_opt/core/solver_interface.h" @@ -40,6 +38,7 @@ #include "ortools/math_opt/solution.pb.h" #include "ortools/math_opt/solvers/message_callback_data.h" #include "ortools/math_opt/sparse_containers.pb.h" +#include "ortools/third_party_solvers/gurobi_environment.h" #include "ortools/util/solve_interrupter.h" namespace operations_research { diff 
--git a/ortools/math_opt/solvers/gurobi_callback.h b/ortools/math_opt/solvers/gurobi_callback.h index 1524f31057..f9223bdc30 100644 --- a/ortools/math_opt/solvers/gurobi_callback.h +++ b/ortools/math_opt/solvers/gurobi_callback.h @@ -21,12 +21,12 @@ #include "absl/status/status.h" #include "absl/time/time.h" #include "ortools/base/linked_hash_map.h" -#include "ortools/gurobi/environment.h" #include "ortools/math_opt/callback.pb.h" #include "ortools/math_opt/core/solver_interface.h" #include "ortools/math_opt/solvers/gurobi/g_gurobi.h" #include "ortools/math_opt/solvers/message_callback_data.h" #include "ortools/math_opt/sparse_containers.pb.h" +#include "ortools/third_party_solvers/gurobi_environment.h" #include "ortools/util/solve_interrupter.h" namespace operations_research { diff --git a/ortools/math_opt/solvers/gurobi_solver.cc b/ortools/math_opt/solvers/gurobi_solver.cc index 72c0c71328..964864db0e 100644 --- a/ortools/math_opt/solvers/gurobi_solver.cc +++ b/ortools/math_opt/solvers/gurobi_solver.cc @@ -2756,9 +2756,6 @@ absl::StatusOr GurobiSolver::Update( absl::StatusOr> GurobiSolver::New( const ModelProto& input_model, const SolverInterface::InitArgs& init_args) { - if (!GurobiIsCorrectlyInstalled()) { - return absl::InvalidArgumentError("Gurobi is not correctly installed."); - } RETURN_IF_ERROR( ModelIsSupported(input_model, kGurobiSupportedStructures, "Gurobi")); if (!input_model.auxiliary_objectives().empty() && diff --git a/ortools/math_opt/solvers/gurobi_solver.h b/ortools/math_opt/solvers/gurobi_solver.h index 7789afe062..03478bcd76 100644 --- a/ortools/math_opt/solvers/gurobi_solver.h +++ b/ortools/math_opt/solvers/gurobi_solver.h @@ -28,7 +28,6 @@ #include "absl/time/time.h" #include "absl/types/span.h" #include "ortools/base/linked_hash_map.h" -#include "ortools/gurobi/environment.h" #include "ortools/math_opt/callback.pb.h" #include "ortools/math_opt/core/invalid_indicators.h" #include "ortools/math_opt/core/inverted_bounds.h" @@ -44,8 +43,10 @@ 
#include "ortools/math_opt/solvers/gurobi_callback.h" #include "ortools/math_opt/solvers/message_callback_data.h" #include "ortools/math_opt/sparse_containers.pb.h" +#include "ortools/third_party_solvers/gurobi_environment.h" #include "ortools/util/solve_interrupter.h" + namespace operations_research { namespace math_opt { diff --git a/ortools/math_opt/solvers/xpress/BUILD.bazel b/ortools/math_opt/solvers/xpress/BUILD.bazel index 77d89b809f..b339b3b6e7 100644 --- a/ortools/math_opt/solvers/xpress/BUILD.bazel +++ b/ortools/math_opt/solvers/xpress/BUILD.bazel @@ -22,7 +22,7 @@ cc_library( "//ortools/base:logging", "//ortools/base:source_location", "//ortools/base:status_macros", - "//ortools/xpress:environment", + "//ortools/third_party_solvers:xpress_environment", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:die_if_null", "@abseil-cpp//absl/memory", diff --git a/ortools/math_opt/solvers/xpress/g_xpress.cc b/ortools/math_opt/solvers/xpress/g_xpress.cc index 7f5782bbcf..e94b674dec 100644 --- a/ortools/math_opt/solvers/xpress/g_xpress.cc +++ b/ortools/math_opt/solvers/xpress/g_xpress.cc @@ -30,7 +30,7 @@ #include "ortools/base/logging.h" #include "ortools/base/status_builder.h" #include "ortools/base/status_macros.h" -#include "ortools/xpress/environment.h" +#include "ortools/third_party_solvers/xpress_environment.h" namespace operations_research::math_opt { diff --git a/ortools/math_opt/solvers/xpress/g_xpress.h b/ortools/math_opt/solvers/xpress/g_xpress.h index 23a6e2f46c..70f5ba74c6 100644 --- a/ortools/math_opt/solvers/xpress/g_xpress.h +++ b/ortools/math_opt/solvers/xpress/g_xpress.h @@ -34,7 +34,7 @@ #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" -#include "ortools/xpress/environment.h" +#include "ortools/third_party_solvers/xpress_environment.h" namespace operations_research::math_opt { diff --git a/ortools/math_opt/solvers/xpress_solver.cc b/ortools/math_opt/solvers/xpress_solver.cc index 
5f0a0f413f..b94b31763d 100644 --- a/ortools/math_opt/solvers/xpress_solver.cc +++ b/ortools/math_opt/solvers/xpress_solver.cc @@ -40,8 +40,8 @@ #include "ortools/math_opt/solvers/xpress/g_xpress.h" #include "ortools/math_opt/validators/callback_validator.h" #include "ortools/port/proto_utils.h" +#include "ortools/third_party_solvers/xpress_environment.h" #include "ortools/util/solve_interrupter.h" -#include "ortools/xpress/environment.h" namespace operations_research { namespace math_opt { diff --git a/ortools/math_opt/solvers/xpress_solver.h b/ortools/math_opt/solvers/xpress_solver.h index 81c4547ab3..65c98b456f 100644 --- a/ortools/math_opt/solvers/xpress_solver.h +++ b/ortools/math_opt/solvers/xpress_solver.h @@ -36,8 +36,8 @@ #include "ortools/math_opt/solution.pb.h" #include "ortools/math_opt/solvers/xpress/g_xpress.h" #include "ortools/math_opt/sparse_containers.pb.h" +#include "ortools/third_party_solvers/xpress_environment.h" #include "ortools/util/solve_interrupter.h" -#include "ortools/xpress/environment.h" namespace operations_research::math_opt { diff --git a/ortools/math_opt/solvers/xpress_solver_test.cc b/ortools/math_opt/solvers/xpress_solver_test.cc index 41dd81f860..23e4c8d767 100644 --- a/ortools/math_opt/solvers/xpress_solver_test.cc +++ b/ortools/math_opt/solvers/xpress_solver_test.cc @@ -35,7 +35,7 @@ #include "ortools/math_opt/solver_tests/qp_tests.h" #include "ortools/math_opt/solver_tests/second_order_cone_tests.h" #include "ortools/math_opt/solver_tests/status_tests.h" -#include "ortools/xpress/environment.h" +#include "ortools/third_party_solvers/xpress_environment.h" namespace operations_research { namespace math_opt { diff --git a/ortools/xpress/BUILD.bazel b/ortools/xpress/BUILD.bazel deleted file mode 100644 index c5fb93d522..0000000000 --- a/ortools/xpress/BUILD.bazel +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except 
in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -package(default_visibility = ["//visibility:public"]) - -cc_library( - name = "environment", - srcs = [ - "environment.cc", - ], - hdrs = [ - "environment.h", - ], - deps = [ - "//ortools/base", - "//ortools/base:dynamic_library", - "//ortools/base:file", - "//ortools/base:status_macros", - "@abseil-cpp//absl/status", - "@abseil-cpp//absl/status:statusor", - "@abseil-cpp//absl/strings", - "@abseil-cpp//absl/synchronization", - "@abseil-cpp//absl/types:optional", - ], -) diff --git a/ortools/xpress/CMakeLists.txt b/ortools/xpress/CMakeLists.txt deleted file mode 100644 index add1ace6ac..0000000000 --- a/ortools/xpress/CMakeLists.txt +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -file(GLOB _SRCS "*.h" "*.cc") -set(NAME ${PROJECT_NAME}_xpress) - -add_library(${NAME} OBJECT ${_SRCS}) -set_target_properties(${NAME} PROPERTIES - CXX_STANDARD 17 - CXX_STANDARD_REQUIRED ON - CXX_EXTENSIONS OFF - POSITION_INDEPENDENT_CODE ON - ) -if(MSVC AND BUILD_SHARED_LIBS) - target_compile_definitions(${NAME} PUBLIC "OR_BUILD_DLL") - target_compile_definitions(${NAME} PRIVATE "OR_EXPORT") -endif() -target_include_directories(${NAME} PRIVATE - ${PROJECT_SOURCE_DIR} - ${PROJECT_BINARY_DIR}) -target_link_libraries(${NAME} PRIVATE - absl::hash - absl::meta - absl::memory - absl::strings - absl::str_format - protobuf::libprotobuf - ${PROJECT_NAMESPACE}::ortools_proto) diff --git a/ortools/xpress/environment.cc b/ortools/xpress/environment.cc deleted file mode 100644 index 5e628099e2..0000000000 --- a/ortools/xpress/environment.cc +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2019-2023 RTE -// Copyright 2010-2025 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Initial version of this code was provided by RTE - -#include "ortools/xpress/environment.h" - -#include -// NOLINTNEXTLINE(build/c++17) -#include -#include -#include -#include - -#include "absl/base/call_once.h" -#include "absl/base/const_init.h" -#include "absl/status/status.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_join.h" -#include "absl/synchronization/mutex.h" -#include "ortools/base/dynamic_library.h" -#include "ortools/base/logging.h" - -namespace operations_research { - -#define STRINGIFY2(X) #X -#define STRINGIFY(X) STRINGIFY2(X) - -// Let's not reformat for rest of the file. -// This was generated with the parse_header_xpress.py script. -// See the comment at the top of the script. - -// This is the 'define' section. -// NOLINTBEGIN(whitespace/line_length) -// NOLINTBEGIN(google3-runtime-global-variables) -// clang-format off -std::function XPRScreateprob = nullptr; -std::function XPRSdestroyprob = nullptr; -std::function XPRSinit = nullptr; -std::function XPRSfree = nullptr; -std::function XPRSgetlicerrmsg = nullptr; -std::function XPRSlicense = nullptr; -std::function XPRSgetbanner = nullptr; -std::function XPRSgetversion = nullptr; -std::function XPRSsetprobname = nullptr; -std::function XPRSsetdefaultcontrol = nullptr; -std::function XPRSinterrupt = nullptr; -std::function XPRSsetintcontrol = nullptr; -std::function XPRSsetintcontrol64 = nullptr; -std::function XPRSsetdblcontrol = nullptr; -std::function XPRSsetstrcontrol = nullptr; -std::function XPRSgetintcontrol = nullptr; -std::function XPRSgetintcontrol64 = nullptr; -std::function XPRSgetdblcontrol = nullptr; -std::function XPRSgetstringcontrol = nullptr; -std::function XPRSgetintattrib = nullptr; -std::function XPRSgetstringattrib = nullptr; -std::function XPRSgetdblattrib = nullptr; -std::function XPRSgetcontrolinfo = nullptr; -std::function XPRSgetobj = nullptr; -std::function XPRSgetrhs = nullptr; -std::function XPRSgetrhsrange = nullptr; -std::function XPRSgetlb 
= nullptr; -std::function XPRSgetub = nullptr; -std::function XPRSgetcoef = nullptr; -std::function XPRSgetduals = nullptr; -std::function XPRSgetredcosts = nullptr; -std::function XPRSaddrows = nullptr; -std::function XPRSdelrows = nullptr; -std::function XPRSaddcols = nullptr; -std::function XPRSaddnames = nullptr; -std::function XPRSgetnames = nullptr; -std::function XPRSdelcols = nullptr; -std::function XPRSchgcoltype = nullptr; -std::function XPRSloadbasis = nullptr; -std::function XPRSpostsolve = nullptr; -std::function XPRSchgobjsense = nullptr; -std::function XPRSgetlasterror = nullptr; -std::function XPRSgetbasis = nullptr; -std::function XPRSwriteprob = nullptr; -std::function XPRSgetrowtype = nullptr; -std::function XPRSgetcoltype = nullptr; -std::function XPRSchgbounds = nullptr; -std::function XPRSaddmipsol = nullptr; -std::function XPRSgetlpsol = nullptr; -std::function XPRSgetmipsol = nullptr; -std::function XPRSchgobj = nullptr; -std::function XPRSchgcoef = nullptr; -std::function XPRSchgmcoef = nullptr; -std::function XPRSchgmcoef64 = nullptr; -std::function XPRSchgmqobj = nullptr; -std::function XPRSchgrhs = nullptr; -std::function XPRSchgrhsrange = nullptr; -std::function XPRSchgrowtype = nullptr; -std::function XPRSdelobj = nullptr; -std::function XPRSaddcbintsol = nullptr; -std::function XPRSremovecbintsol = nullptr; -std::function XPRSaddcbmessage = nullptr; -std::function XPRSlpoptimize = nullptr; -std::function XPRSmipoptimize = nullptr; -std::function XPRSoptimize = nullptr; -// clang-format on -// NOLINTEND(google3-runtime-global-variables) -// NOLINTEND(whitespace/line_length) - -void LoadXpressFunctions(DynamicLibrary* xpress_dynamic_library) { - // This was generated with the parse_header_xpress.py script. - // See the comment at the top of the script. - - // This is the 'assign' section. 
- // NOLINTBEGIN(whitespace/line_length) - // clang-format off - xpress_dynamic_library->GetFunction(&XPRScreateprob, "XPRScreateprob"); - xpress_dynamic_library->GetFunction(&XPRSdestroyprob, "XPRSdestroyprob"); - xpress_dynamic_library->GetFunction(&XPRSinit, "XPRSinit"); - xpress_dynamic_library->GetFunction(&XPRSfree, "XPRSfree"); - xpress_dynamic_library->GetFunction(&XPRSgetlicerrmsg, "XPRSgetlicerrmsg"); - xpress_dynamic_library->GetFunction(&XPRSlicense, "XPRSlicense"); - xpress_dynamic_library->GetFunction(&XPRSgetbanner, "XPRSgetbanner"); - xpress_dynamic_library->GetFunction(&XPRSgetversion, "XPRSgetversion"); - xpress_dynamic_library->GetFunction(&XPRSsetprobname, "XPRSsetprobname"); - xpress_dynamic_library->GetFunction(&XPRSsetdefaultcontrol, "XPRSsetdefaultcontrol"); - xpress_dynamic_library->GetFunction(&XPRSinterrupt, "XPRSinterrupt"); - xpress_dynamic_library->GetFunction(&XPRSsetintcontrol, "XPRSsetintcontrol"); - xpress_dynamic_library->GetFunction(&XPRSsetintcontrol64, "XPRSsetintcontrol64"); - xpress_dynamic_library->GetFunction(&XPRSsetdblcontrol, "XPRSsetdblcontrol"); - xpress_dynamic_library->GetFunction(&XPRSsetstrcontrol, "XPRSsetstrcontrol"); - xpress_dynamic_library->GetFunction(&XPRSgetintcontrol, "XPRSgetintcontrol"); - xpress_dynamic_library->GetFunction(&XPRSgetintcontrol64, "XPRSgetintcontrol64"); - xpress_dynamic_library->GetFunction(&XPRSgetdblcontrol, "XPRSgetdblcontrol"); - xpress_dynamic_library->GetFunction(&XPRSgetstringcontrol, "XPRSgetstringcontrol"); - xpress_dynamic_library->GetFunction(&XPRSgetintattrib, "XPRSgetintattrib"); - xpress_dynamic_library->GetFunction(&XPRSgetstringattrib, "XPRSgetstringattrib"); - xpress_dynamic_library->GetFunction(&XPRSgetdblattrib, "XPRSgetdblattrib"); - xpress_dynamic_library->GetFunction(&XPRSgetobj, "XPRSgetobj"); - xpress_dynamic_library->GetFunction(&XPRSgetrhs, "XPRSgetrhs"); - xpress_dynamic_library->GetFunction(&XPRSgetrhsrange, "XPRSgetrhsrange"); - 
xpress_dynamic_library->GetFunction(&XPRSgetlb, "XPRSgetlb"); - xpress_dynamic_library->GetFunction(&XPRSgetub, "XPRSgetub"); - xpress_dynamic_library->GetFunction(&XPRSgetcoef, "XPRSgetcoef"); - xpress_dynamic_library->GetFunction(&XPRSgetduals, "XPRSgetduals"); - xpress_dynamic_library->GetFunction(&XPRSgetredcosts, "XPRSgetredcosts"); - xpress_dynamic_library->GetFunction(&XPRSaddrows, "XPRSaddrows"); - xpress_dynamic_library->GetFunction(&XPRSdelrows, "XPRSdelrows"); - xpress_dynamic_library->GetFunction(&XPRSaddcols, "XPRSaddcols"); - xpress_dynamic_library->GetFunction(&XPRSaddnames, "XPRSaddnames"); - xpress_dynamic_library->GetFunction(&XPRSgetnames, "XPRSgetnames"); - xpress_dynamic_library->GetFunction(&XPRSdelcols, "XPRSdelcols"); - xpress_dynamic_library->GetFunction(&XPRSchgcoltype, "XPRSchgcoltype"); - xpress_dynamic_library->GetFunction(&XPRSloadbasis, "XPRSloadbasis"); - xpress_dynamic_library->GetFunction(&XPRSpostsolve, "XPRSpostsolve"); - xpress_dynamic_library->GetFunction(&XPRSchgobjsense, "XPRSchgobjsense"); - xpress_dynamic_library->GetFunction(&XPRSgetlasterror, "XPRSgetlasterror"); - xpress_dynamic_library->GetFunction(&XPRSgetbasis, "XPRSgetbasis"); - xpress_dynamic_library->GetFunction(&XPRSwriteprob, "XPRSwriteprob"); - xpress_dynamic_library->GetFunction(&XPRSgetrowtype, "XPRSgetrowtype"); - xpress_dynamic_library->GetFunction(&XPRSgetcoltype, "XPRSgetcoltype"); - xpress_dynamic_library->GetFunction(&XPRSchgbounds, "XPRSchgbounds"); - xpress_dynamic_library->GetFunction(&XPRSaddmipsol, "XPRSaddmipsol"); - xpress_dynamic_library->GetFunction(&XPRSgetlpsol, "XPRSgetlpsol"); - xpress_dynamic_library->GetFunction(&XPRSgetmipsol, "XPRSgetmipsol"); - xpress_dynamic_library->GetFunction(&XPRSchgobj, "XPRSchgobj"); - xpress_dynamic_library->GetFunction(&XPRSchgcoef, "XPRSchgcoef"); - xpress_dynamic_library->GetFunction(&XPRSchgmcoef, "XPRSchgmcoef"); - xpress_dynamic_library->GetFunction(&XPRSchgmcoef64, "XPRSchgmcoef64"); - 
xpress_dynamic_library->GetFunction(&XPRSchgmqobj, "XPRSchgmqobj"); - xpress_dynamic_library->GetFunction(&XPRSchgrhs, "XPRSchgrhs"); - xpress_dynamic_library->GetFunction(&XPRSchgrhsrange, "XPRSchgrhsrange"); - xpress_dynamic_library->GetFunction(&XPRSchgrowtype, "XPRSchgrowtype"); - xpress_dynamic_library->GetFunction(&XPRSdelobj, "XPRSdelobj"); - xpress_dynamic_library->GetFunction(&XPRSaddcbintsol, "XPRSaddcbintsol"); - xpress_dynamic_library->GetFunction(&XPRSremovecbintsol, "XPRSremovecbintsol"); - xpress_dynamic_library->GetFunction(&XPRSaddcbmessage, "XPRSaddcbmessage"); - xpress_dynamic_library->GetFunction(&XPRSlpoptimize, "XPRSlpoptimize"); - xpress_dynamic_library->GetFunction(&XPRSmipoptimize, "XPRSmipoptimize"); - xpress_dynamic_library->GetFunction(&XPRSoptimize, "XPRSoptimize"); - // clang-format on - // NOLINTEND(whitespace/line_length) -} - -void printXpressBanner(bool error) { - char banner[XPRS_MAXBANNERLENGTH]; - XPRSgetbanner(banner); - - if (error) { - LOG(ERROR) << "XpressInterface : Xpress banner :\n" << banner << "\n"; - } else { - LOG(WARNING) << "XpressInterface : Xpress banner :\n" << banner << "\n"; - } -} - -std::vector XpressDynamicLibraryPotentialPaths() { - std::vector potential_paths; - - // Look for libraries pointed by XPRESSDIR first. - const char* xpressdir_from_env = getenv("XPRESSDIR"); - if (xpressdir_from_env != nullptr) { - LOG(INFO) << "Environment variable XPRESSDIR = " << xpressdir_from_env; -#if defined(_MSC_VER) // Windows - potential_paths.push_back( - absl::StrCat(xpressdir_from_env, "\\bin\\xprs.dll")); -#elif defined(__APPLE__) // macOS - potential_paths.push_back( - absl::StrCat(xpressdir_from_env, "/lib/libxprs.dylib")); -#elif defined(__GNUC__) // Linux - potential_paths.push_back( - absl::StrCat(xpressdir_from_env, "/lib/libxprs.so")); -#else - LOG(ERROR) << "OS Not recognized by xpress/environment.cc." 
- << " You won't be able to use Xpress."; -#endif - } else { - LOG(WARNING) << "Environment variable XPRESSDIR undefined."; - } - - // Search for canonical places. -#if defined(_MSC_VER) // Windows - potential_paths.push_back(absl::StrCat("C:\\xpressmp\\bin\\xprs.dll")); - potential_paths.push_back( - absl::StrCat("C:\\Program Files\\xpressmp\\bin\\xprs.dll")); -#elif defined(__APPLE__) // macOS - potential_paths.push_back( - absl::StrCat("/Library/xpressmp/lib/libxprs.dylib")); -#elif defined(__GNUC__) // Linux - potential_paths.push_back(absl::StrCat("/opt/xpressmp/lib/libxprs.so")); -#else - LOG(ERROR) << "OS Not recognized by xpress/environment.cc." - << " You won't be able to use Xpress."; -#endif - return potential_paths; -} - -absl::Status LoadXpressDynamicLibrary(std::string& xpresspath) { - static std::string* xpress_lib_path = new std::string; - static absl::once_flag xpress_loading_done; - static absl::Status* xpress_load_status = new absl::Status; - static DynamicLibrary* xpress_library = new DynamicLibrary; - static absl::Mutex mutex(absl::kConstInit); - - absl::MutexLock lock(&mutex); - - absl::call_once(xpress_loading_done, []() { - const std::vector canonical_paths = - XpressDynamicLibraryPotentialPaths(); - for (const std::string& path : canonical_paths) { - if (xpress_library->TryToLoad(path)) { - LOG(INFO) << "Found the Xpress library in " << path << "."; - xpress_lib_path->clear(); - std::filesystem::path p(path); - p.remove_filename(); - xpress_lib_path->append(p.string()); - break; - } - } - - if (xpress_library->LibraryIsLoaded()) { - LOG(INFO) << "Loading all Xpress functions"; - LoadXpressFunctions(xpress_library); - *xpress_load_status = absl::OkStatus(); - } else { - *xpress_load_status = absl::NotFoundError( - absl::StrCat("Could not find the Xpress shared library. Looked in: [", - absl::StrJoin(canonical_paths, "', '"), - "]. 
Please check environment variable XPRESSDIR")); - } - }); - xpresspath.clear(); - xpresspath.append(*xpress_lib_path); - return *xpress_load_status; -} - -void log_message_about_XPRSinit_argument(); -void log_full_license_error(int code, const std::string& xpress_lib_dir); -//! init XPRESS environment. -bool initXpressEnv(bool verbose, int xpress_oem_license_key) { - std::string xpress_lib_dir; - absl::Status status = LoadXpressDynamicLibrary(xpress_lib_dir); - if (!status.ok()) { - LOG(WARNING) << status << "\n"; - return false; - } - - int code; - // if not an OEM key - if (xpress_oem_license_key == 0) { - if (verbose) { - log_message_about_XPRSinit_argument(); - } - - code = XPRSinit(nullptr); - - if (!code) { - // XPRSbanner informs about Xpress version, options and error messages - if (verbose) { - printXpressBanner(false); - char version[16]; - XPRSgetversion(version); - LOG(WARNING) << "Optimizer version: " << version - << " (OR-Tools was compiled with version " << XPVERSION - << ")."; - } - return true; - } else { - log_full_license_error(code, xpress_lib_dir); - return false; - } - } else { - // if OEM key - if (verbose) { - LOG(WARNING) << "XpressInterface : Initialising xpress-MP with OEM key " - << xpress_oem_license_key; - } - - int nvalue = 0; - int ierr; - char slicmsg[256] = ""; - char errmsg[256]; - - XPRSlicense(&nvalue, slicmsg); - if (verbose) { - DLOG(INFO) << "XpressInterface : First message from XPRSLicense : " - << slicmsg; - } - - nvalue = xpress_oem_license_key - ((nvalue * nvalue) / 19); - ierr = XPRSlicense(&nvalue, slicmsg); - - if (verbose) { - DLOG(INFO) << "XpressInterface : Second message from XPRSLicense : " - << slicmsg; - } - if (ierr == 16) { - if (verbose) { - DLOG(INFO) - << "XpressInterface : Optimizer development software detected"; - } - } else if (ierr != 0) { - // get the license error message - XPRSgetlicerrmsg(errmsg, 256); - - LOG(ERROR) << "XpressInterface : " << errmsg; - return false; - } - - code = 
XPRSinit(nullptr); - - if (!code) { - return true; - } else { - LOG(ERROR) << "XPRSinit returned code : " << code << "\n"; - return false; - } - } -} -void log_full_license_error(int code, const std::string& xpress_lib_dir) { - LOG(WARNING) << "XpressInterface: Xpress found at " << xpress_lib_dir << "\n"; - char errmsg[256]; - XPRSgetlicerrmsg(errmsg, 256); - - LOG(ERROR) << "XpressInterface : License error : " << errmsg - << " (XPRSinit returned code " << code << "). \n"; - LOG(ERROR) - << "|_Your Xpress installation should have set the env var XPAUTH_PATH" - " to the full path of your licence file\n"; -} -void log_message_about_XPRSinit_argument() { - LOG(WARNING) - << "XpressInterface : Initialising xpress-MP with default parameters"; -} - -bool XpressIsCorrectlyInstalled() { - bool correctlyInstalled = initXpressEnv(false); - if (correctlyInstalled) { - XPRSfree(); - } - return correctlyInstalled; -} - -} // namespace operations_research diff --git a/ortools/xpress/environment.h b/ortools/xpress/environment.h deleted file mode 100644 index 9a1fe558be..0000000000 --- a/ortools/xpress/environment.h +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright 2019-2023 RTE -// Copyright 2010-2025 Google LLC -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Initial version of this code was provided by RTE - -#ifndef OR_TOOLS_XPRESS_ENVIRONMENT_H_ -#define OR_TOOLS_XPRESS_ENVIRONMENT_H_ - -#include -#include - -#include "absl/status/status.h" -#include "ortools/base/base_export.h" - -extern "C" { -typedef struct xo_prob_struct* XPRSprob; -} - -namespace operations_research { - -void printXpressBanner(bool error); - -bool initXpressEnv(bool verbose = true, int xpress_oem_license_key = 0); - -bool XpressIsCorrectlyInstalled(); - -// Force the loading of the xpress dynamic library. It returns true if the -// library was successfully loaded. This method can only be called once. -// Successive calls are no-op. -// -// Note that it does not check if a token license can be grabbed. -absl::Status LoadXpressDynamicLibrary(std::string& xpresspath); - -// The list of #define and extern std::function<> below is generated directly -// from xprs.h via parse_header_xpress.py -// See the top comment on the parse_header_xpress.py file. -// This is the header section -// NOLINTBEGIN(runtime/int) -#if defined(_WIN32) -#define XPRSint64 __int64 -#elif defined(__LP64__) || defined(_LP64) || defined(__ILP64__) || \ - defined(_ILP64) -#define XPRSint64 long -#else -#define XPRSint64 long long -#endif -// NOLINTEND(runtime/int) - -#if defined(_MSC_VER) -#define XPRS_CC __stdcall -#else -#define XPRS_CC -#endif -// *************************************************************************** -// * values related to XPRSinterrupt * -// *************************************************************************** -#define XPRS_STOP_NONE 0 -#define XPRS_STOP_TIMELIMIT 1 -#define XPRS_STOP_CTRLC 2 -#define XPRS_STOP_NODELIMIT 3 -#define XPRS_STOP_ITERLIMIT 4 -#define XPRS_STOP_MIPGAP 5 -#define XPRS_STOP_SOLLIMIT 6 -#define XPRS_STOP_GENERICERROR 7 -#define XPRS_STOP_MEMORYERROR 8 -#define XPRS_STOP_USER 9 -#define XPRS_STOP_SOLVECOMPLETE 10 -#define XPRS_STOP_LICENSELOST 11 -#define XPRS_STOP_NUMERICALERROR 13 -// 
*************************************************************************** -// * values related to Set/GetControl/Attribinfo * -// *************************************************************************** -#define XPRS_TYPE_NOTDEFINED 0 -#define XPRS_TYPE_INT 1 -#define XPRS_TYPE_INT64 2 -#define XPRS_TYPE_DOUBLE 3 -#define XPRS_TYPE_STRING 4 -// *************************************************************************** -// * values related to NAMESPACES * -// *************************************************************************** -#define XPRS_NAMES_ROW 1 -#define XPRS_NAMES_COLUMN 2 - -#define XPRS_PLUSINFINITY 1.0e+20 -#define XPRS_MINUSINFINITY -1.0e+20 -#define XPRS_MAXBANNERLENGTH 512 -#define XPVERSION 41 -#define XPRS_MPSRHSNAME 6001 -#define XPRS_MPSOBJNAME 6002 -#define XPRS_MPSRANGENAME 6003 -#define XPRS_MPSBOUNDNAME 6004 -#define XPRS_OUTPUTMASK 6005 -#define XPRS_TUNERMETHODFILE 6017 -#define XPRS_TUNEROUTPUTPATH 6018 -#define XPRS_TUNERSESSIONNAME 6019 -#define XPRS_COMPUTEEXECSERVICE 6022 -#define XPRS_MAXCUTTIME 8149 -#define XPRS_MAXSTALLTIME 8443 -#define XPRS_TUNERMAXTIME 8364 -#define XPRS_MATRIXTOL 7001 -#define XPRS_PIVOTTOL 7002 -#define XPRS_FEASTOL 7003 -#define XPRS_OUTPUTTOL 7004 -#define XPRS_SOSREFTOL 7005 -#define XPRS_OPTIMALITYTOL 7006 -#define XPRS_ETATOL 7007 -#define XPRS_RELPIVOTTOL 7008 -#define XPRS_MIPTOL 7009 -#define XPRS_MIPTOLTARGET 7010 -#define XPRS_BARPERTURB 7011 -#define XPRS_MIPADDCUTOFF 7012 -#define XPRS_MIPABSCUTOFF 7013 -#define XPRS_MIPRELCUTOFF 7014 -#define XPRS_PSEUDOCOST 7015 -#define XPRS_PENALTY 7016 -#define XPRS_BIGM 7018 -#define XPRS_MIPABSSTOP 7019 -#define XPRS_MIPRELSTOP 7020 -#define XPRS_CROSSOVERACCURACYTOL 7023 -#define XPRS_PRIMALPERTURB 7024 -#define XPRS_DUALPERTURB 7025 -#define XPRS_BAROBJSCALE 7026 -#define XPRS_BARRHSSCALE 7027 -#define XPRS_CHOLESKYTOL 7032 -#define XPRS_BARGAPSTOP 7033 -#define XPRS_BARDUALSTOP 7034 -#define XPRS_BARPRIMALSTOP 7035 -#define XPRS_BARSTEPSTOP 
7036 -#define XPRS_ELIMTOL 7042 -#define XPRS_MARKOWITZTOL 7047 -#define XPRS_MIPABSGAPNOTIFY 7064 -#define XPRS_MIPRELGAPNOTIFY 7065 -#define XPRS_BARLARGEBOUND 7067 -#define XPRS_PPFACTOR 7069 -#define XPRS_REPAIRINDEFINITEQMAX 7071 -#define XPRS_BARGAPTARGET 7073 -#define XPRS_DUMMYCONTROL 7075 -#define XPRS_BARSTARTWEIGHT 7076 -#define XPRS_BARFREESCALE 7077 -#define XPRS_SBEFFORT 7086 -#define XPRS_HEURDIVERANDOMIZE 7089 -#define XPRS_HEURSEARCHEFFORT 7090 -#define XPRS_CUTFACTOR 7091 -#define XPRS_EIGENVALUETOL 7097 -#define XPRS_INDLINBIGM 7099 -#define XPRS_TREEMEMORYSAVINGTARGET 7100 -#define XPRS_INDPRELINBIGM 7102 -#define XPRS_RELAXTREEMEMORYLIMIT 7105 -#define XPRS_MIPABSGAPNOTIFYOBJ 7108 -#define XPRS_MIPABSGAPNOTIFYBOUND 7109 -#define XPRS_PRESOLVEMAXGROW 7110 -#define XPRS_HEURSEARCHTARGETSIZE 7112 -#define XPRS_CROSSOVERRELPIVOTTOL 7113 -#define XPRS_CROSSOVERRELPIVOTTOLSAFE 7114 -#define XPRS_DETLOGFREQ 7116 -#define XPRS_MAXIMPLIEDBOUND 7120 -#define XPRS_FEASTOLTARGET 7121 -#define XPRS_OPTIMALITYTOLTARGET 7122 -#define XPRS_PRECOMPONENTSEFFORT 7124 -#define XPRS_LPLOGDELAY 7127 -#define XPRS_HEURDIVEITERLIMIT 7128 -#define XPRS_BARKERNEL 7130 -#define XPRS_FEASTOLPERTURB 7132 -#define XPRS_CROSSOVERFEASWEIGHT 7133 -#define XPRS_LUPIVOTTOL 7139 -#define XPRS_MIPRESTARTGAPTHRESHOLD 7140 -#define XPRS_NODEPROBINGEFFORT 7141 -#define XPRS_INPUTTOL 7143 -#define XPRS_MIPRESTARTFACTOR 7145 -#define XPRS_BAROBJPERTURB 7146 -#define XPRS_CPIALPHA 7149 -#define XPRS_GLOBALBOUNDINGBOX 7154 -#define XPRS_TIMELIMIT 7158 -#define XPRS_SOLTIMELIMIT 7159 -#define XPRS_REPAIRINFEASTIMELIMIT 7160 -#define XPRS_EXTRAROWS 8004 -#define XPRS_EXTRACOLS 8005 -#define XPRS_LPITERLIMIT 8007 -#define XPRS_LPLOG 8009 -#define XPRS_SCALING 8010 -#define XPRS_PRESOLVE 8011 -#define XPRS_CRASH 8012 -#define XPRS_PRICINGALG 8013 -#define XPRS_INVERTFREQ 8014 -#define XPRS_INVERTMIN 8015 -#define XPRS_MAXNODE 8018 -#define XPRS_MAXTIME 8020 -#define XPRS_MAXMIPSOL 8021 
-#define XPRS_SIFTPASSES 8022 -#define XPRS_DEFAULTALG 8023 -#define XPRS_VARSELECTION 8025 -#define XPRS_NODESELECTION 8026 -#define XPRS_BACKTRACK 8027 -#define XPRS_MIPLOG 8028 -#define XPRS_KEEPNROWS 8030 -#define XPRS_MPSECHO 8032 -#define XPRS_MAXPAGELINES 8034 -#define XPRS_OUTPUTLOG 8035 -#define XPRS_BARSOLUTION 8038 -#define XPRS_CACHESIZE 8043 -#define XPRS_CROSSOVER 8044 -#define XPRS_BARITERLIMIT 8045 -#define XPRS_CHOLESKYALG 8046 -#define XPRS_BAROUTPUT 8047 -#define XPRS_EXTRAMIPENTS 8051 -#define XPRS_REFACTOR 8052 -#define XPRS_BARTHREADS 8053 -#define XPRS_KEEPBASIS 8054 -#define XPRS_CROSSOVEROPS 8060 -#define XPRS_VERSION 8061 -#define XPRS_CROSSOVERTHREADS 8065 -#define XPRS_BIGMMETHOD 8068 -#define XPRS_MPSNAMELENGTH 8071 -#define XPRS_ELIMFILLIN 8073 -#define XPRS_PRESOLVEOPS 8077 -#define XPRS_MIPPRESOLVE 8078 -#define XPRS_MIPTHREADS 8079 -#define XPRS_BARORDER 8080 -#define XPRS_BREADTHFIRST 8082 -#define XPRS_AUTOPERTURB 8084 -#define XPRS_DENSECOLLIMIT 8086 -#define XPRS_CALLBACKFROMMASTERTHREAD 8090 -#define XPRS_MAXMCOEFFBUFFERELEMS 8091 -#define XPRS_REFINEOPS 8093 -#define XPRS_LPREFINEITERLIMIT 8094 -#define XPRS_MIPREFINEITERLIMIT 8095 -#define XPRS_DUALIZEOPS 8097 -#define XPRS_CROSSOVERITERLIMIT 8104 -#define XPRS_PREBASISRED 8106 -#define XPRS_PRESORT 8107 -#define XPRS_PREPERMUTE 8108 -#define XPRS_PREPERMUTESEED 8109 -#define XPRS_MAXMEMORYSOFT 8112 -#define XPRS_CUTFREQ 8116 -#define XPRS_SYMSELECT 8117 -#define XPRS_SYMMETRY 8118 -#define XPRS_MAXMEMORYHARD 8119 -#define XPRS_MIQCPALG 8125 -#define XPRS_QCCUTS 8126 -#define XPRS_QCROOTALG 8127 -#define XPRS_PRECONVERTSEPARABLE 8128 -#define XPRS_ALGAFTERNETWORK 8129 -#define XPRS_TRACE 8130 -#define XPRS_MAXIIS 8131 -#define XPRS_CPUTIME 8133 -#define XPRS_COVERCUTS 8134 -#define XPRS_GOMCUTS 8135 -#define XPRS_LPFOLDING 8136 -#define XPRS_MPSFORMAT 8137 -#define XPRS_CUTSTRATEGY 8138 -#define XPRS_CUTDEPTH 8139 -#define XPRS_TREECOVERCUTS 8140 -#define XPRS_TREEGOMCUTS 
8141 -#define XPRS_CUTSELECT 8142 -#define XPRS_TREECUTSELECT 8143 -#define XPRS_DUALIZE 8144 -#define XPRS_DUALGRADIENT 8145 -#define XPRS_SBITERLIMIT 8146 -#define XPRS_SBBEST 8147 -#define XPRS_BARINDEFLIMIT 8153 -#define XPRS_HEURFREQ 8155 -#define XPRS_HEURDEPTH 8156 -#define XPRS_HEURMAXSOL 8157 -#define XPRS_HEURNODES 8158 -#define XPRS_LNPBEST 8160 -#define XPRS_LNPITERLIMIT 8161 -#define XPRS_BRANCHCHOICE 8162 -#define XPRS_BARREGULARIZE 8163 -#define XPRS_SBSELECT 8164 -#define XPRS_LOCALCHOICE 8170 -#define XPRS_LOCALBACKTRACK 8171 -#define XPRS_DUALSTRATEGY 8174 -#define XPRS_L1CACHE 8175 -#define XPRS_HEURDIVESTRATEGY 8177 -#define XPRS_HEURSELECT 8178 -#define XPRS_BARSTART 8180 -#define XPRS_PRESOLVEPASSES 8183 -#define XPRS_BARNUMSTABILITY 8186 -#define XPRS_BARORDERTHREADS 8187 -#define XPRS_EXTRASETS 8190 -#define XPRS_FEASIBILITYPUMP 8193 -#define XPRS_PRECOEFELIM 8194 -#define XPRS_PREDOMCOL 8195 -#define XPRS_HEURSEARCHFREQ 8196 -#define XPRS_HEURDIVESPEEDUP 8197 -#define XPRS_SBESTIMATE 8198 -#define XPRS_BARCORES 8202 -#define XPRS_MAXCHECKSONMAXTIME 8203 -#define XPRS_MAXCHECKSONMAXCUTTIME 8204 -#define XPRS_HISTORYCOSTS 8206 -#define XPRS_ALGAFTERCROSSOVER 8208 -#define XPRS_MUTEXCALLBACKS 8210 -#define XPRS_BARCRASH 8211 -#define XPRS_HEURDIVESOFTROUNDING 8215 -#define XPRS_HEURSEARCHROOTSELECT 8216 -#define XPRS_HEURSEARCHTREESELECT 8217 -#define XPRS_MPS18COMPATIBLE 8223 -#define XPRS_ROOTPRESOLVE 8224 -#define XPRS_CROSSOVERDRP 8227 -#define XPRS_FORCEOUTPUT 8229 -#define XPRS_PRIMALOPS 8231 -#define XPRS_DETERMINISTIC 8232 -#define XPRS_PREPROBING 8238 -#define XPRS_TREEMEMORYLIMIT 8242 -#define XPRS_TREECOMPRESSION 8243 -#define XPRS_TREEDIAGNOSTICS 8244 -#define XPRS_MAXTREEFILESIZE 8245 -#define XPRS_PRECLIQUESTRATEGY 8247 -#define XPRS_REPAIRINFEASMAXTIME 8250 -#define XPRS_IFCHECKCONVEXITY 8251 -#define XPRS_PRIMALUNSHIFT 8252 -#define XPRS_REPAIRINDEFINITEQ 8254 -#define XPRS_MIPRAMPUP 8255 -#define XPRS_MAXLOCALBACKTRACK 8257 
-#define XPRS_USERSOLHEURISTIC 8258 -#define XPRS_FORCEPARALLELDUAL 8265 -#define XPRS_BACKTRACKTIE 8266 -#define XPRS_BRANCHDISJ 8267 -#define XPRS_MIPFRACREDUCE 8270 -#define XPRS_CONCURRENTTHREADS 8274 -#define XPRS_MAXSCALEFACTOR 8275 -#define XPRS_HEURTHREADS 8276 -#define XPRS_THREADS 8278 -#define XPRS_HEURBEFORELP 8280 -#define XPRS_PREDOMROW 8281 -#define XPRS_BRANCHSTRUCTURAL 8282 -#define XPRS_QUADRATICUNSHIFT 8284 -#define XPRS_BARPRESOLVEOPS 8286 -#define XPRS_QSIMPLEXOPS 8288 -#define XPRS_MIPRESTART 8290 -#define XPRS_CONFLICTCUTS 8292 -#define XPRS_PREPROTECTDUAL 8293 -#define XPRS_CORESPERCPU 8296 -#define XPRS_RESOURCESTRATEGY 8297 -#define XPRS_CLAMPING 8301 -#define XPRS_SLEEPONTHREADWAIT 8302 -#define XPRS_PREDUPROW 8307 -#define XPRS_CPUPLATFORM 8312 -#define XPRS_BARALG 8315 -#define XPRS_SIFTING 8319 -#define XPRS_LPLOGSTYLE 8326 -#define XPRS_RANDOMSEED 8328 -#define XPRS_TREEQCCUTS 8331 -#define XPRS_PRELINDEP 8333 -#define XPRS_DUALTHREADS 8334 -#define XPRS_PREOBJCUTDETECT 8336 -#define XPRS_PREBNDREDQUAD 8337 -#define XPRS_PREBNDREDCONE 8338 -#define XPRS_PRECOMPONENTS 8339 -#define XPRS_MAXMIPTASKS 8347 -#define XPRS_MIPTERMINATIONMETHOD 8348 -#define XPRS_PRECONEDECOMP 8349 -#define XPRS_HEURFORCESPECIALOBJ 8350 -#define XPRS_HEURSEARCHROOTCUTFREQ 8351 -#define XPRS_PREELIMQUAD 8353 -#define XPRS_PREIMPLICATIONS 8356 -#define XPRS_TUNERMODE 8359 -#define XPRS_TUNERMETHOD 8360 -#define XPRS_TUNERTARGET 8362 -#define XPRS_TUNERTHREADS 8363 -#define XPRS_TUNERHISTORY 8365 -#define XPRS_TUNERPERMUTE 8366 -#define XPRS_TUNERVERBOSE 8370 -#define XPRS_TUNEROUTPUT 8372 -#define XPRS_PREANALYTICCENTER 8374 -#define XPRS_NETCUTS 8382 -#define XPRS_LPFLAGS 8385 -#define XPRS_MIPKAPPAFREQ 8386 -#define XPRS_OBJSCALEFACTOR 8387 -#define XPRS_TREEFILELOGINTERVAL 8389 -#define XPRS_IGNORECONTAINERCPULIMIT 8390 -#define XPRS_IGNORECONTAINERMEMORYLIMIT 8391 -#define XPRS_MIPDUALREDUCTIONS 8392 -#define XPRS_GENCONSDUALREDUCTIONS 8395 -#define 
XPRS_PWLDUALREDUCTIONS 8396 -#define XPRS_BARFAILITERLIMIT 8398 -#define XPRS_AUTOSCALING 8406 -#define XPRS_GENCONSABSTRANSFORMATION 8408 -#define XPRS_COMPUTEJOBPRIORITY 8409 -#define XPRS_PREFOLDING 8410 -#define XPRS_NETSTALLLIMIT 8412 -#define XPRS_SERIALIZEPREINTSOL 8413 -#define XPRS_NUMERICALEMPHASIS 8416 -#define XPRS_PWLNONCONVEXTRANSFORMATION 8420 -#define XPRS_MIPCOMPONENTS 8421 -#define XPRS_MIPCONCURRENTNODES 8422 -#define XPRS_MIPCONCURRENTSOLVES 8423 -#define XPRS_OUTPUTCONTROLS 8424 -#define XPRS_SIFTSWITCH 8425 -#define XPRS_HEUREMPHASIS 8427 -#define XPRS_COMPUTEMATX 8428 -#define XPRS_COMPUTEMATX_IIS 8429 -#define XPRS_COMPUTEMATX_IISMAXTIME 8430 -#define XPRS_BARREFITER 8431 -#define XPRS_COMPUTELOG 8434 -#define XPRS_SIFTPRESOLVEOPS 8435 -#define XPRS_CHECKINPUTDATA 8436 -#define XPRS_ESCAPENAMES 8440 -#define XPRS_IOTIMEOUT 8442 -#define XPRS_AUTOCUTTING 8446 -#define XPRS_CALLBACKCHECKTIMEDELAY 8451 -#define XPRS_MULTIOBJOPS 8457 -#define XPRS_MULTIOBJLOG 8458 -#define XPRS_GLOBALSPATIALBRANCHIFPREFERORIG 8465 -#define XPRS_PRECONFIGURATION 8470 -#define XPRS_FEASIBILITYJUMP 8471 -#define XPRS_EXTRAELEMS 8006 -#define XPRS_EXTRASETELEMS 8191 -#define XPRS_LPOBJVAL 2001 -#define XPRS_MIPOBJVAL 2003 -#define XPRS_BESTBOUND 2004 -#define XPRS_OBJRHS 2005 -#define XPRS_OBJSENSE 2008 -#define XPRS_ROWS 1001 -#define XPRS_SIMPLEXITER 1009 -#define XPRS_BARITER 5001 -#define XPRS_SOLSTATUS_NOTFOUND 0 -#define XPRS_SOLSTATUS_OPTIMAL 1 -#define XPRS_SOLSTATUS_FEASIBLE 2 -#define XPRS_SOLSTATUS_INFEASIBLE 3 -#define XPRS_SOLSTATUS_UNBOUNDED 4 -#define XPRS_LPSTATUS 1010 -#define XPRS_MIPSTATUS 1011 -#define XPRS_NODES 1013 -#define XPRS_COLS 1018 -#define XPRS_MAXPROBNAMELENGTH 1158 -#define XPRS_LP_UNSTARTED 0 -#define XPRS_LP_OPTIMAL 1 -#define XPRS_LP_INFEAS 2 -#define XPRS_LP_CUTOFF 3 -#define XPRS_LP_UNFINISHED 4 -#define XPRS_LP_UNBOUNDED 5 -#define XPRS_LP_CUTOFF_IN_DUAL 6 -#define XPRS_LP_UNSOLVED 7 -#define XPRS_LP_NONCONVEX 8 -#define 
XPRS_MIP_SOLUTION 4 -#define XPRS_MIP_INFEAS 5 -#define XPRS_MIP_OPTIMAL 6 -#define XPRS_MIP_UNBOUNDED 7 -#define XPRS_ALG_DUAL 2 -#define XPRS_ALG_PRIMAL 3 -#define XPRS_ALG_BARRIER 4 -#define XPRS_OBJ_MINIMIZE 1 -#define XPRS_OBJ_MAXIMIZE -1 -#define XPRS_UUID 3011 -// *************************************************************************** -// * variable types * -// *************************************************************************** -#define XPRS_INTEGER 'I' -#define XPRS_CONTINUOUS 'C' -// *************************************************************************** -// * constraint types * -// *************************************************************************** -#define XPRS_LESS_EQUAL 'L' -#define XPRS_GREATER_EQUAL 'G' -#define XPRS_EQUAL 'E' -#define XPRS_RANGE 'R' -#define XPRS_NONBINDING 'N' -// *************************************************************************** -// * basis status * -// *************************************************************************** -#define XPRS_AT_LOWER 0 -#define XPRS_BASIC 1 -#define XPRS_AT_UPPER 2 -#define XPRS_FREE_SUPER 3 - -// Let's not reformat for rest of the file. 
-// NOLINTBEGIN(whitespace/line_length) -// clang-format off -extern std::function XPRScreateprob; -extern std::function XPRSdestroyprob; -extern std::function XPRSinit; -extern std::function XPRSfree; -extern std::function XPRSgetlicerrmsg; -extern std::function XPRSlicense; -extern std::function XPRSgetbanner; -extern std::function XPRSgetversion; -extern std::function XPRSsetprobname; -extern std::function XPRSsetdefaultcontrol; -extern std::function XPRSinterrupt; -extern std::function XPRSsetintcontrol; -extern std::function XPRSsetintcontrol64; -extern std::function XPRSsetdblcontrol; -extern std::function XPRSsetstrcontrol; -OR_DLL extern std::function XPRSgetintcontrol; -OR_DLL extern std::function XPRSgetintcontrol64; -OR_DLL extern std::function XPRSgetdblcontrol; -OR_DLL extern std::function XPRSgetstringcontrol; -OR_DLL extern std::function XPRSgetintattrib; -OR_DLL extern std::function XPRSgetstringattrib; -OR_DLL extern std::function XPRSgetdblattrib; -extern std::function XPRSgetcontrolinfo; -OR_DLL extern std::function XPRSgetobj; -OR_DLL extern std::function XPRSgetrhs; -OR_DLL extern std::function XPRSgetrhsrange; -OR_DLL extern std::function XPRSgetlb; -OR_DLL extern std::function XPRSgetub; -OR_DLL extern std::function XPRSgetcoef; -extern std::function XPRSgetduals; -extern std::function XPRSgetredcosts; -extern std::function XPRSaddrows; -extern std::function XPRSdelrows; -extern std::function XPRSaddcols; -extern std::function XPRSaddnames; -extern std::function XPRSgetnames; -extern std::function XPRSdelcols; -extern std::function XPRSchgcoltype; -extern std::function XPRSloadbasis; -extern std::function XPRSpostsolve; -extern std::function XPRSchgobjsense; -extern std::function XPRSgetlasterror; -extern std::function XPRSgetbasis; -extern std::function XPRSwriteprob; -OR_DLL extern std::function XPRSgetrowtype; -OR_DLL extern std::function XPRSgetcoltype; -extern std::function XPRSchgbounds; -extern std::function XPRSaddmipsol; -extern 
std::function XPRSgetlpsol; -extern std::function XPRSgetmipsol; -extern std::function XPRSchgobj; -extern std::function XPRSchgcoef; -extern std::function XPRSchgmcoef; -extern std::function XPRSchgmcoef64; -extern std::function XPRSchgmqobj; -extern std::function XPRSchgrhs; -extern std::function XPRSchgrhsrange; -extern std::function XPRSchgrowtype; -extern std::function XPRSdelobj; -extern std::function XPRSaddcbintsol; -extern std::function XPRSremovecbintsol; -extern std::function XPRSaddcbmessage; -extern std::function XPRSlpoptimize; -extern std::function XPRSmipoptimize; -extern std::function XPRSoptimize; -// clang-format on -// NOLINTEND(whitespace/line_length) - -} // namespace operations_research - -#endif // OR_TOOLS_XPRESS_ENVIRONMENT_H_ From b61ec9860ccb2b226587012980e468c4ea4dc8b0 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 18 Jun 2025 18:22:06 +0200 Subject: [PATCH 104/509] reorganize gurobi and xpress side loading --- cmake/cpp.cmake | 2 +- ortools/third_party_solvers/BUILD.bazel | 88 +++ ortools/third_party_solvers/CMakeLists.txt | 36 + ortools/third_party_solvers/dynamic_library.h | 120 +++ .../third_party_solvers/gurobi_environment.cc | 443 +++++++++++ .../third_party_solvers/gurobi_environment.h | 736 ++++++++++++++++++ .../gurobi_parse_header.py | 280 +++++++ .../third_party_solvers/xpress_environment.cc | 389 +++++++++ .../third_party_solvers/xpress_environment.h | 550 +++++++++++++ 9 files changed, 2643 insertions(+), 1 deletion(-) create mode 100644 ortools/third_party_solvers/BUILD.bazel create mode 100644 ortools/third_party_solvers/CMakeLists.txt create mode 100644 ortools/third_party_solvers/dynamic_library.h create mode 100644 ortools/third_party_solvers/gurobi_environment.cc create mode 100644 ortools/third_party_solvers/gurobi_environment.h create mode 100644 ortools/third_party_solvers/gurobi_parse_header.py create mode 100644 ortools/third_party_solvers/xpress_environment.cc create mode 100644 
ortools/third_party_solvers/xpress_environment.h diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index c3fdb9ce8f..269275b0af 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -533,13 +533,13 @@ foreach(SUBPROJECT IN ITEMS ${GUROBI_DIR} ${PDLP_DIR} sat - xpress lp_data packing routing scheduling set_cover port + third_party_solvers util) add_subdirectory(ortools/${SUBPROJECT}) #target_link_libraries(${PROJECT_NAME} PRIVATE ${PROJECT_NAME}_${SUBPROJECT}) diff --git a/ortools/third_party_solvers/BUILD.bazel b/ortools/third_party_solvers/BUILD.bazel new file mode 100644 index 0000000000..a931395271 --- /dev/null +++ b/ortools/third_party_solvers/BUILD.bazel @@ -0,0 +1,88 @@ +# Copyright 2010-2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +package(default_visibility = ["//visibility:public"]) + +config_setting( + name = "on_linux", + constraint_values = ["@platforms//os:linux"], +) + +config_setting( + name = "on_macos", + constraint_values = ["@platforms//os:macos"], +) + +config_setting( + name = "on_windows", + constraint_values = ["@platforms//os:windows"], +) + +cc_library( + name = "dynamic_library", + hdrs = ["dynamic_library.h"], + linkopts = select({ + "on_linux": ["-Wl,--no-as-needed -ldl"], + "on_macos": [], + "on_windows": [], + "//conditions:default": [], + }), + deps = [ + "//ortools/base", + "//ortools/base:logging", + "@abseil-cpp//absl/strings", + ], +) + +cc_library( + name = "gurobi_environment", + srcs = [ + "gurobi_environment.cc", + ], + hdrs = [ + "gurobi_environment.h", + ], + deps = [ + ":dynamic_library", + "//ortools/base", + "//ortools/base:file", + "//ortools/base:status_macros", + "@abseil-cpp//absl/status", + "@abseil-cpp//absl/status:statusor", + "@abseil-cpp//absl/strings", + "@abseil-cpp//absl/synchronization", + "@abseil-cpp//absl/types:optional", + ], +) + +cc_library( + name = "xpress_environment", + srcs = [ + "xpress_environment.cc", + ], + hdrs = [ + "xpress_environment.h", + ], + deps = [ + ":dynamic_library", + "//ortools/base", + "//ortools/base:file", + "//ortools/base:status_macros", + "@abseil-cpp//absl/flags:flag", + "@abseil-cpp//absl/status", + "@abseil-cpp//absl/status:statusor", + "@abseil-cpp//absl/strings", + "@abseil-cpp//absl/synchronization", + "@abseil-cpp//absl/types:optional", + ], +) diff --git a/ortools/third_party_solvers/CMakeLists.txt b/ortools/third_party_solvers/CMakeLists.txt new file mode 100644 index 0000000000..ac5bf08d0d --- /dev/null +++ b/ortools/third_party_solvers/CMakeLists.txt @@ -0,0 +1,36 @@ +# Copyright 2010-2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +file(GLOB _SRCS "*.h" "*.cc") +set(NAME ${PROJECT_NAME}_third_party_solvers) + +add_library(${NAME} OBJECT ${_SRCS}) +set_target_properties(${NAME} PROPERTIES + CXX_STANDARD 17 + CXX_STANDARD_REQUIRED ON + CXX_EXTENSIONS OFF + POSITION_INDEPENDENT_CODE ON + ) +if(MSVC AND BUILD_SHARED_LIBS) + target_compile_definitions(${NAME} PUBLIC "OR_BUILD_DLL") + target_compile_definitions(${NAME} PRIVATE "OR_EXPORT") +endif() +target_include_directories(${NAME} PRIVATE + ${PROJECT_SOURCE_DIR} + ${PROJECT_BINARY_DIR}) +target_link_libraries(${NAME} PRIVATE + absl::status + absl::strings + absl::str_format + absl::synchronization + ) diff --git a/ortools/third_party_solvers/dynamic_library.h b/ortools/third_party_solvers/dynamic_library.h new file mode 100644 index 0000000000..c47a7e7130 --- /dev/null +++ b/ortools/third_party_solvers/dynamic_library.h @@ -0,0 +1,120 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef OR_TOOLS_BASE_DYNAMIC_LIBRARY_H_ +#define OR_TOOLS_BASE_DYNAMIC_LIBRARY_H_ + +#include +#include + +#include "absl/strings/string_view.h" +#include "ortools/base/logging.h" + +#if defined(_MSC_VER) +#define WIN32_LEAN_AND_MEAN // disables several conflicting macros +#include +#elif defined(__MINGW32__) || defined(__MINGW64__) +#include +#elif defined(__GNUC__) +#include +#endif + +class DynamicLibrary { + public: + DynamicLibrary() : library_handle_(nullptr) {} + + ~DynamicLibrary() { + if (library_handle_ == nullptr) { + return; + } + +#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) + FreeLibrary(static_cast(library_handle_)); +#elif defined(__GNUC__) + dlclose(library_handle_); +#endif + } + + bool TryToLoad(const absl::string_view library_name) { + library_name_ = library_name; +#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) + library_handle_ = static_cast(LoadLibraryA(library_name_.c_str())); +#elif defined(__GNUC__) + library_handle_ = dlopen(library_name_.c_str(), RTLD_NOW); +#endif + return library_handle_ != nullptr; + } + + bool LibraryIsLoaded() const { return library_handle_ != nullptr; } + + template + std::function GetFunction(const char* function_name) { +#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) + // On Windows, avoid casting to void*: not supported by MinGW. + FARPROC function_address = + GetProcAddress(static_cast(library_handle_), function_name); +#else // Not Windows. + const void* function_address = dlsym(library_handle_, function_name); +#endif // MinGW. 
+ + CHECK(function_address) + << "Error: could not find function " << std::string(function_name) + << " in " << library_name_; + + return TypeParser::CreateFunction(function_address); + } + + template + std::function GetFunction(const std::string& function_name) { + return GetFunction(function_name.c_str()); + } + + template + void GetFunction(std::function* function, const char* function_name) { + *function = GetFunction(function_name); + } + + template + void GetFunction(std::function* function, + const std::string function_name) { + GetFunction(function, function_name.c_str()); + } + + private: + void* library_handle_ = nullptr; + std::string library_name_; + + template + struct TypeParser {}; + + template + struct TypeParser { +#if defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__) + // Windows: take a FARPROC as argument. + static std::function CreateFunction( + const FARPROC function_address) { + return std::function( + reinterpret_cast(function_address)); + } +#else + // Not Windows: take a void* as argument. + static std::function CreateFunction( + const void* function_address) { + return std::function(reinterpret_cast( + const_cast(function_address))); + } +#endif + }; +}; + +#endif // OR_TOOLS_BASE_DYNAMIC_LIBRARY_H_ diff --git a/ortools/third_party_solvers/gurobi_environment.cc b/ortools/third_party_solvers/gurobi_environment.cc new file mode 100644 index 0000000000..ba2d3ae212 --- /dev/null +++ b/ortools/third_party_solvers/gurobi_environment.cc @@ -0,0 +1,443 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/third_party_solvers/gurobi_environment.h" + +#include +#include +#include +#include + +#include "absl/status/status.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_join.h" +#include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" +#include "ortools/base/logging.h" +#include "ortools/third_party_solvers/dynamic_library.h" + +namespace operations_research { + +// This was generated with the parse_header.py script. +// See the comment at the top of the script. +// Let's not reformat the rest of the file. +// clang-format off + +// This is the 'define' section. 
+std::function GRBisattravailable = + nullptr; +std::function + GRBgetintattr = nullptr; +std::function + GRBsetintattr = nullptr; +std::function + GRBgetintattrelement = nullptr; +std::function + GRBsetintattrelement = nullptr; +std::function + GRBgetintattrarray = nullptr; +std::function + GRBsetintattrarray = nullptr; +std::function + GRBsetintattrlist = nullptr; +std::function + GRBgetcharattrelement = nullptr; +std::function + GRBsetcharattrelement = nullptr; +std::function + GRBgetcharattrarray = nullptr; +std::function + GRBsetcharattrarray = nullptr; +std::function + GRBsetcharattrlist = nullptr; +std::function + GRBgetdblattr = nullptr; +std::function + GRBsetdblattr = nullptr; +std::function + GRBgetdblattrelement = nullptr; +std::function + GRBsetdblattrelement = nullptr; +std::function + GRBgetdblattrarray = nullptr; +std::function + GRBsetdblattrarray = nullptr; +std::function + GRBsetdblattrlist = nullptr; +std::function + GRBgetstrattr = nullptr; +std::function + GRBsetstrattr = nullptr; +std::function + GRBsetcallbackfunc = nullptr; +std::function GRBcbget = + nullptr; +std::function + GRBcbsolution = nullptr; +std::function + GRBcbcut = nullptr; +std::function + GRBcblazy = nullptr; +std::function + GRBgetvars = nullptr; +std::function GRBoptimize = nullptr; +std::function GRBcomputeIIS = nullptr; +std::function GRBwrite = nullptr; +std::function + GRBnewmodel = nullptr; +std::function + GRBaddvar = nullptr; +std::function + GRBaddvars = nullptr; +std::function + GRBaddconstr = nullptr; +std::function + GRBaddconstrs = nullptr; +std::function + GRBaddrangeconstr = nullptr; +std::function + GRBaddsos = nullptr; +std::function + GRBaddgenconstrMax = nullptr; +std::function + GRBaddgenconstrMin = nullptr; +std::function + GRBaddgenconstrAbs = nullptr; +std::function + GRBaddgenconstrAnd = nullptr; +std::function + GRBaddgenconstrOr = nullptr; +std::function + GRBaddgenconstrIndicator = nullptr; +std::function + GRBaddqconstr = nullptr; +std::function 
+ GRBaddqpterms = nullptr; +std::function GRBdelvars = nullptr; +std::function GRBdelconstrs = nullptr; +std::function GRBdelsos = nullptr; +std::function GRBdelgenconstrs = + nullptr; +std::function GRBdelqconstrs = nullptr; +std::function GRBdelq = nullptr; +std::function + GRBchgcoeffs = nullptr; +std::function GRBupdatemodel = nullptr; +std::function GRBfreemodel = nullptr; +std::function GRBterminate = nullptr; +std::function + GRBsetobjectiven = nullptr; +std::function + GRBgetintparam = nullptr; +std::function + GRBgetdblparam = nullptr; +std::function + GRBgetstrparam = nullptr; +std::function + GRBgetintparaminfo = nullptr; +std::function + GRBgetdblparaminfo = nullptr; +std::function + GRBgetstrparaminfo = nullptr; +std::function GRBgetparamtype = + nullptr; +std::function GRBgetparamname = + nullptr; +std::function + GRBsetparam = nullptr; +std::function + GRBsetintparam = nullptr; +std::function + GRBsetdblparam = nullptr; +std::function + GRBsetstrparam = nullptr; +std::function GRBresetparams = nullptr; +std::function GRBcopyparams = nullptr; +std::function GRBgetnumparams = nullptr; +std::function GRBemptyenv = nullptr; +std::function GRBloadenv = nullptr; +std::function GRBstartenv = nullptr; +std::function GRBgetenv = nullptr; +std::function GRBgetmultiobjenv = nullptr; +std::function GRBdiscardmultiobjenvs = nullptr; +std::function GRBfreeenv = nullptr; +std::function GRBgeterrormsg = nullptr; +std::function GRBversion = + nullptr; +std::function GRBplatform = nullptr; + +void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { + // This was generated with the parse_header.py script. + // See the comment at the top of the script. + + // This is the 'assign' section. 
+ gurobi_dynamic_library->GetFunction(&GRBisattravailable, + "GRBisattravailable"); + gurobi_dynamic_library->GetFunction(&GRBgetintattr, "GRBgetintattr"); + gurobi_dynamic_library->GetFunction(&GRBsetintattr, "GRBsetintattr"); + gurobi_dynamic_library->GetFunction(&GRBgetintattrelement, + "GRBgetintattrelement"); + gurobi_dynamic_library->GetFunction(&GRBsetintattrelement, + "GRBsetintattrelement"); + gurobi_dynamic_library->GetFunction(&GRBgetintattrarray, + "GRBgetintattrarray"); + gurobi_dynamic_library->GetFunction(&GRBsetintattrarray, + "GRBsetintattrarray"); + gurobi_dynamic_library->GetFunction(&GRBsetintattrlist, "GRBsetintattrlist"); + gurobi_dynamic_library->GetFunction(&GRBgetcharattrelement, + "GRBgetcharattrelement"); + gurobi_dynamic_library->GetFunction(&GRBsetcharattrelement, + "GRBsetcharattrelement"); + gurobi_dynamic_library->GetFunction(&GRBgetcharattrarray, + "GRBgetcharattrarray"); + gurobi_dynamic_library->GetFunction(&GRBsetcharattrarray, + "GRBsetcharattrarray"); + gurobi_dynamic_library->GetFunction(&GRBsetcharattrlist, + "GRBsetcharattrlist"); + gurobi_dynamic_library->GetFunction(&GRBgetdblattr, "GRBgetdblattr"); + gurobi_dynamic_library->GetFunction(&GRBsetdblattr, "GRBsetdblattr"); + gurobi_dynamic_library->GetFunction(&GRBgetdblattrelement, + "GRBgetdblattrelement"); + gurobi_dynamic_library->GetFunction(&GRBsetdblattrelement, + "GRBsetdblattrelement"); + gurobi_dynamic_library->GetFunction(&GRBgetdblattrarray, + "GRBgetdblattrarray"); + gurobi_dynamic_library->GetFunction(&GRBsetdblattrarray, + "GRBsetdblattrarray"); + gurobi_dynamic_library->GetFunction(&GRBsetdblattrlist, "GRBsetdblattrlist"); + gurobi_dynamic_library->GetFunction(&GRBgetstrattr, "GRBgetstrattr"); + gurobi_dynamic_library->GetFunction(&GRBsetstrattr, "GRBsetstrattr"); + gurobi_dynamic_library->GetFunction(&GRBsetcallbackfunc, + "GRBsetcallbackfunc"); + gurobi_dynamic_library->GetFunction(&GRBcbget, "GRBcbget"); + gurobi_dynamic_library->GetFunction(&GRBcbsolution, 
"GRBcbsolution"); + gurobi_dynamic_library->GetFunction(&GRBcbcut, "GRBcbcut"); + gurobi_dynamic_library->GetFunction(&GRBcblazy, "GRBcblazy"); + gurobi_dynamic_library->GetFunction(&GRBgetvars, "GRBgetvars"); + gurobi_dynamic_library->GetFunction(&GRBoptimize, "GRBoptimize"); + gurobi_dynamic_library->GetFunction(&GRBcomputeIIS, "GRBcomputeIIS"); + gurobi_dynamic_library->GetFunction(&GRBwrite, "GRBwrite"); + gurobi_dynamic_library->GetFunction(&GRBnewmodel, "GRBnewmodel"); + gurobi_dynamic_library->GetFunction(&GRBaddvar, "GRBaddvar"); + gurobi_dynamic_library->GetFunction(&GRBaddvars, "GRBaddvars"); + gurobi_dynamic_library->GetFunction(&GRBaddconstr, "GRBaddconstr"); + gurobi_dynamic_library->GetFunction(&GRBaddconstrs, "GRBaddconstrs"); + gurobi_dynamic_library->GetFunction(&GRBaddrangeconstr, "GRBaddrangeconstr"); + gurobi_dynamic_library->GetFunction(&GRBaddsos, "GRBaddsos"); + gurobi_dynamic_library->GetFunction(&GRBaddgenconstrMax, + "GRBaddgenconstrMax"); + gurobi_dynamic_library->GetFunction(&GRBaddgenconstrMin, + "GRBaddgenconstrMin"); + gurobi_dynamic_library->GetFunction(&GRBaddgenconstrAbs, + "GRBaddgenconstrAbs"); + gurobi_dynamic_library->GetFunction(&GRBaddgenconstrAnd, + "GRBaddgenconstrAnd"); + gurobi_dynamic_library->GetFunction(&GRBaddgenconstrOr, "GRBaddgenconstrOr"); + gurobi_dynamic_library->GetFunction(&GRBaddgenconstrIndicator, + "GRBaddgenconstrIndicator"); + gurobi_dynamic_library->GetFunction(&GRBaddqconstr, "GRBaddqconstr"); + gurobi_dynamic_library->GetFunction(&GRBaddqpterms, "GRBaddqpterms"); + gurobi_dynamic_library->GetFunction(&GRBdelvars, "GRBdelvars"); + gurobi_dynamic_library->GetFunction(&GRBdelconstrs, "GRBdelconstrs"); + gurobi_dynamic_library->GetFunction(&GRBdelsos, "GRBdelsos"); + gurobi_dynamic_library->GetFunction(&GRBdelgenconstrs, "GRBdelgenconstrs"); + gurobi_dynamic_library->GetFunction(&GRBdelqconstrs, "GRBdelqconstrs"); + gurobi_dynamic_library->GetFunction(&GRBdelq, "GRBdelq"); + 
gurobi_dynamic_library->GetFunction(&GRBchgcoeffs, "GRBchgcoeffs"); + gurobi_dynamic_library->GetFunction(&GRBupdatemodel, "GRBupdatemodel"); + gurobi_dynamic_library->GetFunction(&GRBfreemodel, "GRBfreemodel"); + gurobi_dynamic_library->GetFunction(&GRBterminate, "GRBterminate"); + gurobi_dynamic_library->GetFunction(&GRBsetobjectiven, "GRBsetobjectiven"); + gurobi_dynamic_library->GetFunction(&GRBgetintparam, "GRBgetintparam"); + gurobi_dynamic_library->GetFunction(&GRBgetdblparam, "GRBgetdblparam"); + gurobi_dynamic_library->GetFunction(&GRBgetstrparam, "GRBgetstrparam"); + gurobi_dynamic_library->GetFunction(&GRBsetparam, "GRBsetparam"); + gurobi_dynamic_library->GetFunction(&GRBsetintparam, "GRBsetintparam"); + gurobi_dynamic_library->GetFunction(&GRBsetdblparam, "GRBsetdblparam"); + gurobi_dynamic_library->GetFunction(&GRBsetstrparam, "GRBsetstrparam"); + gurobi_dynamic_library->GetFunction(&GRBresetparams, "GRBresetparams"); + gurobi_dynamic_library->GetFunction(&GRBcopyparams, "GRBcopyparams"); + gurobi_dynamic_library->GetFunction(&GRBloadenv, "GRBloadenv"); + gurobi_dynamic_library->GetFunction(&GRBstartenv, "GRBstartenv"); + gurobi_dynamic_library->GetFunction(&GRBemptyenv, "GRBemptyenv"); + gurobi_dynamic_library->GetFunction(&GRBgetnumparams, "GRBgetnumparams"); + gurobi_dynamic_library->GetFunction(&GRBgetparamname, "GRBgetparamname"); + gurobi_dynamic_library->GetFunction(&GRBgetparamtype, "GRBgetparamtype"); + gurobi_dynamic_library->GetFunction(&GRBgetintparaminfo, + "GRBgetintparaminfo"); + gurobi_dynamic_library->GetFunction(&GRBgetdblparaminfo, + "GRBgetdblparaminfo"); + gurobi_dynamic_library->GetFunction(&GRBgetstrparaminfo, + "GRBgetstrparaminfo"); + gurobi_dynamic_library->GetFunction(&GRBgetenv, "GRBgetenv"); + gurobi_dynamic_library->GetFunction(&GRBgetmultiobjenv, "GRBgetmultiobjenv"); + gurobi_dynamic_library->GetFunction(&GRBdiscardmultiobjenvs, + "GRBdiscardmultiobjenvs"); + gurobi_dynamic_library->GetFunction(&GRBfreeenv, 
"GRBfreeenv"); + gurobi_dynamic_library->GetFunction(&GRBgeterrormsg, "GRBgeterrormsg"); + gurobi_dynamic_library->GetFunction(&GRBversion, "GRBversion"); + gurobi_dynamic_library->GetFunction(&GRBplatform, "GRBplatform"); +} + +std::vector GurobiDynamicLibraryPotentialPaths() { + std::vector potential_paths; + const std::vector kGurobiVersions = { + "1202", "1201", "1200", "1103", "1102", "1101", "1100", "1003", + "1002", "1001", "1000", "952", "951", "950", "911", + "910", "903", "902", "811", "801", "752"}; + potential_paths.reserve(kGurobiVersions.size() * 3); + + // Look for libraries pointed by GUROBI_HOME first. + const char* gurobi_home_from_env = getenv("GUROBI_HOME"); + if (gurobi_home_from_env != nullptr) { + for (const absl::string_view version : kGurobiVersions) { + const absl::string_view lib = version.substr(0, version.size() - 1); +#if defined(_MSC_VER) // Windows + potential_paths.push_back( + absl::StrCat(gurobi_home_from_env, "\\bin\\gurobi", lib, ".dll")); +#elif defined(__APPLE__) // OS X + potential_paths.push_back( + absl::StrCat(gurobi_home_from_env, "/lib/libgurobi", lib, ".dylib")); +#elif defined(__GNUC__) // Linux + potential_paths.push_back( + absl::StrCat(gurobi_home_from_env, "/lib/libgurobi", lib, ".so")); + potential_paths.push_back( + absl::StrCat(gurobi_home_from_env, "/lib64/libgurobi", lib, ".so")); +#else + LOG(ERROR) << "OS Not recognized by gurobi_environment.cc." + << " You won't be able to use Gurobi."; +#endif + } + } + + // Search for canonical places. 
+ for (const absl::string_view version : kGurobiVersions) { + const absl::string_view lib = version.substr(0, version.size() - 1); +#if defined(_MSC_VER) // Windows + potential_paths.push_back(absl::StrCat("C:\\Program Files\\gurobi", version, + "\\win64\\bin\\gurobi", lib, + ".dll")); + potential_paths.push_back(absl::StrCat( + "C:\\gurobi", version, "\\win64\\bin\\gurobi", lib, ".dll")); + potential_paths.push_back(absl::StrCat("gurobi", lib, ".dll")); +#elif defined(__APPLE__) // OS X + potential_paths.push_back(absl::StrCat( + "/Library/gurobi", version, "/mac64/lib/libgurobi", lib, ".dylib")); + potential_paths.push_back(absl::StrCat("/Library/gurobi", version, + "/macos_universal2/lib/libgurobi", + lib, ".dylib")); +#elif defined(__GNUC__) // Linux + potential_paths.push_back(absl::StrCat( + "/opt/gurobi", version, "/linux64/lib/libgurobi", lib, ".so")); + potential_paths.push_back(absl::StrCat( + "/opt/gurobi", version, "/linux64/lib64/libgurobi", lib, ".so")); + potential_paths.push_back( + absl::StrCat("/opt/gurobi/linux64/lib/libgurobi", lib, ".so")); + potential_paths.push_back( + absl::StrCat("/opt/gurobi/linux64/lib64/libgurobi", lib, ".so")); +#else + LOG(ERROR) << "OS Not recognized by gurobi_environment.cc." + << " You won't be able to use Gurobi."; +#endif + } + +#if defined(__GNUC__) // path in linux64 gurobi/optimizer docker image. 
+ for (const absl::string_view version : + {"12.0.2", "12.0.1", "12.0.0", "11.0.3", "11.0.2", "11.0.1", "11.0.0", + "10.0.3", "10.0.2", "10.0.1", "10.0.0", "9.5.2", "9.5.1", "9.5.0"}) { + potential_paths.push_back( + absl::StrCat("/opt/gurobi/linux64/lib/libgurobi.so.", version)); + } +#endif + return potential_paths; +} + +absl::Status LoadGurobiDynamicLibrary( + std::vector potential_paths) { + static std::once_flag gurobi_loading_done; + static absl::Status gurobi_load_status; + static DynamicLibrary gurobi_library; + static absl::Mutex mutex; + + absl::MutexLock lock(&mutex); + + std::call_once(gurobi_loading_done, [&potential_paths]() { + const std::vector canonical_paths = + GurobiDynamicLibraryPotentialPaths(); + potential_paths.insert(potential_paths.end(), canonical_paths.begin(), + canonical_paths.end()); + for (const absl::string_view path : potential_paths) { + if (gurobi_library.TryToLoad(path)) { + LOG(INFO) << "Found the Gurobi library in '" << path << "'."; + break; + } + } + + if (gurobi_library.LibraryIsLoaded()) { + LoadGurobiFunctions(&gurobi_library); + gurobi_load_status = absl::OkStatus(); + } else { + gurobi_load_status = absl::NotFoundError(absl::StrCat( + "Could not find the Gurobi shared library. Looked in: ['", + absl::StrJoin(potential_paths, "', '"), + "']. If you know where it" + " is, pass the full path to 'LoadGurobiDynamicLibrary()'.")); + } + }); + return gurobi_load_status; +} + +} // namespace operations_research diff --git a/ortools/third_party_solvers/gurobi_environment.h b/ortools/third_party_solvers/gurobi_environment.h new file mode 100644 index 0000000000..6bb4e10ec4 --- /dev/null +++ b/ortools/third_party_solvers/gurobi_environment.h @@ -0,0 +1,736 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef THIRD_PARTY_ORTOOLS_ORTOOLS_THIRD_PARTY_SOLVERS_GUROBI_ENVIRONMENT_H_ +#define THIRD_PARTY_ORTOOLS_ORTOOLS_THIRD_PARTY_SOLVERS_GUROBI_ENVIRONMENT_H_ + +#include +#include + +#include "absl/status/status.h" +#include "absl/strings/string_view.h" + +#if defined(_MSC_VER) +#define GUROBI_STDCALL __stdcall +#else +#define GUROBI_STDCALL +#endif + +extern "C" { + +typedef struct _GRBmodel GRBmodel; +typedef struct _GRBenv GRBenv; +typedef struct _GRBsvec { + int len; + int* ind; + double* val; +} GRBsvec; +} + +namespace operations_research { + +// Force the loading of the gurobi dynamic library. It returns true if the +// library was successfully loaded. This method can only be called once. +// Successive calls are no-op. +// +// Note that it does not check if a token license can be grabbed. +absl::Status LoadGurobiDynamicLibrary( + std::vector potential_paths); + +// clang-format off + +// The list of #define and extern std::function<> below is generated directly +// from gurobi_c.h via parse_header.py +// See the top comment on the parse_header.py file. 
+// This is the header section + +#define GRB_VERSION_MAJOR 10 +#define GRB_VERSION_MINOR 0 +#define GRB_VERSION_TECHNICAL 0 +#define DEFAULT_CS_PRIORITY 0 +#define MAX_CS_PRIORITY 100 +#define DEFAULT_CS_PORT 61000 +#define DEFAULT_CS_HANGUP 60 +#define GRB_ERROR_OUT_OF_MEMORY 10001 +#define GRB_ERROR_NULL_ARGUMENT 10002 +#define GRB_ERROR_INVALID_ARGUMENT 10003 +#define GRB_ERROR_UNKNOWN_ATTRIBUTE 10004 +#define GRB_ERROR_DATA_NOT_AVAILABLE 10005 +#define GRB_ERROR_INDEX_OUT_OF_RANGE 10006 +#define GRB_ERROR_UNKNOWN_PARAMETER 10007 +#define GRB_ERROR_VALUE_OUT_OF_RANGE 10008 +#define GRB_ERROR_NO_LICENSE 10009 +#define GRB_ERROR_SIZE_LIMIT_EXCEEDED 10010 +#define GRB_ERROR_CALLBACK 10011 +#define GRB_ERROR_FILE_READ 10012 +#define GRB_ERROR_FILE_WRITE 10013 +#define GRB_ERROR_NUMERIC 10014 +#define GRB_ERROR_IIS_NOT_INFEASIBLE 10015 +#define GRB_ERROR_NOT_FOR_MIP 10016 +#define GRB_ERROR_OPTIMIZATION_IN_PROGRESS 10017 +#define GRB_ERROR_DUPLICATES 10018 +#define GRB_ERROR_NODEFILE 10019 +#define GRB_ERROR_Q_NOT_PSD 10020 +#define GRB_ERROR_QCP_EQUALITY_CONSTRAINT 10021 +#define GRB_ERROR_NETWORK 10022 +#define GRB_ERROR_JOB_REJECTED 10023 +#define GRB_ERROR_NOT_SUPPORTED 10024 +#define GRB_ERROR_EXCEED_2B_NONZEROS 10025 +#define GRB_ERROR_INVALID_PIECEWISE_OBJ 10026 +#define GRB_ERROR_UPDATEMODE_CHANGE 10027 +#define GRB_ERROR_CLOUD 10028 +#define GRB_ERROR_MODEL_MODIFICATION 10029 +#define GRB_ERROR_CSWORKER 10030 +#define GRB_ERROR_TUNE_MODEL_TYPES 10031 +#define GRB_ERROR_SECURITY 10032 +#define GRB_LESS_EQUAL '<' +#define GRB_GREATER_EQUAL '>' +#define GRB_EQUAL '=' +#define GRB_CONTINUOUS 'C' +#define GRB_BINARY 'B' +#define GRB_INTEGER 'I' +#define GRB_SEMICONT 'S' +#define GRB_SEMIINT 'N' +#define GRB_MINIMIZE 1 +#define GRB_MAXIMIZE -1 +#define GRB_SOS_TYPE1 1 +#define GRB_SOS_TYPE2 2 +#define GRB_INFINITY 1e100 +#define GRB_UNDEFINED 1e101 +#define GRB_MAXINT 2000000000 +#define GRB_MAX_NAMELEN 255 +#define GRB_MAX_STRLEN 512 +#define GRB_MAX_TAGLEN 
10240 +#define GRB_MAX_CONCURRENT 64 +#define CB_ARGS GRBmodel *model, void *cbdata, int where, void *usrdata +#define LOGCB_ARGS char *msg, void *logdata +extern std::function GRBisattravailable; +extern std::function GRBgetintattr; +extern std::function GRBsetintattr; +extern std::function GRBgetintattrelement; +extern std::function GRBsetintattrelement; +extern std::function GRBgetintattrarray; +extern std::function GRBsetintattrarray; +extern std::function GRBsetintattrlist; +extern std::function GRBgetcharattrelement; +extern std::function GRBsetcharattrelement; +extern std::function GRBgetcharattrarray; +extern std::function GRBsetcharattrarray; +extern std::function GRBsetcharattrlist; +extern std::function GRBgetdblattr; +extern std::function GRBsetdblattr; +extern std::function GRBgetdblattrelement; +extern std::function GRBsetdblattrelement; +extern std::function GRBgetdblattrarray; +extern std::function GRBsetdblattrarray; +extern std::function GRBsetdblattrlist; +extern std::function GRBgetstrattr; +extern std::function GRBsetstrattr; +extern std::function GRBsetcallbackfunc; +extern std::function GRBcbget; +extern std::function GRBcbsolution; +extern std::function GRBcbcut; +extern std::function GRBcblazy; +#define GRB_INT_ATTR_NUMCONSTRS "NumConstrs" +#define GRB_INT_ATTR_NUMVARS "NumVars" +#define GRB_INT_ATTR_NUMSOS "NumSOS" +#define GRB_INT_ATTR_NUMQCONSTRS "NumQConstrs" +#define GRB_INT_ATTR_NUMGENCONSTRS "NumGenConstrs" +#define GRB_INT_ATTR_NUMNZS "NumNZs" +#define GRB_DBL_ATTR_DNUMNZS "DNumNZs" +#define GRB_INT_ATTR_NUMQNZS "NumQNZs" +#define GRB_INT_ATTR_NUMQCNZS "NumQCNZs" +#define GRB_INT_ATTR_NUMINTVARS "NumIntVars" +#define GRB_INT_ATTR_NUMBINVARS "NumBinVars" +#define GRB_INT_ATTR_NUMPWLOBJVARS "NumPWLObjVars" +#define GRB_STR_ATTR_MODELNAME "ModelName" +#define GRB_INT_ATTR_MODELSENSE "ModelSense" +#define GRB_DBL_ATTR_OBJCON "ObjCon" +#define GRB_INT_ATTR_IS_MIP "IsMIP" +#define GRB_INT_ATTR_IS_QP "IsQP" +#define GRB_INT_ATTR_IS_QCP 
"IsQCP" +#define GRB_INT_ATTR_IS_MULTIOBJ "IsMultiObj" +#define GRB_INT_ATTR_LICENSE_EXPIRATION "LicenseExpiration" +#define GRB_INT_ATTR_NUMTAGGED "NumTagged" +#define GRB_INT_ATTR_FINGERPRINT "Fingerprint" +#define GRB_INT_ATTR_BATCHERRORCODE "BatchErrorCode" +#define GRB_STR_ATTR_BATCHERRORMESSAGE "BatchErrorMessage" +#define GRB_STR_ATTR_BATCHID "BatchID" +#define GRB_INT_ATTR_BATCHSTATUS "BatchStatus" +#define GRB_DBL_ATTR_LB "LB" +#define GRB_DBL_ATTR_UB "UB" +#define GRB_DBL_ATTR_OBJ "Obj" +#define GRB_CHAR_ATTR_VTYPE "VType" +#define GRB_DBL_ATTR_START "Start" +#define GRB_DBL_ATTR_PSTART "PStart" +#define GRB_INT_ATTR_BRANCHPRIORITY "BranchPriority" +#define GRB_STR_ATTR_VARNAME "VarName" +#define GRB_INT_ATTR_PWLOBJCVX "PWLObjCvx" +#define GRB_DBL_ATTR_VARHINTVAL "VarHintVal" +#define GRB_INT_ATTR_VARHINTPRI "VarHintPri" +#define GRB_INT_ATTR_PARTITION "Partition" +#define GRB_INT_ATTR_POOLIGNORE "PoolIgnore" +#define GRB_STR_ATTR_VTAG "VTag" +#define GRB_STR_ATTR_CTAG "CTag" +#define GRB_DBL_ATTR_RHS "RHS" +#define GRB_DBL_ATTR_DSTART "DStart" +#define GRB_CHAR_ATTR_SENSE "Sense" +#define GRB_STR_ATTR_CONSTRNAME "ConstrName" +#define GRB_INT_ATTR_LAZY "Lazy" +#define GRB_STR_ATTR_QCTAG "QCTag" +#define GRB_DBL_ATTR_QCRHS "QCRHS" +#define GRB_CHAR_ATTR_QCSENSE "QCSense" +#define GRB_STR_ATTR_QCNAME "QCName" +#define GRB_INT_ATTR_GENCONSTRTYPE "GenConstrType" +#define GRB_STR_ATTR_GENCONSTRNAME "GenConstrName" +#define GRB_INT_ATTR_FUNCPIECES "FuncPieces" +#define GRB_DBL_ATTR_FUNCPIECEERROR "FuncPieceError" +#define GRB_DBL_ATTR_FUNCPIECELENGTH "FuncPieceLength" +#define GRB_DBL_ATTR_FUNCPIECERATIO "FuncPieceRatio" +#define GRB_DBL_ATTR_MAX_COEFF "MaxCoeff" +#define GRB_DBL_ATTR_MIN_COEFF "MinCoeff" +#define GRB_DBL_ATTR_MAX_BOUND "MaxBound" +#define GRB_DBL_ATTR_MIN_BOUND "MinBound" +#define GRB_DBL_ATTR_MAX_OBJ_COEFF "MaxObjCoeff" +#define GRB_DBL_ATTR_MIN_OBJ_COEFF "MinObjCoeff" +#define GRB_DBL_ATTR_MAX_RHS "MaxRHS" +#define GRB_DBL_ATTR_MIN_RHS 
"MinRHS" +#define GRB_DBL_ATTR_MAX_QCCOEFF "MaxQCCoeff" +#define GRB_DBL_ATTR_MIN_QCCOEFF "MinQCCoeff" +#define GRB_DBL_ATTR_MAX_QOBJ_COEFF "MaxQObjCoeff" +#define GRB_DBL_ATTR_MIN_QOBJ_COEFF "MinQObjCoeff" +#define GRB_DBL_ATTR_MAX_QCLCOEFF "MaxQCLCoeff" +#define GRB_DBL_ATTR_MIN_QCLCOEFF "MinQCLCoeff" +#define GRB_DBL_ATTR_MAX_QCRHS "MaxQCRHS" +#define GRB_DBL_ATTR_MIN_QCRHS "MinQCRHS" +#define GRB_DBL_ATTR_RUNTIME "Runtime" +#define GRB_DBL_ATTR_WORK "Work" +#define GRB_INT_ATTR_STATUS "Status" +#define GRB_DBL_ATTR_OBJVAL "ObjVal" +#define GRB_DBL_ATTR_OBJBOUND "ObjBound" +#define GRB_DBL_ATTR_OBJBOUNDC "ObjBoundC" +#define GRB_DBL_ATTR_POOLOBJBOUND "PoolObjBound" +#define GRB_DBL_ATTR_POOLOBJVAL "PoolObjVal" +#define GRB_DBL_ATTR_MIPGAP "MIPGap" +#define GRB_INT_ATTR_SOLCOUNT "SolCount" +#define GRB_DBL_ATTR_ITERCOUNT "IterCount" +#define GRB_INT_ATTR_BARITERCOUNT "BarIterCount" +#define GRB_DBL_ATTR_NODECOUNT "NodeCount" +#define GRB_DBL_ATTR_OPENNODECOUNT "OpenNodeCount" +#define GRB_INT_ATTR_HASDUALNORM "HasDualNorm" +#define GRB_INT_ATTR_CONCURRENTWINMETHOD "ConcurrentWinMethod" +#define GRB_DBL_ATTR_X "X" +#define GRB_DBL_ATTR_XN "Xn" +#define GRB_DBL_ATTR_BARX "BarX" +#define GRB_DBL_ATTR_RC "RC" +#define GRB_DBL_ATTR_VDUALNORM "VDualNorm" +#define GRB_INT_ATTR_VBASIS "VBasis" +#define GRB_DBL_ATTR_PI "Pi" +#define GRB_DBL_ATTR_QCPI "QCPi" +#define GRB_DBL_ATTR_SLACK "Slack" +#define GRB_DBL_ATTR_QCSLACK "QCSlack" +#define GRB_DBL_ATTR_CDUALNORM "CDualNorm" +#define GRB_INT_ATTR_CBASIS "CBasis" +#define GRB_DBL_ATTR_MAX_VIO "MaxVio" +#define GRB_DBL_ATTR_BOUND_VIO "BoundVio" +#define GRB_DBL_ATTR_BOUND_SVIO "BoundSVio" +#define GRB_INT_ATTR_BOUND_VIO_INDEX "BoundVioIndex" +#define GRB_INT_ATTR_BOUND_SVIO_INDEX "BoundSVioIndex" +#define GRB_DBL_ATTR_BOUND_VIO_SUM "BoundVioSum" +#define GRB_DBL_ATTR_BOUND_SVIO_SUM "BoundSVioSum" +#define GRB_DBL_ATTR_CONSTR_VIO "ConstrVio" +#define GRB_DBL_ATTR_CONSTR_SVIO "ConstrSVio" +#define 
GRB_INT_ATTR_CONSTR_VIO_INDEX "ConstrVioIndex" +#define GRB_INT_ATTR_CONSTR_SVIO_INDEX "ConstrSVioIndex" +#define GRB_DBL_ATTR_CONSTR_VIO_SUM "ConstrVioSum" +#define GRB_DBL_ATTR_CONSTR_SVIO_SUM "ConstrSVioSum" +#define GRB_DBL_ATTR_CONSTR_RESIDUAL "ConstrResidual" +#define GRB_DBL_ATTR_CONSTR_SRESIDUAL "ConstrSResidual" +#define GRB_INT_ATTR_CONSTR_RESIDUAL_INDEX "ConstrResidualIndex" +#define GRB_INT_ATTR_CONSTR_SRESIDUAL_INDEX "ConstrSResidualIndex" +#define GRB_DBL_ATTR_CONSTR_RESIDUAL_SUM "ConstrResidualSum" +#define GRB_DBL_ATTR_CONSTR_SRESIDUAL_SUM "ConstrSResidualSum" +#define GRB_DBL_ATTR_DUAL_VIO "DualVio" +#define GRB_DBL_ATTR_DUAL_SVIO "DualSVio" +#define GRB_INT_ATTR_DUAL_VIO_INDEX "DualVioIndex" +#define GRB_INT_ATTR_DUAL_SVIO_INDEX "DualSVioIndex" +#define GRB_DBL_ATTR_DUAL_VIO_SUM "DualVioSum" +#define GRB_DBL_ATTR_DUAL_SVIO_SUM "DualSVioSum" +#define GRB_DBL_ATTR_DUAL_RESIDUAL "DualResidual" +#define GRB_DBL_ATTR_DUAL_SRESIDUAL "DualSResidual" +#define GRB_INT_ATTR_DUAL_RESIDUAL_INDEX "DualResidualIndex" +#define GRB_INT_ATTR_DUAL_SRESIDUAL_INDEX "DualSResidualIndex" +#define GRB_DBL_ATTR_DUAL_RESIDUAL_SUM "DualResidualSum" +#define GRB_DBL_ATTR_DUAL_SRESIDUAL_SUM "DualSResidualSum" +#define GRB_DBL_ATTR_INT_VIO "IntVio" +#define GRB_INT_ATTR_INT_VIO_INDEX "IntVioIndex" +#define GRB_DBL_ATTR_INT_VIO_SUM "IntVioSum" +#define GRB_DBL_ATTR_COMPL_VIO "ComplVio" +#define GRB_INT_ATTR_COMPL_VIO_INDEX "ComplVioIndex" +#define GRB_DBL_ATTR_COMPL_VIO_SUM "ComplVioSum" +#define GRB_DBL_ATTR_KAPPA "Kappa" +#define GRB_DBL_ATTR_KAPPA_EXACT "KappaExact" +#define GRB_DBL_ATTR_N2KAPPA "N2Kappa" +#define GRB_DBL_ATTR_SA_OBJLOW "SAObjLow" +#define GRB_DBL_ATTR_SA_OBJUP "SAObjUp" +#define GRB_DBL_ATTR_SA_LBLOW "SALBLow" +#define GRB_DBL_ATTR_SA_LBUP "SALBUp" +#define GRB_DBL_ATTR_SA_UBLOW "SAUBLow" +#define GRB_DBL_ATTR_SA_UBUP "SAUBUp" +#define GRB_DBL_ATTR_SA_RHSLOW "SARHSLow" +#define GRB_DBL_ATTR_SA_RHSUP "SARHSUp" +#define GRB_INT_ATTR_IIS_MINIMAL "IISMinimal" 
+#define GRB_INT_ATTR_IIS_LB "IISLB" +#define GRB_INT_ATTR_IIS_UB "IISUB" +#define GRB_INT_ATTR_IIS_CONSTR "IISConstr" +#define GRB_INT_ATTR_IIS_SOS "IISSOS" +#define GRB_INT_ATTR_IIS_QCONSTR "IISQConstr" +#define GRB_INT_ATTR_IIS_GENCONSTR "IISGenConstr" +#define GRB_INT_ATTR_IIS_LBFORCE "IISLBForce" +#define GRB_INT_ATTR_IIS_UBFORCE "IISUBForce" +#define GRB_INT_ATTR_IIS_CONSTRFORCE "IISConstrForce" +#define GRB_INT_ATTR_IIS_SOSFORCE "IISSOSForce" +#define GRB_INT_ATTR_IIS_QCONSTRFORCE "IISQConstrForce" +#define GRB_INT_ATTR_IIS_GENCONSTRFORCE "IISGenConstrForce" +#define GRB_INT_ATTR_TUNE_RESULTCOUNT "TuneResultCount" +#define GRB_DBL_ATTR_FARKASDUAL "FarkasDual" +#define GRB_DBL_ATTR_FARKASPROOF "FarkasProof" +#define GRB_DBL_ATTR_UNBDRAY "UnbdRay" +#define GRB_INT_ATTR_INFEASVAR "InfeasVar" +#define GRB_INT_ATTR_UNBDVAR "UnbdVar" +#define GRB_INT_ATTR_VARPRESTAT "VarPreStat" +#define GRB_DBL_ATTR_PREFIXVAL "PreFixVal" +#define GRB_DBL_ATTR_OBJN "ObjN" +#define GRB_DBL_ATTR_OBJNVAL "ObjNVal" +#define GRB_DBL_ATTR_OBJNCON "ObjNCon" +#define GRB_DBL_ATTR_OBJNWEIGHT "ObjNWeight" +#define GRB_INT_ATTR_OBJNPRIORITY "ObjNPriority" +#define GRB_DBL_ATTR_OBJNRELTOL "ObjNRelTol" +#define GRB_DBL_ATTR_OBJNABSTOL "ObjNAbsTol" +#define GRB_STR_ATTR_OBJNNAME "ObjNName" +#define GRB_DBL_ATTR_SCENNLB "ScenNLB" +#define GRB_DBL_ATTR_SCENNUB "ScenNUB" +#define GRB_DBL_ATTR_SCENNOBJ "ScenNObj" +#define GRB_DBL_ATTR_SCENNRHS "ScenNRHS" +#define GRB_STR_ATTR_SCENNNAME "ScenNName" +#define GRB_DBL_ATTR_SCENNX "ScenNX" +#define GRB_DBL_ATTR_SCENNOBJBOUND "ScenNObjBound" +#define GRB_DBL_ATTR_SCENNOBJVAL "ScenNObjVal" +#define GRB_INT_ATTR_NUMOBJ "NumObj" +#define GRB_INT_ATTR_NUMSCENARIOS "NumScenarios" +#define GRB_INT_ATTR_NUMSTART "NumStart" +#define GRB_GENCONSTR_MAX 0 +#define GRB_GENCONSTR_MIN 1 +#define GRB_GENCONSTR_ABS 2 +#define GRB_GENCONSTR_AND 3 +#define GRB_GENCONSTR_OR 4 +#define GRB_GENCONSTR_NORM 5 +#define GRB_GENCONSTR_INDICATOR 6 +#define GRB_GENCONSTR_PWL 7 
+#define GRB_GENCONSTR_POLY 8 +#define GRB_GENCONSTR_EXP 9 +#define GRB_GENCONSTR_EXPA 10 +#define GRB_GENCONSTR_LOG 11 +#define GRB_GENCONSTR_LOGA 12 +#define GRB_GENCONSTR_POW 13 +#define GRB_GENCONSTR_SIN 14 +#define GRB_GENCONSTR_COS 15 +#define GRB_GENCONSTR_TAN 16 +#define GRB_GENCONSTR_LOGISTIC 17 +#define GRB_CB_POLLING 0 +#define GRB_CB_PRESOLVE 1 +#define GRB_CB_SIMPLEX 2 +#define GRB_CB_MIP 3 +#define GRB_CB_MIPSOL 4 +#define GRB_CB_MIPNODE 5 +#define GRB_CB_MESSAGE 6 +#define GRB_CB_BARRIER 7 +#define GRB_CB_MULTIOBJ 8 +#define GRB_CB_IIS 9 +#define GRB_CB_PRE_COLDEL 1000 +#define GRB_CB_PRE_ROWDEL 1001 +#define GRB_CB_PRE_SENCHG 1002 +#define GRB_CB_PRE_BNDCHG 1003 +#define GRB_CB_PRE_COECHG 1004 +#define GRB_CB_SPX_ITRCNT 2000 +#define GRB_CB_SPX_OBJVAL 2001 +#define GRB_CB_SPX_PRIMINF 2002 +#define GRB_CB_SPX_DUALINF 2003 +#define GRB_CB_SPX_ISPERT 2004 +#define GRB_CB_MIP_OBJBST 3000 +#define GRB_CB_MIP_OBJBND 3001 +#define GRB_CB_MIP_NODCNT 3002 +#define GRB_CB_MIP_SOLCNT 3003 +#define GRB_CB_MIP_CUTCNT 3004 +#define GRB_CB_MIP_NODLFT 3005 +#define GRB_CB_MIP_ITRCNT 3006 +#define GRB_CB_MIP_OPENSCENARIOS 3007 +#define GRB_CB_MIP_PHASE 3008 +#define GRB_CB_MIPSOL_SOL 4001 +#define GRB_CB_MIPSOL_OBJ 4002 +#define GRB_CB_MIPSOL_OBJBST 4003 +#define GRB_CB_MIPSOL_OBJBND 4004 +#define GRB_CB_MIPSOL_NODCNT 4005 +#define GRB_CB_MIPSOL_SOLCNT 4006 +#define GRB_CB_MIPSOL_OPENSCENARIOS 4007 +#define GRB_CB_MIPSOL_PHASE 4008 +#define GRB_CB_MIPNODE_STATUS 5001 +#define GRB_CB_MIPNODE_REL 5002 +#define GRB_CB_MIPNODE_OBJBST 5003 +#define GRB_CB_MIPNODE_OBJBND 5004 +#define GRB_CB_MIPNODE_NODCNT 5005 +#define GRB_CB_MIPNODE_SOLCNT 5006 +#define GRB_CB_MIPNODE_BRVAR 5007 +#define GRB_CB_MIPNODE_OPENSCENARIOS 5008 +#define GRB_CB_MIPNODE_PHASE 5009 +#define GRB_CB_MSG_STRING 6001 +#define GRB_CB_RUNTIME 6002 +#define GRB_CB_WORK 6003 +#define GRB_CB_BARRIER_ITRCNT 7001 +#define GRB_CB_BARRIER_PRIMOBJ 7002 +#define GRB_CB_BARRIER_DUALOBJ 7003 +#define 
GRB_CB_BARRIER_PRIMINF 7004 +#define GRB_CB_BARRIER_DUALINF 7005 +#define GRB_CB_BARRIER_COMPL 7006 +#define GRB_CB_MULTIOBJ_OBJCNT 8001 +#define GRB_CB_MULTIOBJ_SOLCNT 8002 +#define GRB_CB_MULTIOBJ_SOL 8003 +#define GRB_CB_IIS_CONSTRMIN 9001 +#define GRB_CB_IIS_CONSTRMAX 9002 +#define GRB_CB_IIS_CONSTRGUESS 9003 +#define GRB_CB_IIS_BOUNDMIN 9004 +#define GRB_CB_IIS_BOUNDMAX 9005 +#define GRB_CB_IIS_BOUNDGUESS 9006 +#define GRB_FEASRELAX_LINEAR 0 +#define GRB_FEASRELAX_QUADRATIC 1 +#define GRB_FEASRELAX_CARDINALITY 2 +extern std::function GRBgetvars; +extern std::function GRBoptimize; +extern std::function GRBcomputeIIS; +#define MALLOCCB_ARGS size_t size, void *syscbusrdata +#define CALLOCCB_ARGS size_t nmemb, size_t size, void *syscbusrdata +#define REALLOCCB_ARGS void *ptr, size_t size, void *syscbusrdata +#define FREECB_ARGS void *ptr, void *syscbusrdata +#define THREADCREATECB_ARGS void **threadP, void (*start_routine)(void *), void *arg, void *syscbusrdata +#define THREADJOINCB_ARGS void *thread, void *syscbusrdata +extern std::function GRBwrite; +extern std::function GRBnewmodel; +extern std::function GRBaddvar; +extern std::function GRBaddvars; +extern std::function GRBaddconstr; +extern std::function GRBaddconstrs; +extern std::function GRBaddrangeconstr; +extern std::function GRBaddsos; +extern std::function GRBaddgenconstrMax; +extern std::function GRBaddgenconstrMin; +extern std::function GRBaddgenconstrAbs; +extern std::function GRBaddgenconstrAnd; +extern std::function GRBaddgenconstrOr; +extern std::function GRBaddgenconstrIndicator; +extern std::function GRBaddqconstr; +extern std::function GRBaddqpterms; +extern std::function GRBdelvars; +extern std::function GRBdelconstrs; +extern std::function GRBdelsos; +extern std::function GRBdelgenconstrs; +extern std::function GRBdelqconstrs; +extern std::function GRBdelq; +extern std::function GRBchgcoeffs; +extern std::function GRBupdatemodel; +extern std::function GRBfreemodel; +#define GRB_LOADED 1 
+#define GRB_OPTIMAL 2 +#define GRB_INFEASIBLE 3 +#define GRB_INF_OR_UNBD 4 +#define GRB_UNBOUNDED 5 +#define GRB_CUTOFF 6 +#define GRB_ITERATION_LIMIT 7 +#define GRB_NODE_LIMIT 8 +#define GRB_TIME_LIMIT 9 +#define GRB_SOLUTION_LIMIT 10 +#define GRB_INTERRUPTED 11 +#define GRB_NUMERIC 12 +#define GRB_SUBOPTIMAL 13 +#define GRB_INPROGRESS 14 +#define GRB_USER_OBJ_LIMIT 15 +#define GRB_WORK_LIMIT 16 +#define GRB_MEM_LIMIT 17 +#define GRB_BASIC 0 +#define GRB_NONBASIC_LOWER -1 +#define GRB_NONBASIC_UPPER -2 +#define GRB_SUPERBASIC -3 +#define GRB_INT_PAR_BARITERLIMIT "BarIterLimit" +#define GRB_DBL_PAR_CUTOFF "Cutoff" +#define GRB_DBL_PAR_ITERATIONLIMIT "IterationLimit" +#define GRB_DBL_PAR_NODELIMIT "NodeLimit" +#define GRB_INT_PAR_SOLUTIONLIMIT "SolutionLimit" +#define GRB_DBL_PAR_TIMELIMIT "TimeLimit" +#define GRB_DBL_PAR_WORKLIMIT "WorkLimit" +#define GRB_DBL_PAR_MEMLIMIT "MemLimit" +#define GRB_DBL_PAR_SOFTMEMLIMIT "SoftMemLimit" +#define GRB_DBL_PAR_BESTOBJSTOP "BestObjStop" +#define GRB_DBL_PAR_BESTBDSTOP "BestBdStop" +#define GRB_DBL_PAR_FEASIBILITYTOL "FeasibilityTol" +#define GRB_DBL_PAR_INTFEASTOL "IntFeasTol" +#define GRB_DBL_PAR_MARKOWITZTOL "MarkowitzTol" +#define GRB_DBL_PAR_MIPGAP "MIPGap" +#define GRB_DBL_PAR_MIPGAPABS "MIPGapAbs" +#define GRB_DBL_PAR_OPTIMALITYTOL "OptimalityTol" +#define GRB_DBL_PAR_PSDTOL "PSDTol" +#define GRB_INT_PAR_METHOD "Method" +#define GRB_DBL_PAR_PERTURBVALUE "PerturbValue" +#define GRB_DBL_PAR_OBJSCALE "ObjScale" +#define GRB_INT_PAR_SCALEFLAG "ScaleFlag" +#define GRB_INT_PAR_SIMPLEXPRICING "SimplexPricing" +#define GRB_INT_PAR_QUAD "Quad" +#define GRB_INT_PAR_NORMADJUST "NormAdjust" +#define GRB_INT_PAR_SIFTING "Sifting" +#define GRB_INT_PAR_SIFTMETHOD "SiftMethod" +#define GRB_INT_PAR_LPWARMSTART "LPWarmStart" +#define GRB_INT_PAR_NETWORKALG "NetworkAlg" +#define GRB_DBL_PAR_BARCONVTOL "BarConvTol" +#define GRB_INT_PAR_BARCORRECTORS "BarCorrectors" +#define GRB_INT_PAR_BARHOMOGENEOUS "BarHomogeneous" +#define 
GRB_INT_PAR_BARORDER "BarOrder" +#define GRB_DBL_PAR_BARQCPCONVTOL "BarQCPConvTol" +#define GRB_INT_PAR_CROSSOVER "Crossover" +#define GRB_INT_PAR_CROSSOVERBASIS "CrossoverBasis" +#define GRB_INT_PAR_BRANCHDIR "BranchDir" +#define GRB_INT_PAR_DEGENMOVES "DegenMoves" +#define GRB_INT_PAR_DISCONNECTED "Disconnected" +#define GRB_DBL_PAR_HEURISTICS "Heuristics" +#define GRB_DBL_PAR_IMPROVESTARTGAP "ImproveStartGap" +#define GRB_DBL_PAR_IMPROVESTARTTIME "ImproveStartTime" +#define GRB_DBL_PAR_IMPROVESTARTNODES "ImproveStartNodes" +#define GRB_INT_PAR_INTEGRALITYFOCUS "IntegralityFocus" +#define GRB_INT_PAR_MINRELNODES "MinRelNodes" +#define GRB_INT_PAR_MIPFOCUS "MIPFocus" +#define GRB_INT_PAR_NLPHEUR "NLPHeur" +#define GRB_STR_PAR_NODEFILEDIR "NodefileDir" +#define GRB_DBL_PAR_NODEFILESTART "NodefileStart" +#define GRB_INT_PAR_NODEMETHOD "NodeMethod" +#define GRB_DBL_PAR_NORELHEURTIME "NoRelHeurTime" +#define GRB_DBL_PAR_NORELHEURWORK "NoRelHeurWork" +#define GRB_INT_PAR_OBBT "OBBT" +#define GRB_INT_PAR_PUMPPASSES "PumpPasses" +#define GRB_INT_PAR_RINS "RINS" +#define GRB_STR_PAR_SOLFILES "SolFiles" +#define GRB_INT_PAR_STARTNODELIMIT "StartNodeLimit" +#define GRB_INT_PAR_SUBMIPNODES "SubMIPNodes" +#define GRB_INT_PAR_SYMMETRY "Symmetry" +#define GRB_INT_PAR_VARBRANCH "VarBranch" +#define GRB_INT_PAR_SOLUTIONNUMBER "SolutionNumber" +#define GRB_INT_PAR_ZEROOBJNODES "ZeroObjNodes" +#define GRB_INT_PAR_CUTS "Cuts" +#define GRB_INT_PAR_CLIQUECUTS "CliqueCuts" +#define GRB_INT_PAR_COVERCUTS "CoverCuts" +#define GRB_INT_PAR_FLOWCOVERCUTS "FlowCoverCuts" +#define GRB_INT_PAR_FLOWPATHCUTS "FlowPathCuts" +#define GRB_INT_PAR_GUBCOVERCUTS "GUBCoverCuts" +#define GRB_INT_PAR_IMPLIEDCUTS "ImpliedCuts" +#define GRB_INT_PAR_PROJIMPLIEDCUTS "ProjImpliedCuts" +#define GRB_INT_PAR_MIPSEPCUTS "MIPSepCuts" +#define GRB_INT_PAR_MIRCUTS "MIRCuts" +#define GRB_INT_PAR_STRONGCGCUTS "StrongCGCuts" +#define GRB_INT_PAR_MODKCUTS "ModKCuts" +#define GRB_INT_PAR_ZEROHALFCUTS "ZeroHalfCuts" 
+#define GRB_INT_PAR_NETWORKCUTS "NetworkCuts" +#define GRB_INT_PAR_SUBMIPCUTS "SubMIPCuts" +#define GRB_INT_PAR_INFPROOFCUTS "InfProofCuts" +#define GRB_INT_PAR_RLTCUTS "RLTCuts" +#define GRB_INT_PAR_RELAXLIFTCUTS "RelaxLiftCuts" +#define GRB_INT_PAR_BQPCUTS "BQPCuts" +#define GRB_INT_PAR_PSDCUTS "PSDCuts" +#define GRB_INT_PAR_LIFTPROJECTCUTS "LiftProjectCuts" +#define GRB_INT_PAR_CUTAGGPASSES "CutAggPasses" +#define GRB_INT_PAR_CUTPASSES "CutPasses" +#define GRB_INT_PAR_GOMORYPASSES "GomoryPasses" +#define GRB_STR_PAR_WORKERPOOL "WorkerPool" +#define GRB_STR_PAR_WORKERPASSWORD "WorkerPassword" +#define GRB_STR_PAR_COMPUTESERVER "ComputeServer" +#define GRB_STR_PAR_TOKENSERVER "TokenServer" +#define GRB_STR_PAR_SERVERPASSWORD "ServerPassword" +#define GRB_INT_PAR_SERVERTIMEOUT "ServerTimeout" +#define GRB_STR_PAR_CSROUTER "CSRouter" +#define GRB_STR_PAR_CSGROUP "CSGroup" +#define GRB_DBL_PAR_CSQUEUETIMEOUT "CSQueueTimeout" +#define GRB_INT_PAR_CSPRIORITY "CSPriority" +#define GRB_INT_PAR_CSIDLETIMEOUT "CSIdleTimeout" +#define GRB_INT_PAR_CSTLSINSECURE "CSTLSInsecure" +#define GRB_INT_PAR_TSPORT "TSPort" +#define GRB_STR_PAR_CLOUDACCESSID "CloudAccessID" +#define GRB_STR_PAR_CLOUDSECRETKEY "CloudSecretKey" +#define GRB_STR_PAR_CLOUDPOOL "CloudPool" +#define GRB_STR_PAR_CLOUDHOST "CloudHost" +#define GRB_STR_PAR_CSMANAGER "CSManager" +#define GRB_STR_PAR_CSAUTHTOKEN "CSAuthToken" +#define GRB_STR_PAR_CSAPIACCESSID "CSAPIAccessID" +#define GRB_STR_PAR_CSAPISECRET "CSAPISecret" +#define GRB_INT_PAR_CSBATCHMODE "CSBatchMode" +#define GRB_STR_PAR_USERNAME "Username" +#define GRB_STR_PAR_CSAPPNAME "CSAppName" +#define GRB_INT_PAR_CSCLIENTLOG "CSClientLog" +#define GRB_STR_PAR_WLSACCESSID "WLSAccessID" +#define GRB_STR_PAR_WLSSECRET "WLSSecret" +#define GRB_INT_PAR_WLSTOKENDURATION "WLSTokenDuration" +#define GRB_DBL_PAR_WLSTOKENREFRESH "WLSTokenRefresh" +#define GRB_STR_PAR_WLSTOKEN "WLSToken" +#define GRB_INT_PAR_LICENSEID "LicenseID" +#define GRB_INT_PAR_AGGREGATE 
"Aggregate" +#define GRB_INT_PAR_AGGFILL "AggFill" +#define GRB_INT_PAR_CONCURRENTMIP "ConcurrentMIP" +#define GRB_INT_PAR_CONCURRENTJOBS "ConcurrentJobs" +#define GRB_INT_PAR_DISPLAYINTERVAL "DisplayInterval" +#define GRB_INT_PAR_DISTRIBUTEDMIPJOBS "DistributedMIPJobs" +#define GRB_INT_PAR_DUALREDUCTIONS "DualReductions" +#define GRB_DBL_PAR_FEASRELAXBIGM "FeasRelaxBigM" +#define GRB_INT_PAR_IISMETHOD "IISMethod" +#define GRB_INT_PAR_INFUNBDINFO "InfUnbdInfo" +#define GRB_INT_PAR_JSONSOLDETAIL "JSONSolDetail" +#define GRB_INT_PAR_LAZYCONSTRAINTS "LazyConstraints" +#define GRB_STR_PAR_LOGFILE "LogFile" +#define GRB_INT_PAR_LOGTOCONSOLE "LogToConsole" +#define GRB_INT_PAR_MIQCPMETHOD "MIQCPMethod" +#define GRB_INT_PAR_NONCONVEX "NonConvex" +#define GRB_INT_PAR_NUMERICFOCUS "NumericFocus" +#define GRB_INT_PAR_OUTPUTFLAG "OutputFlag" +#define GRB_INT_PAR_PRECRUSH "PreCrush" +#define GRB_INT_PAR_PREDEPROW "PreDepRow" +#define GRB_INT_PAR_PREDUAL "PreDual" +#define GRB_INT_PAR_PREPASSES "PrePasses" +#define GRB_INT_PAR_PREQLINEARIZE "PreQLinearize" +#define GRB_INT_PAR_PRESOLVE "Presolve" +#define GRB_DBL_PAR_PRESOS1BIGM "PreSOS1BigM" +#define GRB_DBL_PAR_PRESOS2BIGM "PreSOS2BigM" +#define GRB_INT_PAR_PRESOS1ENCODING "PreSOS1Encoding" +#define GRB_INT_PAR_PRESOS2ENCODING "PreSOS2Encoding" +#define GRB_INT_PAR_PRESPARSIFY "PreSparsify" +#define GRB_INT_PAR_PREMIQCPFORM "PreMIQCPForm" +#define GRB_INT_PAR_QCPDUAL "QCPDual" +#define GRB_INT_PAR_RECORD "Record" +#define GRB_STR_PAR_RESULTFILE "ResultFile" +#define GRB_INT_PAR_SEED "Seed" +#define GRB_INT_PAR_SOLUTIONTARGET "SolutionTarget" +#define GRB_INT_PAR_THREADS "Threads" +#define GRB_DBL_PAR_TUNETIMELIMIT "TuneTimeLimit" +#define GRB_INT_PAR_TUNERESULTS "TuneResults" +#define GRB_INT_PAR_TUNECRITERION "TuneCriterion" +#define GRB_INT_PAR_TUNETRIALS "TuneTrials" +#define GRB_INT_PAR_TUNEOUTPUT "TuneOutput" +#define GRB_INT_PAR_TUNEJOBS "TuneJobs" +#define GRB_DBL_PAR_TUNECLEANUP "TuneCleanup" +#define 
GRB_DBL_PAR_TUNETARGETMIPGAP "TuneTargetMIPGap" +#define GRB_DBL_PAR_TUNETARGETTIME "TuneTargetTime" +#define GRB_INT_PAR_TUNEMETRIC "TuneMetric" +#define GRB_INT_PAR_UPDATEMODE "UpdateMode" +#define GRB_INT_PAR_OBJNUMBER "ObjNumber" +#define GRB_INT_PAR_MULTIOBJMETHOD "MultiObjMethod" +#define GRB_INT_PAR_MULTIOBJPRE "MultiObjPre" +#define GRB_INT_PAR_SCENARIONUMBER "ScenarioNumber" +#define GRB_INT_PAR_POOLSOLUTIONS "PoolSolutions" +#define GRB_DBL_PAR_POOLGAP "PoolGap" +#define GRB_DBL_PAR_POOLGAPABS "PoolGapAbs" +#define GRB_INT_PAR_POOLSEARCHMODE "PoolSearchMode" +#define GRB_INT_PAR_IGNORENAMES "IgnoreNames" +#define GRB_INT_PAR_STARTNUMBER "StartNumber" +#define GRB_INT_PAR_PARTITIONPLACE "PartitionPlace" +#define GRB_INT_PAR_FUNCPIECES "FuncPieces" +#define GRB_DBL_PAR_FUNCPIECELENGTH "FuncPieceLength" +#define GRB_DBL_PAR_FUNCPIECEERROR "FuncPieceError" +#define GRB_DBL_PAR_FUNCPIECERATIO "FuncPieceRatio" +#define GRB_DBL_PAR_FUNCMAXVAL "FuncMaxVal" +#define GRB_STR_PAR_DUMMY "Dummy" +#define GRB_STR_PAR_JOBID "JobID" +#define GRB_CUTS_AUTO -1 +#define GRB_CUTS_OFF 0 +#define GRB_CUTS_CONSERVATIVE 1 +#define GRB_CUTS_AGGRESSIVE 2 +#define GRB_CUTS_VERYAGGRESSIVE 3 +#define GRB_PRESOLVE_AUTO -1 +#define GRB_PRESOLVE_OFF 0 +#define GRB_PRESOLVE_CONSERVATIVE 1 +#define GRB_PRESOLVE_AGGRESSIVE 2 +#define GRB_METHOD_NONE -1 +#define GRB_METHOD_AUTO -1 +#define GRB_METHOD_PRIMAL 0 +#define GRB_METHOD_DUAL 1 +#define GRB_METHOD_BARRIER 2 +#define GRB_METHOD_CONCURRENT 3 +#define GRB_METHOD_DETERMINISTIC_CONCURRENT 4 +#define GRB_METHOD_DETERMINISTIC_CONCURRENT_SIMPLEX 5 +#define GRB_BARHOMOGENEOUS_AUTO -1 +#define GRB_BARHOMOGENEOUS_OFF 0 +#define GRB_BARHOMOGENEOUS_ON 1 +#define GRB_MIPFOCUS_BALANCED 0 +#define GRB_MIPFOCUS_FEASIBILITY 1 +#define GRB_MIPFOCUS_OPTIMALITY 2 +#define GRB_MIPFOCUS_BESTBOUND 3 +#define GRB_BARORDER_AUTOMATIC -1 +#define GRB_BARORDER_AMD 0 +#define GRB_BARORDER_NESTEDDISSECTION 1 +#define GRB_SIMPLEXPRICING_AUTO -1 +#define 
GRB_SIMPLEXPRICING_PARTIAL 0 +#define GRB_SIMPLEXPRICING_STEEPEST_EDGE 1 +#define GRB_SIMPLEXPRICING_DEVEX 2 +#define GRB_SIMPLEXPRICING_STEEPEST_QUICK 3 +#define GRB_VARBRANCH_AUTO -1 +#define GRB_VARBRANCH_PSEUDO_REDUCED 0 +#define GRB_VARBRANCH_PSEUDO_SHADOW 1 +#define GRB_VARBRANCH_MAX_INFEAS 2 +#define GRB_VARBRANCH_STRONG 3 +#define GRB_PARTITION_EARLY 16 +#define GRB_PARTITION_ROOTSTART 8 +#define GRB_PARTITION_ROOTEND 4 +#define GRB_PARTITION_NODES 2 +#define GRB_PARTITION_CLEANUP 1 +#define GRB_PHASE_MIP_NOREL 0 +#define GRB_PHASE_MIP_SEARCH 1 +#define GRB_PHASE_MIP_IMPROVE 2 +extern std::function GRBterminate; +extern std::function GRBsetobjectiven; +extern std::function GRBgetintparam; +extern std::function GRBgetdblparam; +extern std::function GRBgetstrparam; +extern std::function GRBsetparam; +extern std::function GRBsetintparam; +extern std::function GRBsetdblparam; +extern std::function GRBsetstrparam; +extern std::function GRBresetparams; +extern std::function GRBcopyparams; +extern std::function GRBloadenv; +extern std::function GRBstartenv; +extern std::function GRBemptyenv; +extern std::function GRBgetnumparams; +extern std::function GRBgetparamname; +extern std::function GRBgetparamtype; +extern std::function GRBgetintparaminfo; +extern std::function GRBgetdblparaminfo; +extern std::function GRBgetstrparaminfo; +extern std::function GRBgetenv; +extern std::function GRBgetmultiobjenv; +extern std::function GRBdiscardmultiobjenvs; +extern std::function GRBfreeenv; +extern std::function GRBgeterrormsg; +extern std::function GRBversion; +extern std::function GRBplatform; +#define GRB_BATCH_STATUS_UNKNOWN 0 +#define GRB_BATCH_CREATED 1 +#define GRB_BATCH_SUBMITTED 2 +#define GRB_BATCH_ABORTED 3 +#define GRB_BATCH_FAILED 4 +#define GRB_BATCH_COMPLETED 5 + +// clang-format on +} // namespace operations_research + +#endif // THIRD_PARTY_ORTOOLS_ORTOOLS_THIRD_PARTY_SOLVERS_GUROBI_ENVIRONMENT_H_ diff --git 
a/ortools/third_party_solvers/gurobi_parse_header.py b/ortools/third_party_solvers/gurobi_parse_header.py new file mode 100644 index 0000000000..8811e0d4ed --- /dev/null +++ b/ortools/third_party_solvers/gurobi_parse_header.py @@ -0,0 +1,280 @@ +# Copyright 2010-2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Gurobi header parser script to generate code for the environment.{cc|h}. + +To use, run the script + copy gurobi_c.h somewhere. + edit the file and add the signature for the GRBisqp: + int __stdcall + GRBisqp(GRBenv**, const char*, const char*, const char*, int, const char*); + blaze run \ + ortools/third_party_solvers/gurobi_parse_header \ + -- + +It will output all methods defined in the EXPORTED_FUNCTIONS field, and all +defined symbols. 
+ +The list of symbols to export is found by the following command: + grep -oh -e "GRB[A-Za-z0-9_]*" /gurobi_interface.cc \ + /gurobi_proto_solver.* /math_opt/solvers/gurobi* \ + /math_opt/solvers/gurobi/g_gurobi* | sort -u + +This will printout on the console 3 sections: + +------------------- header ------------------- + +to copy paste in environment.h + +------------------- define ------------------- + +to copy in the define part of environment.cc + +------------------- assign ------------------- + +to copy in the assign part of environment.cc +""" + +import re +from typing import Sequence +from absl import app + +EXPORTED_FUNCTIONS = frozenset( + [ + "GRBaddconstr", + "GRBaddconstrs", + "GRBaddgenconstrAbs", + "GRBaddgenconstrAnd", + "GRBaddgenconstrIndicator", + "GRBaddgenconstrMax", + "GRBaddgenconstrMin", + "GRBaddgenconstrOr", + "GRBaddqconstr", + "GRBaddqpterms", + "GRBaddrangeconstr", + "GRBaddsos", + "GRBaddvar", + "GRBaddvars", + "GRBcbcut", + "GRBcbget", + "GRBcblazy", + "GRBcbsolution", + "GRBchgcoeffs", + "GRBcopyparams", + "GRBdelconstrs", + "GRBdelgenconstrs", + "GRBdelq", + "GRBdelqconstrs", + "GRBdelsos", + "GRBdelvars", + "GRBenv", + "GRBenvUniquePtr", + "GRBemptyenv", + "GRBgetnumparams", + "GRBgetparamtype", + "GRBgetparamname", + "GRBgetintparaminfo", + "GRBgetdblparaminfo", + "GRBgetstrparaminfo", + "GRBfreeenv", + "GRBfreemodel", + "GRBgetcharattrarray", + "GRBgetcharattrelement", + "GRBgetdblattr", + "GRBgetdblattrarray", + "GRBgetdblattrelement", + "GRBgetdblparam", + "GRBgetenv", + "GRBgeterrormsg", + "GRBgetintattr", + "GRBgetintattrarray", + "GRBgetintattrelement", + "GRBgetintparam", + "GRBgetstrattr", + "GRBgetstrparam", + "GRBgetvars", + "GRBisattravailable", + "GRBisqp", + "GRBloadenv", + "GRBmodel", + "GRBnewmodel", + "GRBoptimize", + "GRBcomputeIIS", + "GRBplatform", + "GRBresetparams", + "GRBsetcallbackfunc", + "GRBsetcharattrarray", + "GRBsetcharattrelement", + "GRBsetcharattrlist", + "GRBsetdblattr", + "GRBsetdblattrarray", + 
"GRBsetdblattrelement", + "GRBsetdblattrlist", + "GRBsetdblparam", + "GRBsetintattr", + "GRBsetintattrarray", + "GRBsetintattrelement", + "GRBsetintattrlist", + "GRBsetintparam", + "GRBsetobjectiven", + "GRBsetparam", + "GRBsetstrattr", + "GRBsetstrparam", + "GRBterminate", + "GRBupdatemodel", + "GRBversion", + "GRBwrite", + ] +) + +# TODO(user): Filter #define too. + + +class GurobiHeaderParser: + """Converts gurobi_c.h to something pastable in ./environment.h|.cc.""" + + def __init__(self): + self.__header = "" + self.__define = "" + self.__assign = "" + self.__state = 0 + self.__return_type = "" + self.__args = "" + self.__fun_name = "" + + def should_be_exported(self, name: str) -> bool: + return name in EXPORTED_FUNCTIONS + + def write_define(self, symbol: str, value: str) -> None: + self.__header += f"#define {symbol} {value}\n" + + def write_fun(self, name: str, return_type: str, args: str) -> None: + if not self.should_be_exported(name): + print("skipping " + name) + return + + self.__header += f"extern std::function<{return_type}({args})> {name};\n" + self.__define += f"std::function<{return_type}({args})> {name} = nullptr;\n" + self.__assign += f" gurobi_dynamic_library->GetFunction(&{name}, " + self.__assign += f'"{name}");\n' + + def parse(self, filepath: str) -> None: + """Main method to parser the gurobi header.""" + + with open(filepath) as fp: + all_lines = fp.read() + + for line in all_lines.splitlines(): + if not line: # Ignore empty lines. + continue + if re.fullmatch(r"/\*", line, re.M): # Ignore comments. + continue + + if self.__state == 0: + # Note: fullmatch does not work. + match_def = re.match(r"#define ([A-Z0-9_]*)\s+([^/]+)", line, re.M) + if match_def: + self.write_define(match_def.group(1), match_def.group(2)) + continue + + # Single line function definition. 
+ match_fun = re.fullmatch( + r"([a-z]+) __stdcall (GRB[A-Za-z_]*)\(([^;]*)\);", line, re.M + ) + if match_fun: + self.write_fun( + match_fun.group(1), match_fun.group(2), match_fun.group(3) + ) + continue + + # Simple type declaration (i.e. int __stdcall). + match_fun = re.fullmatch(r"([a-z]+) __stdcall\s*$", line, re.M) + if match_fun: + self.__return_type = match_fun.group(1) + self.__state = 1 + continue + + # Complex type declaration with pointer (i.e. GRBModel* __stdcall). + match_fun = re.fullmatch(r"([A-Za-z ]+)\*\s*__stdcall\s*$", line, re.M) + if match_fun: + self.__return_type = match_fun.group(1) + "*" + self.__state = 1 + continue + + elif self.__state == 1: # The return type was defined at the line before. + # Function definition terminates in this line. + match_fun = re.fullmatch(r"\s*(GRB[A-Za-z_]*)\(([^;]+)\);", line, re.M) + if match_fun: + self.write_fun( + match_fun.group(1), self.__return_type, match_fun.group(2) + ) + self.__state = 0 + self.__return_type = "" + continue + + # Function definition does not terminate in this line. + match_fun = re.fullmatch(r"\s*(GRB[A-Za-z_]*)\(([^;]+)$", line, re.M) + if match_fun: + self.__fun_name = match_fun.group(1) + self.__args = match_fun.group(2) + self.__state = 2 + continue + + elif self.__state == 2: # Extra arguments. + # Arguments end in this line. + match_fun = re.fullmatch(r"\s*([^;]+)\);", line, re.M) + if match_fun: + self.__args += match_fun.group(1) + self.write_fun(self.__fun_name, self.__return_type, self.__args) + self.__args = "" + self.__fun_name = "" + self.__return_type = "" + self.__state = 0 + continue + + # Arguments do not end in this line. + match_fun = re.fullmatch(r"\s*([^;]+)$", line, re.M) + if match_fun: + self.__args += match_fun.group(1) + continue + + def output(self) -> None: + """Output the 3 generated code on standard out.""" + + # replace __stdcall by GUROBI_STDCALL. 
+ self.__header = self.__header.replace("__stdcall", "GUROBI_STDCALL") + self.__define = self.__define.replace("__stdcall", "GUROBI_STDCALL") + + print("------------------- header -------------------") + print(self.__header) + + print("------------------- define -------------------") + print(self.__define) + + print("------------------- assign -------------------") + print(self.__assign) + + +def main(argv: Sequence[str]) -> None: + if len(argv) > 2: + raise app.UsageError("Too many command-line arguments.") + if len(argv) == 1: + raise app.UsageError("Please supply path to gurobi_c.h on the command line.") + + parser = GurobiHeaderParser() + parser.parse(argv[1]) + parser.output() + + +if __name__ == "__main__": + app.run(main) diff --git a/ortools/third_party_solvers/xpress_environment.cc b/ortools/third_party_solvers/xpress_environment.cc new file mode 100644 index 0000000000..04d872d7a6 --- /dev/null +++ b/ortools/third_party_solvers/xpress_environment.cc @@ -0,0 +1,389 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Initial version of this code was provided by RTE + +#include "ortools/third_party_solvers/xpress_environment.h" + +#include +// NOLINTNEXTLINE(build/c++17) +#include +#include +#include +#include + +#include "absl/base/call_once.h" +#include "absl/base/const_init.h" +#include "absl/status/status.h" +#include "absl/strings/str_cat.h" +#include "absl/strings/str_join.h" +#include "absl/synchronization/mutex.h" +#include "ortools/base/logging.h" +#include "ortools/third_party_solvers/dynamic_library.h" + +namespace operations_research { + +#define STRINGIFY2(X) #X +#define STRINGIFY(X) STRINGIFY2(X) + +// Let's not reformat for rest of the file. +// This was generated with the parse_header_xpress.py script. +// See the comment at the top of the script. + +// This is the 'define' section. +// NOLINTBEGIN(whitespace/line_length) +// NOLINTBEGIN(google3-runtime-global-variables) +// clang-format off +std::function XPRScreateprob = nullptr; +std::function XPRSdestroyprob = nullptr; +std::function XPRSinit = nullptr; +std::function XPRSfree = nullptr; +std::function XPRSgetlicerrmsg = nullptr; +std::function XPRSlicense = nullptr; +std::function XPRSgetbanner = nullptr; +std::function XPRSgetversion = nullptr; +std::function XPRSsetprobname = nullptr; +std::function XPRSsetdefaultcontrol = nullptr; +std::function XPRSinterrupt = nullptr; +std::function XPRSsetintcontrol = nullptr; +std::function XPRSsetintcontrol64 = nullptr; +std::function XPRSsetdblcontrol = nullptr; +std::function XPRSsetstrcontrol = nullptr; +std::function XPRSgetintcontrol = nullptr; +std::function XPRSgetintcontrol64 = nullptr; +std::function XPRSgetdblcontrol = nullptr; +std::function XPRSgetstringcontrol = nullptr; +std::function XPRSgetintattrib = nullptr; +std::function XPRSgetstringattrib = nullptr; +std::function XPRSgetdblattrib = nullptr; +std::function XPRSgetcontrolinfo = nullptr; +std::function XPRSgetobj = nullptr; +std::function XPRSgetrhs = nullptr; +std::function XPRSgetrhsrange 
= nullptr; +std::function XPRSgetlb = nullptr; +std::function XPRSgetub = nullptr; +std::function XPRSgetcoef = nullptr; +std::function XPRSgetduals = nullptr; +std::function XPRSgetredcosts = nullptr; +std::function XPRSaddrows = nullptr; +std::function XPRSdelrows = nullptr; +std::function XPRSaddcols = nullptr; +std::function XPRSaddnames = nullptr; +std::function XPRSgetnames = nullptr; +std::function XPRSdelcols = nullptr; +std::function XPRSchgcoltype = nullptr; +std::function XPRSloadbasis = nullptr; +std::function XPRSpostsolve = nullptr; +std::function XPRSchgobjsense = nullptr; +std::function XPRSgetlasterror = nullptr; +std::function XPRSgetbasis = nullptr; +std::function XPRSwriteprob = nullptr; +std::function XPRSgetrowtype = nullptr; +std::function XPRSgetcoltype = nullptr; +std::function XPRSchgbounds = nullptr; +std::function XPRSaddmipsol = nullptr; +std::function XPRSgetlpsol = nullptr; +std::function XPRSgetmipsol = nullptr; +std::function XPRSchgobj = nullptr; +std::function XPRSchgcoef = nullptr; +std::function XPRSchgmcoef = nullptr; +std::function XPRSchgmcoef64 = nullptr; +std::function XPRSchgmqobj = nullptr; +std::function XPRSchgrhs = nullptr; +std::function XPRSchgrhsrange = nullptr; +std::function XPRSchgrowtype = nullptr; +std::function XPRSdelobj = nullptr; +std::function XPRSaddcbintsol = nullptr; +std::function XPRSremovecbintsol = nullptr; +std::function XPRSaddcbmessage = nullptr; +std::function XPRSlpoptimize = nullptr; +std::function XPRSmipoptimize = nullptr; +std::function XPRSoptimize = nullptr; +// clang-format on +// NOLINTEND(google3-runtime-global-variables) +// NOLINTEND(whitespace/line_length) + +void LoadXpressFunctions(DynamicLibrary* xpress_dynamic_library) { + // This was generated with the parse_header_xpress.py script. + // See the comment at the top of the script. + + // This is the 'assign' section. 
+ // NOLINTBEGIN(whitespace/line_length) + // clang-format off + xpress_dynamic_library->GetFunction(&XPRScreateprob, "XPRScreateprob"); + xpress_dynamic_library->GetFunction(&XPRSdestroyprob, "XPRSdestroyprob"); + xpress_dynamic_library->GetFunction(&XPRSinit, "XPRSinit"); + xpress_dynamic_library->GetFunction(&XPRSfree, "XPRSfree"); + xpress_dynamic_library->GetFunction(&XPRSgetlicerrmsg, "XPRSgetlicerrmsg"); + xpress_dynamic_library->GetFunction(&XPRSlicense, "XPRSlicense"); + xpress_dynamic_library->GetFunction(&XPRSgetbanner, "XPRSgetbanner"); + xpress_dynamic_library->GetFunction(&XPRSgetversion, "XPRSgetversion"); + xpress_dynamic_library->GetFunction(&XPRSsetprobname, "XPRSsetprobname"); + xpress_dynamic_library->GetFunction(&XPRSsetdefaultcontrol, "XPRSsetdefaultcontrol"); + xpress_dynamic_library->GetFunction(&XPRSinterrupt, "XPRSinterrupt"); + xpress_dynamic_library->GetFunction(&XPRSsetintcontrol, "XPRSsetintcontrol"); + xpress_dynamic_library->GetFunction(&XPRSsetintcontrol64, "XPRSsetintcontrol64"); + xpress_dynamic_library->GetFunction(&XPRSsetdblcontrol, "XPRSsetdblcontrol"); + xpress_dynamic_library->GetFunction(&XPRSsetstrcontrol, "XPRSsetstrcontrol"); + xpress_dynamic_library->GetFunction(&XPRSgetintcontrol, "XPRSgetintcontrol"); + xpress_dynamic_library->GetFunction(&XPRSgetintcontrol64, "XPRSgetintcontrol64"); + xpress_dynamic_library->GetFunction(&XPRSgetdblcontrol, "XPRSgetdblcontrol"); + xpress_dynamic_library->GetFunction(&XPRSgetstringcontrol, "XPRSgetstringcontrol"); + xpress_dynamic_library->GetFunction(&XPRSgetintattrib, "XPRSgetintattrib"); + xpress_dynamic_library->GetFunction(&XPRSgetstringattrib, "XPRSgetstringattrib"); + xpress_dynamic_library->GetFunction(&XPRSgetdblattrib, "XPRSgetdblattrib"); + xpress_dynamic_library->GetFunction(&XPRSgetobj, "XPRSgetobj"); + xpress_dynamic_library->GetFunction(&XPRSgetrhs, "XPRSgetrhs"); + xpress_dynamic_library->GetFunction(&XPRSgetrhsrange, "XPRSgetrhsrange"); + 
xpress_dynamic_library->GetFunction(&XPRSgetlb, "XPRSgetlb"); + xpress_dynamic_library->GetFunction(&XPRSgetub, "XPRSgetub"); + xpress_dynamic_library->GetFunction(&XPRSgetcoef, "XPRSgetcoef"); + xpress_dynamic_library->GetFunction(&XPRSgetduals, "XPRSgetduals"); + xpress_dynamic_library->GetFunction(&XPRSgetredcosts, "XPRSgetredcosts"); + xpress_dynamic_library->GetFunction(&XPRSaddrows, "XPRSaddrows"); + xpress_dynamic_library->GetFunction(&XPRSdelrows, "XPRSdelrows"); + xpress_dynamic_library->GetFunction(&XPRSaddcols, "XPRSaddcols"); + xpress_dynamic_library->GetFunction(&XPRSaddnames, "XPRSaddnames"); + xpress_dynamic_library->GetFunction(&XPRSgetnames, "XPRSgetnames"); + xpress_dynamic_library->GetFunction(&XPRSdelcols, "XPRSdelcols"); + xpress_dynamic_library->GetFunction(&XPRSchgcoltype, "XPRSchgcoltype"); + xpress_dynamic_library->GetFunction(&XPRSloadbasis, "XPRSloadbasis"); + xpress_dynamic_library->GetFunction(&XPRSpostsolve, "XPRSpostsolve"); + xpress_dynamic_library->GetFunction(&XPRSchgobjsense, "XPRSchgobjsense"); + xpress_dynamic_library->GetFunction(&XPRSgetlasterror, "XPRSgetlasterror"); + xpress_dynamic_library->GetFunction(&XPRSgetbasis, "XPRSgetbasis"); + xpress_dynamic_library->GetFunction(&XPRSwriteprob, "XPRSwriteprob"); + xpress_dynamic_library->GetFunction(&XPRSgetrowtype, "XPRSgetrowtype"); + xpress_dynamic_library->GetFunction(&XPRSgetcoltype, "XPRSgetcoltype"); + xpress_dynamic_library->GetFunction(&XPRSchgbounds, "XPRSchgbounds"); + xpress_dynamic_library->GetFunction(&XPRSaddmipsol, "XPRSaddmipsol"); + xpress_dynamic_library->GetFunction(&XPRSgetlpsol, "XPRSgetlpsol"); + xpress_dynamic_library->GetFunction(&XPRSgetmipsol, "XPRSgetmipsol"); + xpress_dynamic_library->GetFunction(&XPRSchgobj, "XPRSchgobj"); + xpress_dynamic_library->GetFunction(&XPRSchgcoef, "XPRSchgcoef"); + xpress_dynamic_library->GetFunction(&XPRSchgmcoef, "XPRSchgmcoef"); + xpress_dynamic_library->GetFunction(&XPRSchgmcoef64, "XPRSchgmcoef64"); + 
xpress_dynamic_library->GetFunction(&XPRSchgmqobj, "XPRSchgmqobj"); + xpress_dynamic_library->GetFunction(&XPRSchgrhs, "XPRSchgrhs"); + xpress_dynamic_library->GetFunction(&XPRSchgrhsrange, "XPRSchgrhsrange"); + xpress_dynamic_library->GetFunction(&XPRSchgrowtype, "XPRSchgrowtype"); + xpress_dynamic_library->GetFunction(&XPRSdelobj, "XPRSdelobj"); + xpress_dynamic_library->GetFunction(&XPRSaddcbintsol, "XPRSaddcbintsol"); + xpress_dynamic_library->GetFunction(&XPRSremovecbintsol, "XPRSremovecbintsol"); + xpress_dynamic_library->GetFunction(&XPRSaddcbmessage, "XPRSaddcbmessage"); + xpress_dynamic_library->GetFunction(&XPRSlpoptimize, "XPRSlpoptimize"); + xpress_dynamic_library->GetFunction(&XPRSmipoptimize, "XPRSmipoptimize"); + xpress_dynamic_library->GetFunction(&XPRSoptimize, "XPRSoptimize"); + // clang-format on + // NOLINTEND(whitespace/line_length) +} + +void printXpressBanner(bool error) { + char banner[XPRS_MAXBANNERLENGTH]; + XPRSgetbanner(banner); + + if (error) { + LOG(ERROR) << "XpressInterface : Xpress banner :\n" << banner << "\n"; + } else { + LOG(WARNING) << "XpressInterface : Xpress banner :\n" << banner << "\n"; + } +} + +std::vector XpressDynamicLibraryPotentialPaths() { + std::vector potential_paths; + + // Look for libraries pointed by XPRESSDIR first. + const char* xpressdir_from_env = getenv("XPRESSDIR"); + if (xpressdir_from_env != nullptr) { + LOG(INFO) << "Environment variable XPRESSDIR = " << xpressdir_from_env; +#if defined(_MSC_VER) // Windows + potential_paths.push_back( + absl::StrCat(xpressdir_from_env, "\\bin\\xprs.dll")); +#elif defined(__APPLE__) // macOS + potential_paths.push_back( + absl::StrCat(xpressdir_from_env, "/lib/libxprs.dylib")); +#elif defined(__GNUC__) // Linux + potential_paths.push_back( + absl::StrCat(xpressdir_from_env, "/lib/libxprs.so")); +#else + LOG(ERROR) << "OS Not recognized by xpress_environment.cc." 
+ << " You won't be able to use Xpress."; +#endif + } else { + LOG(WARNING) << "Environment variable XPRESSDIR undefined."; + } + + // Search for canonical places. +#if defined(_MSC_VER) // Windows + potential_paths.push_back(absl::StrCat("C:\\xpressmp\\bin\\xprs.dll")); + potential_paths.push_back( + absl::StrCat("C:\\Program Files\\xpressmp\\bin\\xprs.dll")); +#elif defined(__APPLE__) // macOS + potential_paths.push_back( + absl::StrCat("/Library/xpressmp/lib/libxprs.dylib")); +#elif defined(__GNUC__) // Linux + potential_paths.push_back(absl::StrCat("/opt/xpressmp/lib/libxprs.so")); +#else + LOG(ERROR) << "OS Not recognized by xpress_environment.cc." + << " You won't be able to use Xpress."; +#endif + return potential_paths; +} + +absl::Status LoadXpressDynamicLibrary(std::string& xpresspath) { + static std::string* xpress_lib_path = new std::string; + static absl::once_flag xpress_loading_done; + static absl::Status* xpress_load_status = new absl::Status; + static DynamicLibrary* xpress_library = new DynamicLibrary; + static absl::Mutex mutex(absl::kConstInit); + + absl::MutexLock lock(&mutex); + + absl::call_once(xpress_loading_done, []() { + const std::vector canonical_paths = + XpressDynamicLibraryPotentialPaths(); + for (const std::string& path : canonical_paths) { + if (xpress_library->TryToLoad(path)) { + LOG(INFO) << "Found the Xpress library in " << path << "."; + xpress_lib_path->clear(); + std::filesystem::path p(path); + p.remove_filename(); + xpress_lib_path->append(p.string()); + break; + } + } + + if (xpress_library->LibraryIsLoaded()) { + LOG(INFO) << "Loading all Xpress functions"; + LoadXpressFunctions(xpress_library); + *xpress_load_status = absl::OkStatus(); + } else { + *xpress_load_status = absl::NotFoundError( + absl::StrCat("Could not find the Xpress shared library. Looked in: [", + absl::StrJoin(canonical_paths, "', '"), + "]. 
Please check environment variable XPRESSDIR")); + } + }); + xpresspath.clear(); + xpresspath.append(*xpress_lib_path); + return *xpress_load_status; +} + +void log_message_about_XPRSinit_argument(); +void log_full_license_error(int code, const std::string& xpress_lib_dir); +//! init XPRESS environment. +bool initXpressEnv(bool verbose, int xpress_oem_license_key) { + std::string xpress_lib_dir; + absl::Status status = LoadXpressDynamicLibrary(xpress_lib_dir); + if (!status.ok()) { + LOG(WARNING) << status << "\n"; + return false; + } + + int code; + // if not an OEM key + if (xpress_oem_license_key == 0) { + if (verbose) { + log_message_about_XPRSinit_argument(); + } + + code = XPRSinit(nullptr); + + if (!code) { + // XPRSbanner informs about Xpress version, options and error messages + if (verbose) { + printXpressBanner(false); + char version[16]; + XPRSgetversion(version); + LOG(WARNING) << "Optimizer version: " << version + << " (OR-Tools was compiled with version " << XPVERSION + << ")."; + } + return true; + } else { + log_full_license_error(code, xpress_lib_dir); + return false; + } + } else { + // if OEM key + if (verbose) { + LOG(WARNING) << "XpressInterface : Initialising xpress-MP with OEM key " + << xpress_oem_license_key; + } + + int nvalue = 0; + int ierr; + char slicmsg[256] = ""; + char errmsg[256]; + + XPRSlicense(&nvalue, slicmsg); + if (verbose) { + DLOG(INFO) << "XpressInterface : First message from XPRSLicense : " + << slicmsg; + } + + nvalue = xpress_oem_license_key - ((nvalue * nvalue) / 19); + ierr = XPRSlicense(&nvalue, slicmsg); + + if (verbose) { + DLOG(INFO) << "XpressInterface : Second message from XPRSLicense : " + << slicmsg; + } + if (ierr == 16) { + if (verbose) { + DLOG(INFO) + << "XpressInterface : Optimizer development software detected"; + } + } else if (ierr != 0) { + // get the license error message + XPRSgetlicerrmsg(errmsg, 256); + + LOG(ERROR) << "XpressInterface : " << errmsg; + return false; + } + + code = 
XPRSinit(nullptr); + + if (!code) { + return true; + } else { + LOG(ERROR) << "XPRSinit returned code : " << code << "\n"; + return false; + } + } +} +void log_full_license_error(int code, const std::string& xpress_lib_dir) { + LOG(WARNING) << "XpressInterface: Xpress found at " << xpress_lib_dir << "\n"; + char errmsg[256]; + XPRSgetlicerrmsg(errmsg, 256); + + LOG(ERROR) << "XpressInterface : License error : " << errmsg + << " (XPRSinit returned code " << code << "). \n"; + LOG(ERROR) + << "|_Your Xpress installation should have set the env var XPAUTH_PATH" + " to the full path of your licence file\n"; +} +void log_message_about_XPRSinit_argument() { + LOG(WARNING) + << "XpressInterface : Initialising xpress-MP with default parameters"; +} + +bool XpressIsCorrectlyInstalled() { + bool correctlyInstalled = initXpressEnv(false); + if (correctlyInstalled) { + XPRSfree(); + } + return correctlyInstalled; +} + +} // namespace operations_research diff --git a/ortools/third_party_solvers/xpress_environment.h b/ortools/third_party_solvers/xpress_environment.h new file mode 100644 index 0000000000..aafecf3c4a --- /dev/null +++ b/ortools/third_party_solvers/xpress_environment.h @@ -0,0 +1,550 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Initial version of this code was provided by RTE + +#ifndef THIRD_PARTY_ORTOOLS_ORTOOLS_THIRD_PARTY_SOLVERS_XPRESS_ENVIRONMENT_H_ +#define THIRD_PARTY_ORTOOLS_ORTOOLS_THIRD_PARTY_SOLVERS_XPRESS_ENVIRONMENT_H_ + +#include +#include + +#include "absl/status/status.h" +#include "ortools/base/base_export.h" + +extern "C" { +typedef struct xo_prob_struct* XPRSprob; +} + +namespace operations_research { + +void printXpressBanner(bool error); + +bool initXpressEnv(bool verbose = true, int xpress_oem_license_key = 0); + +bool XpressIsCorrectlyInstalled(); + +// Force the loading of the xpress dynamic library. It returns true if the +// library was successfully loaded. This method can only be called once. +// Successive calls are no-op. +// +// Note that it does not check if a token license can be grabbed. +absl::Status LoadXpressDynamicLibrary(std::string& xpresspath); + +// The list of #define and extern std::function<> below is generated directly +// from xprs.h via parse_header_xpress.py +// See the top comment on the parse_header_xpress.py file. 
+// This is the header section +// NOLINTBEGIN(runtime/int) +#if defined(_WIN32) +#define XPRSint64 __int64 +#elif defined(__LP64__) || defined(_LP64) || defined(__ILP64__) || \ + defined(_ILP64) +#define XPRSint64 long +#else +#define XPRSint64 long long +#endif +// NOLINTEND(runtime/int) + +#if defined(_MSC_VER) +#define XPRS_CC __stdcall +#else +#define XPRS_CC +#endif +// *************************************************************************** +// * values related to XPRSinterrupt * +// *************************************************************************** +#define XPRS_STOP_NONE 0 +#define XPRS_STOP_TIMELIMIT 1 +#define XPRS_STOP_CTRLC 2 +#define XPRS_STOP_NODELIMIT 3 +#define XPRS_STOP_ITERLIMIT 4 +#define XPRS_STOP_MIPGAP 5 +#define XPRS_STOP_SOLLIMIT 6 +#define XPRS_STOP_GENERICERROR 7 +#define XPRS_STOP_MEMORYERROR 8 +#define XPRS_STOP_USER 9 +#define XPRS_STOP_SOLVECOMPLETE 10 +#define XPRS_STOP_LICENSELOST 11 +#define XPRS_STOP_NUMERICALERROR 13 +// *************************************************************************** +// * values related to Set/GetControl/Attribinfo * +// *************************************************************************** +#define XPRS_TYPE_NOTDEFINED 0 +#define XPRS_TYPE_INT 1 +#define XPRS_TYPE_INT64 2 +#define XPRS_TYPE_DOUBLE 3 +#define XPRS_TYPE_STRING 4 +// *************************************************************************** +// * values related to NAMESPACES * +// *************************************************************************** +#define XPRS_NAMES_ROW 1 +#define XPRS_NAMES_COLUMN 2 + +#define XPRS_PLUSINFINITY 1.0e+20 +#define XPRS_MINUSINFINITY -1.0e+20 +#define XPRS_MAXBANNERLENGTH 512 +#define XPVERSION 41 +#define XPRS_MPSRHSNAME 6001 +#define XPRS_MPSOBJNAME 6002 +#define XPRS_MPSRANGENAME 6003 +#define XPRS_MPSBOUNDNAME 6004 +#define XPRS_OUTPUTMASK 6005 +#define XPRS_TUNERMETHODFILE 6017 +#define XPRS_TUNEROUTPUTPATH 6018 +#define XPRS_TUNERSESSIONNAME 6019 +#define 
XPRS_COMPUTEEXECSERVICE 6022 +#define XPRS_MAXCUTTIME 8149 +#define XPRS_MAXSTALLTIME 8443 +#define XPRS_TUNERMAXTIME 8364 +#define XPRS_MATRIXTOL 7001 +#define XPRS_PIVOTTOL 7002 +#define XPRS_FEASTOL 7003 +#define XPRS_OUTPUTTOL 7004 +#define XPRS_SOSREFTOL 7005 +#define XPRS_OPTIMALITYTOL 7006 +#define XPRS_ETATOL 7007 +#define XPRS_RELPIVOTTOL 7008 +#define XPRS_MIPTOL 7009 +#define XPRS_MIPTOLTARGET 7010 +#define XPRS_BARPERTURB 7011 +#define XPRS_MIPADDCUTOFF 7012 +#define XPRS_MIPABSCUTOFF 7013 +#define XPRS_MIPRELCUTOFF 7014 +#define XPRS_PSEUDOCOST 7015 +#define XPRS_PENALTY 7016 +#define XPRS_BIGM 7018 +#define XPRS_MIPABSSTOP 7019 +#define XPRS_MIPRELSTOP 7020 +#define XPRS_CROSSOVERACCURACYTOL 7023 +#define XPRS_PRIMALPERTURB 7024 +#define XPRS_DUALPERTURB 7025 +#define XPRS_BAROBJSCALE 7026 +#define XPRS_BARRHSSCALE 7027 +#define XPRS_CHOLESKYTOL 7032 +#define XPRS_BARGAPSTOP 7033 +#define XPRS_BARDUALSTOP 7034 +#define XPRS_BARPRIMALSTOP 7035 +#define XPRS_BARSTEPSTOP 7036 +#define XPRS_ELIMTOL 7042 +#define XPRS_MARKOWITZTOL 7047 +#define XPRS_MIPABSGAPNOTIFY 7064 +#define XPRS_MIPRELGAPNOTIFY 7065 +#define XPRS_BARLARGEBOUND 7067 +#define XPRS_PPFACTOR 7069 +#define XPRS_REPAIRINDEFINITEQMAX 7071 +#define XPRS_BARGAPTARGET 7073 +#define XPRS_DUMMYCONTROL 7075 +#define XPRS_BARSTARTWEIGHT 7076 +#define XPRS_BARFREESCALE 7077 +#define XPRS_SBEFFORT 7086 +#define XPRS_HEURDIVERANDOMIZE 7089 +#define XPRS_HEURSEARCHEFFORT 7090 +#define XPRS_CUTFACTOR 7091 +#define XPRS_EIGENVALUETOL 7097 +#define XPRS_INDLINBIGM 7099 +#define XPRS_TREEMEMORYSAVINGTARGET 7100 +#define XPRS_INDPRELINBIGM 7102 +#define XPRS_RELAXTREEMEMORYLIMIT 7105 +#define XPRS_MIPABSGAPNOTIFYOBJ 7108 +#define XPRS_MIPABSGAPNOTIFYBOUND 7109 +#define XPRS_PRESOLVEMAXGROW 7110 +#define XPRS_HEURSEARCHTARGETSIZE 7112 +#define XPRS_CROSSOVERRELPIVOTTOL 7113 +#define XPRS_CROSSOVERRELPIVOTTOLSAFE 7114 +#define XPRS_DETLOGFREQ 7116 +#define XPRS_MAXIMPLIEDBOUND 7120 +#define XPRS_FEASTOLTARGET 
7121 +#define XPRS_OPTIMALITYTOLTARGET 7122 +#define XPRS_PRECOMPONENTSEFFORT 7124 +#define XPRS_LPLOGDELAY 7127 +#define XPRS_HEURDIVEITERLIMIT 7128 +#define XPRS_BARKERNEL 7130 +#define XPRS_FEASTOLPERTURB 7132 +#define XPRS_CROSSOVERFEASWEIGHT 7133 +#define XPRS_LUPIVOTTOL 7139 +#define XPRS_MIPRESTARTGAPTHRESHOLD 7140 +#define XPRS_NODEPROBINGEFFORT 7141 +#define XPRS_INPUTTOL 7143 +#define XPRS_MIPRESTARTFACTOR 7145 +#define XPRS_BAROBJPERTURB 7146 +#define XPRS_CPIALPHA 7149 +#define XPRS_GLOBALBOUNDINGBOX 7154 +#define XPRS_TIMELIMIT 7158 +#define XPRS_SOLTIMELIMIT 7159 +#define XPRS_REPAIRINFEASTIMELIMIT 7160 +#define XPRS_EXTRAROWS 8004 +#define XPRS_EXTRACOLS 8005 +#define XPRS_LPITERLIMIT 8007 +#define XPRS_LPLOG 8009 +#define XPRS_SCALING 8010 +#define XPRS_PRESOLVE 8011 +#define XPRS_CRASH 8012 +#define XPRS_PRICINGALG 8013 +#define XPRS_INVERTFREQ 8014 +#define XPRS_INVERTMIN 8015 +#define XPRS_MAXNODE 8018 +#define XPRS_MAXTIME 8020 +#define XPRS_MAXMIPSOL 8021 +#define XPRS_SIFTPASSES 8022 +#define XPRS_DEFAULTALG 8023 +#define XPRS_VARSELECTION 8025 +#define XPRS_NODESELECTION 8026 +#define XPRS_BACKTRACK 8027 +#define XPRS_MIPLOG 8028 +#define XPRS_KEEPNROWS 8030 +#define XPRS_MPSECHO 8032 +#define XPRS_MAXPAGELINES 8034 +#define XPRS_OUTPUTLOG 8035 +#define XPRS_BARSOLUTION 8038 +#define XPRS_CACHESIZE 8043 +#define XPRS_CROSSOVER 8044 +#define XPRS_BARITERLIMIT 8045 +#define XPRS_CHOLESKYALG 8046 +#define XPRS_BAROUTPUT 8047 +#define XPRS_EXTRAMIPENTS 8051 +#define XPRS_REFACTOR 8052 +#define XPRS_BARTHREADS 8053 +#define XPRS_KEEPBASIS 8054 +#define XPRS_CROSSOVEROPS 8060 +#define XPRS_VERSION 8061 +#define XPRS_CROSSOVERTHREADS 8065 +#define XPRS_BIGMMETHOD 8068 +#define XPRS_MPSNAMELENGTH 8071 +#define XPRS_ELIMFILLIN 8073 +#define XPRS_PRESOLVEOPS 8077 +#define XPRS_MIPPRESOLVE 8078 +#define XPRS_MIPTHREADS 8079 +#define XPRS_BARORDER 8080 +#define XPRS_BREADTHFIRST 8082 +#define XPRS_AUTOPERTURB 8084 +#define XPRS_DENSECOLLIMIT 8086 
+#define XPRS_CALLBACKFROMMASTERTHREAD 8090 +#define XPRS_MAXMCOEFFBUFFERELEMS 8091 +#define XPRS_REFINEOPS 8093 +#define XPRS_LPREFINEITERLIMIT 8094 +#define XPRS_MIPREFINEITERLIMIT 8095 +#define XPRS_DUALIZEOPS 8097 +#define XPRS_CROSSOVERITERLIMIT 8104 +#define XPRS_PREBASISRED 8106 +#define XPRS_PRESORT 8107 +#define XPRS_PREPERMUTE 8108 +#define XPRS_PREPERMUTESEED 8109 +#define XPRS_MAXMEMORYSOFT 8112 +#define XPRS_CUTFREQ 8116 +#define XPRS_SYMSELECT 8117 +#define XPRS_SYMMETRY 8118 +#define XPRS_MAXMEMORYHARD 8119 +#define XPRS_MIQCPALG 8125 +#define XPRS_QCCUTS 8126 +#define XPRS_QCROOTALG 8127 +#define XPRS_PRECONVERTSEPARABLE 8128 +#define XPRS_ALGAFTERNETWORK 8129 +#define XPRS_TRACE 8130 +#define XPRS_MAXIIS 8131 +#define XPRS_CPUTIME 8133 +#define XPRS_COVERCUTS 8134 +#define XPRS_GOMCUTS 8135 +#define XPRS_LPFOLDING 8136 +#define XPRS_MPSFORMAT 8137 +#define XPRS_CUTSTRATEGY 8138 +#define XPRS_CUTDEPTH 8139 +#define XPRS_TREECOVERCUTS 8140 +#define XPRS_TREEGOMCUTS 8141 +#define XPRS_CUTSELECT 8142 +#define XPRS_TREECUTSELECT 8143 +#define XPRS_DUALIZE 8144 +#define XPRS_DUALGRADIENT 8145 +#define XPRS_SBITERLIMIT 8146 +#define XPRS_SBBEST 8147 +#define XPRS_BARINDEFLIMIT 8153 +#define XPRS_HEURFREQ 8155 +#define XPRS_HEURDEPTH 8156 +#define XPRS_HEURMAXSOL 8157 +#define XPRS_HEURNODES 8158 +#define XPRS_LNPBEST 8160 +#define XPRS_LNPITERLIMIT 8161 +#define XPRS_BRANCHCHOICE 8162 +#define XPRS_BARREGULARIZE 8163 +#define XPRS_SBSELECT 8164 +#define XPRS_LOCALCHOICE 8170 +#define XPRS_LOCALBACKTRACK 8171 +#define XPRS_DUALSTRATEGY 8174 +#define XPRS_L1CACHE 8175 +#define XPRS_HEURDIVESTRATEGY 8177 +#define XPRS_HEURSELECT 8178 +#define XPRS_BARSTART 8180 +#define XPRS_PRESOLVEPASSES 8183 +#define XPRS_BARNUMSTABILITY 8186 +#define XPRS_BARORDERTHREADS 8187 +#define XPRS_EXTRASETS 8190 +#define XPRS_FEASIBILITYPUMP 8193 +#define XPRS_PRECOEFELIM 8194 +#define XPRS_PREDOMCOL 8195 +#define XPRS_HEURSEARCHFREQ 8196 +#define XPRS_HEURDIVESPEEDUP 8197 
+#define XPRS_SBESTIMATE 8198 +#define XPRS_BARCORES 8202 +#define XPRS_MAXCHECKSONMAXTIME 8203 +#define XPRS_MAXCHECKSONMAXCUTTIME 8204 +#define XPRS_HISTORYCOSTS 8206 +#define XPRS_ALGAFTERCROSSOVER 8208 +#define XPRS_MUTEXCALLBACKS 8210 +#define XPRS_BARCRASH 8211 +#define XPRS_HEURDIVESOFTROUNDING 8215 +#define XPRS_HEURSEARCHROOTSELECT 8216 +#define XPRS_HEURSEARCHTREESELECT 8217 +#define XPRS_MPS18COMPATIBLE 8223 +#define XPRS_ROOTPRESOLVE 8224 +#define XPRS_CROSSOVERDRP 8227 +#define XPRS_FORCEOUTPUT 8229 +#define XPRS_PRIMALOPS 8231 +#define XPRS_DETERMINISTIC 8232 +#define XPRS_PREPROBING 8238 +#define XPRS_TREEMEMORYLIMIT 8242 +#define XPRS_TREECOMPRESSION 8243 +#define XPRS_TREEDIAGNOSTICS 8244 +#define XPRS_MAXTREEFILESIZE 8245 +#define XPRS_PRECLIQUESTRATEGY 8247 +#define XPRS_REPAIRINFEASMAXTIME 8250 +#define XPRS_IFCHECKCONVEXITY 8251 +#define XPRS_PRIMALUNSHIFT 8252 +#define XPRS_REPAIRINDEFINITEQ 8254 +#define XPRS_MIPRAMPUP 8255 +#define XPRS_MAXLOCALBACKTRACK 8257 +#define XPRS_USERSOLHEURISTIC 8258 +#define XPRS_FORCEPARALLELDUAL 8265 +#define XPRS_BACKTRACKTIE 8266 +#define XPRS_BRANCHDISJ 8267 +#define XPRS_MIPFRACREDUCE 8270 +#define XPRS_CONCURRENTTHREADS 8274 +#define XPRS_MAXSCALEFACTOR 8275 +#define XPRS_HEURTHREADS 8276 +#define XPRS_THREADS 8278 +#define XPRS_HEURBEFORELP 8280 +#define XPRS_PREDOMROW 8281 +#define XPRS_BRANCHSTRUCTURAL 8282 +#define XPRS_QUADRATICUNSHIFT 8284 +#define XPRS_BARPRESOLVEOPS 8286 +#define XPRS_QSIMPLEXOPS 8288 +#define XPRS_MIPRESTART 8290 +#define XPRS_CONFLICTCUTS 8292 +#define XPRS_PREPROTECTDUAL 8293 +#define XPRS_CORESPERCPU 8296 +#define XPRS_RESOURCESTRATEGY 8297 +#define XPRS_CLAMPING 8301 +#define XPRS_SLEEPONTHREADWAIT 8302 +#define XPRS_PREDUPROW 8307 +#define XPRS_CPUPLATFORM 8312 +#define XPRS_BARALG 8315 +#define XPRS_SIFTING 8319 +#define XPRS_LPLOGSTYLE 8326 +#define XPRS_RANDOMSEED 8328 +#define XPRS_TREEQCCUTS 8331 +#define XPRS_PRELINDEP 8333 +#define XPRS_DUALTHREADS 8334 +#define 
XPRS_PREOBJCUTDETECT 8336 +#define XPRS_PREBNDREDQUAD 8337 +#define XPRS_PREBNDREDCONE 8338 +#define XPRS_PRECOMPONENTS 8339 +#define XPRS_MAXMIPTASKS 8347 +#define XPRS_MIPTERMINATIONMETHOD 8348 +#define XPRS_PRECONEDECOMP 8349 +#define XPRS_HEURFORCESPECIALOBJ 8350 +#define XPRS_HEURSEARCHROOTCUTFREQ 8351 +#define XPRS_PREELIMQUAD 8353 +#define XPRS_PREIMPLICATIONS 8356 +#define XPRS_TUNERMODE 8359 +#define XPRS_TUNERMETHOD 8360 +#define XPRS_TUNERTARGET 8362 +#define XPRS_TUNERTHREADS 8363 +#define XPRS_TUNERHISTORY 8365 +#define XPRS_TUNERPERMUTE 8366 +#define XPRS_TUNERVERBOSE 8370 +#define XPRS_TUNEROUTPUT 8372 +#define XPRS_PREANALYTICCENTER 8374 +#define XPRS_NETCUTS 8382 +#define XPRS_LPFLAGS 8385 +#define XPRS_MIPKAPPAFREQ 8386 +#define XPRS_OBJSCALEFACTOR 8387 +#define XPRS_TREEFILELOGINTERVAL 8389 +#define XPRS_IGNORECONTAINERCPULIMIT 8390 +#define XPRS_IGNORECONTAINERMEMORYLIMIT 8391 +#define XPRS_MIPDUALREDUCTIONS 8392 +#define XPRS_GENCONSDUALREDUCTIONS 8395 +#define XPRS_PWLDUALREDUCTIONS 8396 +#define XPRS_BARFAILITERLIMIT 8398 +#define XPRS_AUTOSCALING 8406 +#define XPRS_GENCONSABSTRANSFORMATION 8408 +#define XPRS_COMPUTEJOBPRIORITY 8409 +#define XPRS_PREFOLDING 8410 +#define XPRS_NETSTALLLIMIT 8412 +#define XPRS_SERIALIZEPREINTSOL 8413 +#define XPRS_NUMERICALEMPHASIS 8416 +#define XPRS_PWLNONCONVEXTRANSFORMATION 8420 +#define XPRS_MIPCOMPONENTS 8421 +#define XPRS_MIPCONCURRENTNODES 8422 +#define XPRS_MIPCONCURRENTSOLVES 8423 +#define XPRS_OUTPUTCONTROLS 8424 +#define XPRS_SIFTSWITCH 8425 +#define XPRS_HEUREMPHASIS 8427 +#define XPRS_COMPUTEMATX 8428 +#define XPRS_COMPUTEMATX_IIS 8429 +#define XPRS_COMPUTEMATX_IISMAXTIME 8430 +#define XPRS_BARREFITER 8431 +#define XPRS_COMPUTELOG 8434 +#define XPRS_SIFTPRESOLVEOPS 8435 +#define XPRS_CHECKINPUTDATA 8436 +#define XPRS_ESCAPENAMES 8440 +#define XPRS_IOTIMEOUT 8442 +#define XPRS_AUTOCUTTING 8446 +#define XPRS_CALLBACKCHECKTIMEDELAY 8451 +#define XPRS_MULTIOBJOPS 8457 +#define XPRS_MULTIOBJLOG 8458 
+#define XPRS_GLOBALSPATIALBRANCHIFPREFERORIG 8465 +#define XPRS_PRECONFIGURATION 8470 +#define XPRS_FEASIBILITYJUMP 8471 +#define XPRS_EXTRAELEMS 8006 +#define XPRS_EXTRASETELEMS 8191 +#define XPRS_LPOBJVAL 2001 +#define XPRS_MIPOBJVAL 2003 +#define XPRS_BESTBOUND 2004 +#define XPRS_OBJRHS 2005 +#define XPRS_OBJSENSE 2008 +#define XPRS_ROWS 1001 +#define XPRS_SIMPLEXITER 1009 +#define XPRS_BARITER 5001 +#define XPRS_SOLSTATUS_NOTFOUND 0 +#define XPRS_SOLSTATUS_OPTIMAL 1 +#define XPRS_SOLSTATUS_FEASIBLE 2 +#define XPRS_SOLSTATUS_INFEASIBLE 3 +#define XPRS_SOLSTATUS_UNBOUNDED 4 +#define XPRS_LPSTATUS 1010 +#define XPRS_MIPSTATUS 1011 +#define XPRS_NODES 1013 +#define XPRS_COLS 1018 +#define XPRS_MAXPROBNAMELENGTH 1158 +#define XPRS_LP_UNSTARTED 0 +#define XPRS_LP_OPTIMAL 1 +#define XPRS_LP_INFEAS 2 +#define XPRS_LP_CUTOFF 3 +#define XPRS_LP_UNFINISHED 4 +#define XPRS_LP_UNBOUNDED 5 +#define XPRS_LP_CUTOFF_IN_DUAL 6 +#define XPRS_LP_UNSOLVED 7 +#define XPRS_LP_NONCONVEX 8 +#define XPRS_MIP_SOLUTION 4 +#define XPRS_MIP_INFEAS 5 +#define XPRS_MIP_OPTIMAL 6 +#define XPRS_MIP_UNBOUNDED 7 +#define XPRS_ALG_DUAL 2 +#define XPRS_ALG_PRIMAL 3 +#define XPRS_ALG_BARRIER 4 +#define XPRS_OBJ_MINIMIZE 1 +#define XPRS_OBJ_MAXIMIZE -1 +#define XPRS_UUID 3011 +// *************************************************************************** +// * variable types * +// *************************************************************************** +#define XPRS_INTEGER 'I' +#define XPRS_CONTINUOUS 'C' +// *************************************************************************** +// * constraint types * +// *************************************************************************** +#define XPRS_LESS_EQUAL 'L' +#define XPRS_GREATER_EQUAL 'G' +#define XPRS_EQUAL 'E' +#define XPRS_RANGE 'R' +#define XPRS_NONBINDING 'N' +// *************************************************************************** +// * basis status * +// 
*************************************************************************** +#define XPRS_AT_LOWER 0 +#define XPRS_BASIC 1 +#define XPRS_AT_UPPER 2 +#define XPRS_FREE_SUPER 3 + +// Let's not reformat for rest of the file. +// NOLINTBEGIN(whitespace/line_length) +// clang-format off +extern std::function XPRScreateprob; +extern std::function XPRSdestroyprob; +extern std::function XPRSinit; +extern std::function XPRSfree; +extern std::function XPRSgetlicerrmsg; +extern std::function XPRSlicense; +extern std::function XPRSgetbanner; +extern std::function XPRSgetversion; +extern std::function XPRSsetprobname; +extern std::function XPRSsetdefaultcontrol; +extern std::function XPRSinterrupt; +extern std::function XPRSsetintcontrol; +extern std::function XPRSsetintcontrol64; +extern std::function XPRSsetdblcontrol; +extern std::function XPRSsetstrcontrol; +OR_DLL extern std::function XPRSgetintcontrol; +OR_DLL extern std::function XPRSgetintcontrol64; +OR_DLL extern std::function XPRSgetdblcontrol; +OR_DLL extern std::function XPRSgetstringcontrol; +OR_DLL extern std::function XPRSgetintattrib; +OR_DLL extern std::function XPRSgetstringattrib; +OR_DLL extern std::function XPRSgetdblattrib; +extern std::function XPRSgetcontrolinfo; +OR_DLL extern std::function XPRSgetobj; +OR_DLL extern std::function XPRSgetrhs; +OR_DLL extern std::function XPRSgetrhsrange; +OR_DLL extern std::function XPRSgetlb; +OR_DLL extern std::function XPRSgetub; +OR_DLL extern std::function XPRSgetcoef; +extern std::function XPRSgetduals; +extern std::function XPRSgetredcosts; +extern std::function XPRSaddrows; +extern std::function XPRSdelrows; +extern std::function XPRSaddcols; +extern std::function XPRSaddnames; +extern std::function XPRSgetnames; +extern std::function XPRSdelcols; +extern std::function XPRSchgcoltype; +extern std::function XPRSloadbasis; +extern std::function XPRSpostsolve; +extern std::function XPRSchgobjsense; +extern std::function XPRSgetlasterror; +extern std::function 
XPRSgetbasis; +extern std::function XPRSwriteprob; +OR_DLL extern std::function XPRSgetrowtype; +OR_DLL extern std::function XPRSgetcoltype; +extern std::function XPRSchgbounds; +extern std::function XPRSaddmipsol; +extern std::function XPRSgetlpsol; +extern std::function XPRSgetmipsol; +extern std::function XPRSchgobj; +extern std::function XPRSchgcoef; +extern std::function XPRSchgmcoef; +extern std::function XPRSchgmcoef64; +extern std::function XPRSchgmqobj; +extern std::function XPRSchgrhs; +extern std::function XPRSchgrhsrange; +extern std::function XPRSchgrowtype; +extern std::function XPRSdelobj; +extern std::function XPRSaddcbintsol; +extern std::function XPRSremovecbintsol; +extern std::function XPRSaddcbmessage; +extern std::function XPRSlpoptimize; +extern std::function XPRSmipoptimize; +extern std::function XPRSoptimize; +// clang-format on +// NOLINTEND(whitespace/line_length) + +} // namespace operations_research + +#endif // THIRD_PARTY_ORTOOLS_ORTOOLS_THIRD_PARTY_SOLVERS_XPRESS_ENVIRONMENT_H_ From 91e14de1f7c0a208fb0275bb18a4dd4191d757ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Wed, 18 Jun 2025 18:05:38 +0200 Subject: [PATCH 105/509] ortools: utils: keep compatibility with protobuf < 26 --- ortools/util/file_util.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ortools/util/file_util.cc b/ortools/util/file_util.cc index 6ee86f6e52..62d0aaa314 100644 --- a/ortools/util/file_util.cc +++ b/ortools/util/file_util.cc @@ -166,7 +166,11 @@ absl::Status WriteProtoToFile(absl::string_view filename, case ProtoWriteFormat::kJson: { google::protobuf::util::JsonPrintOptions options; options.add_whitespace = true; +#if PROTOBUF_VERSION >= 5026000 // Version 26.0.0 options.always_print_fields_with_no_presence = true; +#else + options.always_print_primitive_fields = true; +#endif options.preserve_proto_field_names = true; if (!google::protobuf::util::MessageToJsonString(proto, &output_string, options) From 
f1e95386d1f1e134586ece26c950a169d6538b7e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Jun 2025 10:29:31 +0200 Subject: [PATCH 106/509] cmake: Fix cmake_minimum_required to 3.24 (#4692) --- CMakeLists.txt | 2 +- cmake/README.md | 2 +- cmake/dependencies/CMakeLists.txt | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index efefbaa212..12f42a9183 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,7 +12,7 @@ # limitations under the License. # This file is just an orchestration -cmake_minimum_required(VERSION 3.20) +cmake_minimum_required(VERSION 3.24) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") # Enable output of compile commands during generation. diff --git a/cmake/README.md b/cmake/README.md index 816f67d067..ff84170a29 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -88,7 +88,7 @@ CMake as a standalone project or incorporate it into an existing CMake project. ## Requirement You'll need: -* `CMake >= 3.18`. +* `CMake >= 3.24`. * A C++20 compiler (GCC 10 or above) ## Solvers supported diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 80fdbb1b37..184fed78eb 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -11,6 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# We are using FetchContent OVERRIDE_FIND_PACKAGE introduced in 3.24 +cmake_minimum_required(VERSION 3.24) + # ############################################################################## # SWIG (WIN32) # ############################################################################## From 3365d1636c2d07f2826286f1098ef5447fb0ceb6 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 16:58:32 +0200 Subject: [PATCH 107/509] routing: export from google3 --- ortools/routing/BUILD.bazel | 52 +- ortools/routing/breaks.cc | 1302 ++++------------- ortools/routing/breaks.h | 110 ++ ortools/routing/constraints.cc | 237 +++ ortools/routing/constraints.h | 3 + ortools/routing/decision_builders.cc | 18 - ortools/routing/decision_builders.h | 23 +- ortools/routing/docs/ROUTING.md | 94 +- ortools/routing/filter_committables.cc | 73 + ortools/routing/filter_committables.h | 28 +- ortools/routing/filters.cc | 290 +++- ortools/routing/filters.h | 11 +- ortools/routing/flow.cc | 8 +- ortools/routing/heuristic_parameters.proto | 101 ++ ortools/routing/ils.cc | 55 +- ortools/routing/ils.proto | 18 +- ortools/routing/index_manager.cc | 1 - ortools/routing/insertion_lns.cc | 1 - ortools/routing/lp_scheduling.cc | 704 +++++---- ortools/routing/lp_scheduling.h | 98 +- ortools/routing/parameters.cc | 190 ++- ortools/routing/parameters.proto | 88 +- ortools/routing/parameters_utils.h | 1 + ortools/routing/routing.cc | 267 ++-- ortools/routing/routing.h | 267 +--- ortools/routing/samples/cvrptw_break.py | 1 + .../routing/samples/simple_routing_program.py | 1 + ortools/routing/samples/tsp_circuit_board.py | 1 + ortools/routing/samples/tsp_cities.py | 1 + .../routing/samples/tsp_distance_matrix.py | 1 + ortools/routing/samples/vrp.py | 1 + ortools/routing/samples/vrp_breaks.py | 1 + ortools/routing/samples/vrp_capacity.py | 1 + ortools/routing/samples/vrp_drop_nodes.py | 1 + ortools/routing/samples/vrp_global_span.py | 2 + ortools/routing/samples/vrp_initial_routes.py | 2 + 
ortools/routing/samples/vrp_node_max.py | 3 + .../routing/samples/vrp_pickup_delivery.py | 1 + .../samples/vrp_pickup_delivery_fifo.py | 1 + .../samples/vrp_pickup_delivery_lifo.py | 1 + ortools/routing/samples/vrp_resources.py | 1 + .../routing/samples/vrp_solution_callback.py | 3 + ortools/routing/samples/vrp_starts_ends.py | 1 + ortools/routing/samples/vrp_time_windows.py | 1 + ortools/routing/samples/vrp_tokens.py | 1 + .../routing/samples/vrp_with_time_limit.py | 1 + .../samples/vrptw_store_solution_data.py | 5 + ortools/routing/sat.cc | 2 +- ortools/routing/search.cc | 77 +- ortools/routing/search.h | 28 +- ortools/routing/types.h | 14 +- ortools/routing/utils.cc | 2 + 52 files changed, 2184 insertions(+), 2011 deletions(-) create mode 100644 ortools/routing/breaks.h create mode 100644 ortools/routing/filter_committables.cc create mode 100644 ortools/routing/heuristic_parameters.proto diff --git a/ortools/routing/BUILD.bazel b/ortools/routing/BUILD.bazel index 5d62812372..f17f7d0235 100644 --- a/ortools/routing/BUILD.bazel +++ b/ortools/routing/BUILD.bazel @@ -52,7 +52,10 @@ java_proto_library( proto_library( name = "ils_proto", srcs = ["ils.proto"], - deps = [":enums_proto"], + deps = [ + ":enums_proto", + ":heuristic_parameters_proto", + ], ) cc_proto_library( @@ -75,6 +78,7 @@ proto_library( srcs = ["parameters.proto"], deps = [ ":enums_proto", + ":heuristic_parameters_proto", ":ils_proto", "//ortools/constraint_solver:solver_parameters_proto", "//ortools/sat:sat_parameters_proto", @@ -109,6 +113,7 @@ cc_library( hdrs = ["parameters.h"], deps = [ ":enums_cc_proto", + ":heuristic_parameters_cc_proto", ":ils_cc_proto", ":parameters_cc_proto", "//ortools/base", @@ -122,6 +127,7 @@ cc_library( "//ortools/util:optional_boolean_cc_proto", "//ortools/util:testing_utils", "@abseil-cpp//absl/container:flat_hash_map", + "@abseil-cpp//absl/log", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/time", @@ -134,6 +140,7 @@ 
cc_library( srcs = ["parameters_utils.cc"], hdrs = ["parameters_utils.h"], deps = [ + ":heuristic_parameters_cc_proto", ":parameters_cc_proto", "//ortools/util:optional_boolean_cc_proto", "@abseil-cpp//absl/types:span", @@ -144,8 +151,8 @@ cc_library( name = "types", hdrs = ["types.h"], deps = [ - "//ortools/base:int_type", "//ortools/util:piecewise_linear_function", + "//ortools/util:strong_integers", ], ) @@ -191,10 +198,22 @@ cc_library( ], ) +cc_library( + name = "breaks", + srcs = ["breaks.cc"], + hdrs = ["breaks.h"], + deps = [ + ":filter_committables", + "//ortools/algorithms:binary_search", + "//ortools/util:saturated_arithmetic", + "@abseil-cpp//absl/log:check", + "@abseil-cpp//absl/types:span", + ], +) + cc_library( name = "routing", srcs = [ - "breaks.cc", "constraints.cc", "decision_builders.cc", "filters.cc", @@ -223,8 +242,10 @@ cc_library( "//conditions:default": [], }), deps = [ + ":breaks", ":enums_cc_proto", ":filter_committables", + ":heuristic_parameters_cc_proto", ":ils_cc_proto", ":index_manager", ":neighborhoods", @@ -235,7 +256,6 @@ cc_library( ":utils", "//ortools/base", "//ortools/base:dump_vars", - "//ortools/base:int_type", "//ortools/base:map_util", "//ortools/base:mathutil", "//ortools/base:protoutil", @@ -256,7 +276,7 @@ cc_library( "//ortools/port:proto_utils", "//ortools/sat:cp_model_cc_proto", "//ortools/sat:cp_model_solver", - "//ortools/sat:integer", + "//ortools/sat:integer_base", "//ortools/sat:lp_utils", "//ortools/sat:model", "//ortools/sat:sat_parameters_cc_proto", @@ -267,7 +287,6 @@ cc_library( "//ortools/util:range_minimum_query", "//ortools/util:range_query_function", "//ortools/util:saturated_arithmetic", - "//ortools/util:scheduling", "//ortools/util:sorted_interval_list", "//ortools/util:time_limit", "@abseil-cpp//absl/algorithm:container", @@ -295,6 +314,7 @@ cc_library( cc_library( name = "filter_committables", + srcs = ["filter_committables.cc"], hdrs = ["filter_committables.h"], deps = [ "//ortools/util:bitset", 
@@ -303,3 +323,23 @@ cc_library( "@abseil-cpp//absl/types:span", ], ) + +proto_library( + name = "heuristic_parameters_proto", + srcs = ["heuristic_parameters.proto"], +) + +java_proto_library( + name = "heuristic_parameters_java_proto", + deps = [":heuristic_parameters_proto"], +) + +cc_proto_library( + name = "heuristic_parameters_cc_proto", + deps = [":heuristic_parameters_proto"], +) + +py_proto_library( + name = "heuristic_parameters_py_pb2", + deps = [":heuristic_parameters_proto"], +) diff --git a/ortools/routing/breaks.cc b/ortools/routing/breaks.cc index e00fea29d0..85ba622b73 100644 --- a/ortools/routing/breaks.cc +++ b/ortools/routing/breaks.cc @@ -11,1079 +11,287 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "ortools/routing/breaks.h" + #include #include -#include #include -#include -#include -#include #include #include #include "absl/log/check.h" #include "absl/types/span.h" -#include "ortools/base/logging.h" -#include "ortools/base/types.h" -#include "ortools/constraint_solver/constraint_solver.h" -#include "ortools/constraint_solver/constraint_solveri.h" -#include "ortools/routing/filters.h" -#include "ortools/routing/routing.h" +#include "ortools/algorithms/binary_search.h" +#include "ortools/routing/filter_committables.h" #include "ortools/util/saturated_arithmetic.h" -#include "ortools/util/scheduling.h" -#include "ortools/util/sorted_interval_list.h" namespace operations_research::routing { -bool DisjunctivePropagator::Propagate(Tasks* tasks) { - DCHECK_LE(tasks->num_chain_tasks, tasks->start_min.size()); - DCHECK_EQ(tasks->start_min.size(), tasks->start_max.size()); - DCHECK_EQ(tasks->start_min.size(), tasks->duration_min.size()); - DCHECK_EQ(tasks->start_min.size(), tasks->duration_max.size()); - DCHECK_EQ(tasks->start_min.size(), tasks->end_min.size()); - DCHECK_EQ(tasks->start_min.size(), tasks->end_max.size()); - DCHECK_EQ(tasks->start_min.size(), 
tasks->is_preemptible.size()); - // Do forward deductions, then backward deductions. - // All propagators are followed by Precedences(), - // except MirrorTasks() after which Precedences() would make no deductions, - // and DetectablePrecedencesWithChain() which is stronger than Precedences(). - // Precedences() is a propagator that does obvious deductions quickly (O(n)), - // so interleaving Precedences() speeds up the propagation fixed point. - if (!Precedences(tasks) || !EdgeFinding(tasks) || !Precedences(tasks) || - !DetectablePrecedencesWithChain(tasks)) { - return false; +// break_duration_on_transition_ is initialized with an upper bound on the +// number of transitions. +BreakPropagator::BreakPropagator(int num_nodes) + : break_duration_on_transition_(num_nodes, 0) {} + +BreakPropagator::PropagationResult BreakPropagator::FastPropagations( + int path, DimensionValues& dimension_values, + const PrePostVisitValues& visits) { + using VehicleBreak = DimensionValues::VehicleBreak; + const absl::Span vehicle_breaks = + absl::MakeSpan(dimension_values.MutableVehicleBreaks(path)); + if (vehicle_breaks.empty()) return kUnchanged; + const absl::Span cumuls = dimension_values.MutableCumuls(path); + const int num_cumuls = cumuls.size(); + const absl::Span travels = dimension_values.Travels(path); + const absl::Span travel_sums = + dimension_values.TravelSums(path); + const absl::Span pre_visits = visits.PreVisits(path); + const absl::Span post_visits = visits.PostVisits(path); + const auto visit_start_max = [cumuls, pre_visits](int c) -> int64_t { + return CapSub(cumuls[c].max, pre_visits[c]); + }; + const auto visit_end_min = [cumuls, post_visits](int c) -> int64_t { + return CapAdd(cumuls[c].min, post_visits[c]); + }; + const auto travel_slack = [cumuls, travels](int c) -> int64_t { + const int64_t slack = + CapSub(CapSub(cumuls[c + 1].max, cumuls[c].min), travels[c]); + DCHECK_GE(slack, 0); + return slack; + }; + PropagationResult result = kUnchanged; + // 
Propagations on cumuls are delayed to not break BinarySearch. + delayed_propagations_.clear(); + // When breaks must be performed inside the route, accumulate location as a + // min/max window and their total duration. + int breaks_window_min = num_cumuls; + int breaks_window_max = -1; + int64_t breaks_total_duration = 0; + break_duration_on_transition_.Revert(); + const int num_breaks = vehicle_breaks.size(); + for (VehicleBreak& br : vehicle_breaks) { + if (br.is_performed.min == 0) continue; + if (!IncreaseMin(CapSub(br.end.min, br.start.max), &br.duration, &result)) { + return result; + } + // Find largest c_min such that time windows prevent break br to end + // before cumul c_min. In that case, all visits up to and including c_min + // must be performed before the break. + int c_min = BinarySearch( + -1, num_cumuls, [&visit_start_max, break_end_min = br.end.min](int c) { + return visit_start_max(c) < break_end_min; + }); + if (c_min >= 0) { + while (c_min < num_cumuls - 1 && travel_slack(c_min) < br.duration.min) { + ++c_min; + } + if (!IncreaseMin(visit_end_min(c_min), &br.start, &result) || + !IncreaseMin(CapAdd(br.start.min, br.duration.min), &br.end, + &result)) { + return kInfeasible; + } + // The min break duration should fit, but in some cases (interbreaks) we + // may have br.start.min + br.duration.min < br.end.min. + // If br.end.min - br.start.min > travel_slack: + // - either the break is during the travel c_min -> c_min+1, + // in this case its duration is at most travel_slack, + // so that br.end.min - br.start <= travel_slack. + // Some of the travel c_min -> c_min+1 may be performed after the break. + // - or the break is after c_min+1, and all of the travel c_min -> c_min+1 + // is performed before the break: br.start >= cumul[c_min] + travel. + // We have to take the weaker of the two alternatives, the first one. 
+ if (c_min < num_cumuls - 1 && + !IncreaseMin(CapSub(br.end.min, travel_slack(c_min)), &br.start, + &result)) { + return kInfeasible; + } + // Visit c_min must be before the break. + const int64_t cumul_ub = CapSub(br.start.max, post_visits[c_min]); + if (cumuls[c_min].min > cumul_ub) return kInfeasible; + delayed_propagations_.push_back( + {.value = cumul_ub, .index = c_min, .is_min = false}); + } + // Find smallest c_max such that time windows prevent break br to start + // after cumul c_max. In that case, all visits including and after c_max + // must be performed after the break. + int c_max = BinarySearch( + num_cumuls, -1, + [&visit_end_min, break_start_max = br.start.max](int c) { + return break_start_max < visit_end_min(c); + }); + if (c_max < num_cumuls) { + while (c_max > 0 && travel_slack(c_max - 1) < br.duration.min) --c_max; + if (!DecreaseMax(visit_start_max(c_max), &br.end, &result) || + !DecreaseMax(CapSub(br.end.max, br.duration.min), &br.start, + &result)) { + return kInfeasible; + } + // See the comment on the symmetric situation above. + if (c_max > 0 && + !DecreaseMax(CapAdd(br.start.max, travel_slack(c_max - 1)), &br.end, + &result)) { + return kInfeasible; + } + // Visit c_max must be after the break, delay to not break BinarySearch. + const int64_t cumul_lb = CapAdd(br.end.min, pre_visits[c_max]); + if (cumuls[c_max].max < cumul_lb) return kInfeasible; + delayed_propagations_.push_back( + {.value = cumul_lb, .index = c_max, .is_min = true}); + } + // If the break must be inside the route, it must be inside some cumul + // window, here [c_min, c_max]. + // Overload checking: if transit + break duration do not fit, the break is + // infeasible. + // Edge finding: if it fits, push cumuls c_min/c_max to leave enough room. 
+ if (0 <= c_min && c_max < num_cumuls) { + const int64_t transit = CapAdd( + br.duration.min, CapSub(travel_sums[c_max], travel_sums[c_min])); + if (CapAdd(cumuls[c_min].min, transit) > cumuls[c_max].max) { + return kInfeasible; + } + delayed_propagations_.push_back( + {.value = CapAdd(cumuls[c_min].min, transit), + .index = c_max, + .is_min = true}); + delayed_propagations_.push_back( + {.value = CapSub(cumuls[c_max].max, transit), + .index = c_min, + .is_min = false}); + breaks_window_min = std::min(breaks_window_min, c_min); + breaks_window_max = std::max(breaks_window_max, c_max); + CapAddTo(br.duration.min, &breaks_total_duration); + // If this break is forced on the transition c_min -> c_min + 1, + // accumulate its duration to this transition. + if (num_breaks > 1 && c_min + 1 == c_max) { + const int64_t total_duration = break_duration_on_transition_.Get(c_min); + break_duration_on_transition_.Set( + c_min, CapAdd(total_duration, br.duration.min)); + } + } } - if (!tasks->forbidden_intervals.empty()) { - if (!ForbiddenIntervals(tasks) || !Precedences(tasks)) return false; + // After the previous loop, there are no BinarySearch() calls, so there is + // no need to delay propagations. + + // Per-transition reasoning: total break duration + travel must fit. + for (const int t : break_duration_on_transition_.ChangedIndices()) { + const int64_t total = + CapAdd(travels[t], break_duration_on_transition_.Get(t)); + if (!IncreaseMin(CapAdd(cumuls[t].min, total), &cumuls[t + 1], &result) || + !DecreaseMax(CapSub(cumuls[t + 1].max, total), &cumuls[t], &result)) { + return kInfeasible; + } } - if (!tasks->distance_duration.empty()) { - if (!DistanceDuration(tasks) || !Precedences(tasks)) return false; + // Overload checker reasoning on overall break window. 
+ if (breaks_total_duration > 0) { + const int64_t window_transit = CapAdd( + breaks_total_duration, + CapSub(travel_sums[breaks_window_max], travel_sums[breaks_window_min])); + if (!IncreaseMin(CapAdd(cumuls[breaks_window_min].min, window_transit), + &cumuls[breaks_window_max], &result) || + !DecreaseMax(CapSub(cumuls[breaks_window_max].max, window_transit), + &cumuls[breaks_window_min], &result)) { + return kInfeasible; + } } - if (!MirrorTasks(tasks) || !EdgeFinding(tasks) || !Precedences(tasks) || - !DetectablePrecedencesWithChain(tasks) || !MirrorTasks(tasks)) { - return false; + for (const auto& [value, index, is_min] : delayed_propagations_) { + if (is_min && !IncreaseMin(value, &cumuls[index], &result)) { + return kInfeasible; + } + if (!is_min && !DecreaseMax(value, &cumuls[index], &result)) { + return kInfeasible; + } } - return true; + return result; } -bool DisjunctivePropagator::Precedences(Tasks* tasks) { - const int num_chain_tasks = tasks->num_chain_tasks; - if (num_chain_tasks > 0) { - // Propagate forwards. - int64_t time = tasks->start_min[0]; - for (int task = 0; task < num_chain_tasks; ++task) { - time = std::max(tasks->start_min[task], time); - tasks->start_min[task] = time; - time = CapAdd(time, tasks->duration_min[task]); - if (tasks->end_max[task] < time) return false; - time = std::max(time, tasks->end_min[task]); - tasks->end_min[task] = time; - } - // Propagate backwards. - time = tasks->end_max[num_chain_tasks - 1]; - for (int task = num_chain_tasks - 1; task >= 0; --task) { - time = std::min(tasks->end_max[task], time); - tasks->end_max[task] = time; - time = CapSub(time, tasks->duration_min[task]); - if (time < tasks->start_min[task]) return false; - time = std::min(time, tasks->start_max[task]); - tasks->start_max[task] = time; - } - } - const int num_tasks = tasks->start_min.size(); - for (int task = 0; task < num_tasks; ++task) { - // Enforce start + duration <= end. 
- tasks->end_min[task] = - std::max(tasks->end_min[task], - CapAdd(tasks->start_min[task], tasks->duration_min[task])); - tasks->start_max[task] = - std::min(tasks->start_max[task], - CapSub(tasks->end_max[task], tasks->duration_min[task])); - tasks->duration_max[task] = - std::min(tasks->duration_max[task], - CapSub(tasks->end_max[task], tasks->start_min[task])); - if (!tasks->is_preemptible[task]) { - // Enforce start + duration == end for nonpreemptibles. - tasks->end_max[task] = - std::min(tasks->end_max[task], - CapAdd(tasks->start_max[task], tasks->duration_max[task])); - tasks->start_min[task] = - std::max(tasks->start_min[task], - CapSub(tasks->end_min[task], tasks->duration_max[task])); - tasks->duration_min[task] = - std::max(tasks->duration_min[task], - CapSub(tasks->end_min[task], tasks->start_max[task])); - } - if (tasks->duration_min[task] > tasks->duration_max[task]) return false; - if (tasks->end_min[task] > tasks->end_max[task]) return false; - if (tasks->start_min[task] > tasks->start_max[task]) return false; - } - return true; -} - -bool DisjunctivePropagator::MirrorTasks(Tasks* tasks) { - const int num_tasks = tasks->start_min.size(); - // For all tasks, start_min := -end_max and end_max := -start_min. - for (int task = 0; task < num_tasks; ++task) { - const int64_t t = -tasks->start_min[task]; - tasks->start_min[task] = -tasks->end_max[task]; - tasks->end_max[task] = t; - } - // For all tasks, start_max := -end_min and end_min := -start_max. - for (int task = 0; task < num_tasks; ++task) { - const int64_t t = -tasks->start_max[task]; - tasks->start_max[task] = -tasks->end_min[task]; - tasks->end_min[task] = t; - } - // In the mirror problem, tasks linked by precedences are in reversed order. 
- const int num_chain_tasks = tasks->num_chain_tasks; - for (const auto it : - {tasks->start_min.begin(), tasks->start_max.begin(), - tasks->duration_min.begin(), tasks->duration_max.begin(), - tasks->end_min.begin(), tasks->end_max.begin()}) { - std::reverse(it, it + num_chain_tasks); - std::reverse(it + num_chain_tasks, it + num_tasks); - } - std::reverse(tasks->is_preemptible.begin(), - tasks->is_preemptible.begin() + num_chain_tasks); - std::reverse(tasks->is_preemptible.begin() + num_chain_tasks, - tasks->is_preemptible.begin() + num_tasks); - return true; -} - -bool DisjunctivePropagator::EdgeFinding(Tasks* tasks) { - const int num_tasks = tasks->start_min.size(); - // Prepare start_min events for tree. - tasks_by_start_min_.resize(num_tasks); - std::iota(tasks_by_start_min_.begin(), tasks_by_start_min_.end(), 0); - std::sort( - tasks_by_start_min_.begin(), tasks_by_start_min_.end(), - [&](int i, int j) { return tasks->start_min[i] < tasks->start_min[j]; }); - event_of_task_.resize(num_tasks); - for (int event = 0; event < num_tasks; ++event) { - event_of_task_[tasks_by_start_min_[event]] = event; - } - // Tasks will be browsed according to end_max order. - tasks_by_end_max_.resize(num_tasks); - std::iota(tasks_by_end_max_.begin(), tasks_by_end_max_.end(), 0); - std::sort( - tasks_by_end_max_.begin(), tasks_by_end_max_.end(), - [&](int i, int j) { return tasks->end_max[i] < tasks->end_max[j]; }); - - // Generic overload checking: insert tasks by end_max, - // fail if envelope > end_max. 
- theta_lambda_tree_.Reset(num_tasks); - for (const int task : tasks_by_end_max_) { - theta_lambda_tree_.AddOrUpdateEvent( - event_of_task_[task], tasks->start_min[task], tasks->duration_min[task], - tasks->duration_min[task]); - if (theta_lambda_tree_.GetEnvelope() > tasks->end_max[task]) { - return false; - } - } - - // Generic edge finding: from full set of tasks, at each end_max event in - // decreasing order, check lambda feasibility, then move end_max task from - // theta to lambda. - for (int i = num_tasks - 1; i >= 0; --i) { - const int task = tasks_by_end_max_[i]; - const int64_t envelope = theta_lambda_tree_.GetEnvelope(); - // If a nonpreemptible optional would overload end_max, push to envelope. - while (theta_lambda_tree_.GetOptionalEnvelope() > tasks->end_max[task]) { - int critical_event; // Dummy value. - int optional_event; - int64_t available_energy; // Dummy value. - theta_lambda_tree_.GetEventsWithOptionalEnvelopeGreaterThan( - tasks->end_max[task], &critical_event, &optional_event, - &available_energy); - const int optional_task = tasks_by_start_min_[optional_event]; - tasks->start_min[optional_task] = - std::max(tasks->start_min[optional_task], envelope); - theta_lambda_tree_.RemoveEvent(optional_event); - } - if (!tasks->is_preemptible[task]) { - theta_lambda_tree_.AddOrUpdateOptionalEvent(event_of_task_[task], - tasks->start_min[task], - tasks->duration_min[task]); - } else { - theta_lambda_tree_.RemoveEvent(event_of_task_[task]); - } - } - return true; -} - -bool DisjunctivePropagator::DetectablePrecedencesWithChain(Tasks* tasks) { - const int num_tasks = tasks->start_min.size(); - // Prepare start_min events for tree. 
- tasks_by_start_min_.resize(num_tasks); - std::iota(tasks_by_start_min_.begin(), tasks_by_start_min_.end(), 0); - std::sort( - tasks_by_start_min_.begin(), tasks_by_start_min_.end(), - [&](int i, int j) { return tasks->start_min[i] < tasks->start_min[j]; }); - event_of_task_.resize(num_tasks); - for (int event = 0; event < num_tasks; ++event) { - event_of_task_[tasks_by_start_min_[event]] = event; - } - theta_lambda_tree_.Reset(num_tasks); - - // Sort nonchain tasks by start max = end_max - duration_min. - const int num_chain_tasks = tasks->num_chain_tasks; - nonchain_tasks_by_start_max_.resize(num_tasks - num_chain_tasks); - std::iota(nonchain_tasks_by_start_max_.begin(), - nonchain_tasks_by_start_max_.end(), num_chain_tasks); - std::sort(nonchain_tasks_by_start_max_.begin(), - nonchain_tasks_by_start_max_.end(), [&tasks](int i, int j) { - return tasks->end_max[i] - tasks->duration_min[i] < - tasks->end_max[j] - tasks->duration_min[j]; - }); - - // Detectable precedences, specialized for routes: for every task on route, - // put all tasks before it in the tree, then push with envelope. - int index_nonchain = 0; - for (int i = 0; i < num_chain_tasks; ++i) { - if (!tasks->is_preemptible[i]) { - // Add all nonchain tasks detected before i. - while (index_nonchain < nonchain_tasks_by_start_max_.size()) { - const int task = nonchain_tasks_by_start_max_[index_nonchain]; - if (tasks->end_max[task] - tasks->duration_min[task] >= - tasks->start_min[i] + tasks->duration_min[i]) - break; - theta_lambda_tree_.AddOrUpdateEvent( - event_of_task_[task], tasks->start_min[task], - tasks->duration_min[task], tasks->duration_min[task]); - index_nonchain++; - } - } - // All chain and nonchain tasks before i are now in the tree, push i. - const int64_t new_start_min = theta_lambda_tree_.GetEnvelope(); - // Add i to the tree before updating it. 
- theta_lambda_tree_.AddOrUpdateEvent(event_of_task_[i], tasks->start_min[i], - tasks->duration_min[i], - tasks->duration_min[i]); - tasks->start_min[i] = std::max(tasks->start_min[i], new_start_min); - } - return true; -} - -bool DisjunctivePropagator::ForbiddenIntervals(Tasks* tasks) { - if (tasks->forbidden_intervals.empty()) return true; - const int num_tasks = tasks->start_min.size(); - for (int task = 0; task < num_tasks; ++task) { - if (tasks->duration_min[task] == 0) continue; - if (tasks->forbidden_intervals[task] == nullptr) continue; - // If start_min forbidden, push to next feasible value. - { - const auto& interval = - tasks->forbidden_intervals[task]->FirstIntervalGreaterOrEqual( - tasks->start_min[task]); - if (interval == tasks->forbidden_intervals[task]->end()) continue; - if (interval->start <= tasks->start_min[task]) { - tasks->start_min[task] = CapAdd(interval->end, 1); - } - } - // If end_max forbidden, push to next feasible value. - { - const int64_t start_max = - CapSub(tasks->end_max[task], tasks->duration_min[task]); - const auto& interval = - tasks->forbidden_intervals[task]->LastIntervalLessOrEqual(start_max); - if (interval == tasks->forbidden_intervals[task]->end()) continue; - if (interval->end >= start_max) { - tasks->end_max[task] = - CapAdd(interval->start, tasks->duration_min[task] - 1); - } - } - if (CapAdd(tasks->start_min[task], tasks->duration_min[task]) > - tasks->end_max[task]) { - return false; - } - } - return true; -} - -bool DisjunctivePropagator::DistanceDuration(Tasks* tasks) { - if (tasks->distance_duration.empty()) return true; - if (tasks->num_chain_tasks == 0) return true; - const int route_start = 0; - const int route_end = tasks->num_chain_tasks - 1; - const int num_tasks = tasks->start_min.size(); - for (int i = 0; i < tasks->distance_duration.size(); ++i) { - const int64_t max_distance = tasks->distance_duration[i].first; - const int64_t minimum_break_duration = tasks->distance_duration[i].second; - - // This is 
a sweeping algorithm that looks whether the union of intervals - // defined by breaks and route start/end is (-infty, +infty). - // Those intervals are: - // - route start: (-infty, start_max + distance] - // - route end: [end_min, +infty) - // - breaks: [start_min, end_max + distance) if their duration_max - // is >= min_duration, empty set otherwise. - // If sweeping finds that a time point can be covered by only one interval, - // it will force the corresponding break or route start/end to cover this - // point, which can force a break to be above minimum_break_duration. - - // We suppose break tasks are ordered, so the algorithm supposes that - // start_min(task_n) <= start_min(task_{n+1}) and - // end_max(task_n) <= end_max(task_{n+1}). - for (int task = tasks->num_chain_tasks + 1; task < num_tasks; ++task) { - tasks->start_min[task] = - std::max(tasks->start_min[task], tasks->start_min[task - 1]); - } - for (int task = num_tasks - 2; task >= tasks->num_chain_tasks; --task) { - tasks->end_max[task] = - std::min(tasks->end_max[task], tasks->end_max[task + 1]); - } - // Skip breaks that cannot be performed after start. - int index_break_by_emax = tasks->num_chain_tasks; - while (index_break_by_emax < num_tasks && - tasks->end_max[index_break_by_emax] <= tasks->end_min[route_start]) { - ++index_break_by_emax; - } - // Special case: no breaks after start. - if (index_break_by_emax == num_tasks) { - tasks->end_min[route_start] = - std::max(tasks->end_min[route_start], - CapSub(tasks->start_min[route_end], max_distance)); - tasks->start_max[route_end] = - std::min(tasks->start_max[route_end], - CapAdd(tasks->end_max[route_start], max_distance)); - continue; - } - // There will be a break after start, so route_start coverage is tested. - // Initial state: start at -inf with route_start in task_set. - // Sweep over profile, looking for time points where the number of - // covering breaks is <= 1. If it is 0, fail, otherwise force the - // unique break to cover it. 
- // Route start and end get a special treatment, not sure generalizing - // would be better. - int64_t xor_active_tasks = route_start; - int num_active_tasks = 1; - int64_t previous_time = std::numeric_limits::min(); - const int64_t route_start_time = - CapAdd(tasks->end_max[route_start], max_distance); - const int64_t route_end_time = tasks->start_min[route_end]; - // NOTE: all smin events must be closed by a corresponding emax event, - // otherwise num_active_tasks is wrong (too high) and the reasoning misses - // some filtering. - int index_break_by_smin = index_break_by_emax; - while (index_break_by_emax < num_tasks) { - // Find next time point among start/end of covering intervals. - int64_t current_time = - CapAdd(tasks->end_max[index_break_by_emax], max_distance); - if (index_break_by_smin < num_tasks) { - current_time = - std::min(current_time, tasks->start_min[index_break_by_smin]); - } - if (previous_time < route_start_time && route_start_time < current_time) { - current_time = route_start_time; - } - if (previous_time < route_end_time && route_end_time < current_time) { - current_time = route_end_time; - } - // If num_active_tasks was 1, the unique active task must cover from - // previous_time to current_time. - if (num_active_tasks == 1) { - // xor_active_tasks is the unique task that can cover [previous_time, - // current_time). - if (xor_active_tasks != route_end) { - tasks->end_min[xor_active_tasks] = - std::max(tasks->end_min[xor_active_tasks], - CapSub(current_time, max_distance)); - if (xor_active_tasks != route_start) { - tasks->duration_min[xor_active_tasks] = std::max( - tasks->duration_min[xor_active_tasks], - std::max( - minimum_break_duration, - CapSub(CapSub(current_time, max_distance), previous_time))); - } - } - } - // Process covering intervals that start or end at current_time. 
- while (index_break_by_smin < num_tasks && - current_time == tasks->start_min[index_break_by_smin]) { - if (tasks->duration_max[index_break_by_smin] >= - minimum_break_duration) { - xor_active_tasks ^= index_break_by_smin; - ++num_active_tasks; - } - ++index_break_by_smin; - } - while (index_break_by_emax < num_tasks && - current_time == - CapAdd(tasks->end_max[index_break_by_emax], max_distance)) { - if (tasks->duration_max[index_break_by_emax] >= - minimum_break_duration) { - xor_active_tasks ^= index_break_by_emax; - --num_active_tasks; - } - ++index_break_by_emax; - } - if (current_time == route_start_time) { - xor_active_tasks ^= route_start; - --num_active_tasks; - } - if (current_time == route_end_time) { - xor_active_tasks ^= route_end; - ++num_active_tasks; - } - // If num_active_tasks becomes 1, the unique active task must cover from - // current_time. - if (num_active_tasks <= 0) return false; - if (num_active_tasks == 1) { - if (xor_active_tasks != route_start) { - // xor_active_tasks is the unique task that can cover from - // current_time to the next time point. - tasks->start_max[xor_active_tasks] = - std::min(tasks->start_max[xor_active_tasks], current_time); - if (xor_active_tasks != route_end) { - tasks->duration_min[xor_active_tasks] = std::max( - tasks->duration_min[xor_active_tasks], minimum_break_duration); - } - } - } - previous_time = current_time; - } - } - return true; -} - -bool DisjunctivePropagator::ChainSpanMin(Tasks* tasks) { - const int num_chain_tasks = tasks->num_chain_tasks; - if (num_chain_tasks < 1) return true; - // TODO(user): add stronger bounds. - // The duration of the chain plus that of nonchain tasks that must be - // performed during the chain is a lower bound of the chain span. 
- { - int64_t sum_chain_durations = 0; - const auto duration_start = tasks->duration_min.begin(); - const auto duration_end = tasks->duration_min.begin() + num_chain_tasks; - for (auto it = duration_start; it != duration_end; ++it) { - sum_chain_durations = CapAdd(sum_chain_durations, *it); - } - int64_t sum_forced_nonchain_durations = 0; - for (int i = num_chain_tasks; i < tasks->start_min.size(); ++i) { - // Tasks that can be executed before or after are skipped. - if (tasks->end_min[i] <= tasks->start_max[0] || - tasks->end_min[num_chain_tasks - 1] <= tasks->start_max[i]) { +// Add interbreak reasoning. +BreakPropagator::PropagationResult BreakPropagator::PropagateInterbreak( + int path, DimensionValues& dimension, + absl::Span> interbreaks) { + PropagationResult result = kUnchanged; + absl::Span cumuls = dimension.MutableCumuls(path); + std::vector& vehicle_breaks = + dimension.MutableVehicleBreaks(path); + // We use fake breaks for start/end of path: + // - start break: [kint64min, cumul[0]) + // - end break: [cumul[n-1], kint64max). + const int64_t kint64min = std::numeric_limits::min(); + const int64_t kint64max = std::numeric_limits::max(); + vehicle_breaks.push_back({.start = {kint64min, kint64min}, + .end = cumuls.front(), + .duration = {0, kint64max}, + .is_performed = {1, 1}}); + vehicle_breaks.push_back({.start = cumuls.back(), + .end = {kint64max, kint64max}, + .duration = {0, kint64max}, + .is_performed = {1, 1}}); + const int num_breaks = vehicle_breaks.size(); + for (const auto [limit, min_break_duration] : interbreaks) { + // Generate and sort events by increasing time. Events have to be + // regenerated for each interbreak, because end events depend on the limit. 
+ usage_events_.clear(); + for (int i = 0; i < num_breaks; ++i) { + const auto& br = vehicle_breaks[i]; + if (br.is_performed.max == 0 || br.duration.max < min_break_duration) { continue; } - sum_forced_nonchain_durations = - CapAdd(sum_forced_nonchain_durations, tasks->duration_min[i]); + usage_events_.push_back( + {.time = br.start.min, .index = i, .is_start = true}); + usage_events_.push_back( + {.time = CapAdd(br.end.max, limit), .index = i, .is_start = false}); } - tasks->span_min = - std::max(tasks->span_min, - CapAdd(sum_chain_durations, sum_forced_nonchain_durations)); - } - // The difference end of the chain - start of the chain is a lower bound. - { - const int64_t end_minus_start = - CapSub(tasks->end_min[num_chain_tasks - 1], tasks->start_max[0]); - tasks->span_min = std::max(tasks->span_min, end_minus_start); - } - - return tasks->span_min <= tasks->span_max; -} - -// Computes a lower bound of the span of the chain, taking into account only -// the first nonchain task. -// TODO(user): extend to arbitrary number of nonchain tasks. -bool DisjunctivePropagator::ChainSpanMinDynamic(Tasks* tasks) { - // Do nothing if there are no chain tasks or no nonchain tasks. - const int num_chain_tasks = tasks->num_chain_tasks; - if (num_chain_tasks < 1) return true; - if (num_chain_tasks == tasks->start_min.size()) return true; - const int task_index = num_chain_tasks; - if (!Precedences(tasks)) return false; - const int64_t min_possible_chain_end = tasks->end_min[num_chain_tasks - 1]; - const int64_t max_possible_chain_start = tasks->start_max[0]; - // For each chain task i, compute cumulated duration of chain tasks before it. - int64_t total_duration = 0; - { - total_duration_before_.resize(num_chain_tasks); - for (int i = 0; i < num_chain_tasks; ++i) { - total_duration_before_[i] = total_duration; - total_duration = CapAdd(total_duration, tasks->duration_min[i]); - } - } - // Estimate span min of chain tasks. 
Use the schedule that ends at - // min_possible_chain_end and starts at smallest of start_max[0] or the - // threshold where pushing start[0] later does not make a difference to the - // chain span because of chain precedence constraints, - // i.e. min_possible_chain_end - total_duration. - { - const int64_t chain_span_min = - min_possible_chain_end - - std::min(tasks->start_max[0], min_possible_chain_end - total_duration); - if (chain_span_min > tasks->span_max) { - return false; - } else { - tasks->span_min = std::max(tasks->span_min, chain_span_min); - } - // If task can be performed before or after the chain, - // span_min is chain_span_min. - if (tasks->end_min[task_index] <= tasks->start_max[0] || - tasks->end_min[num_chain_tasks - 1] <= tasks->start_max[task_index]) { - return true; - } - } - // Scan all possible preemption positions of the nontask chain, - // keep the one that yields the minimum span. - int64_t span_min = std::numeric_limits::max(); - bool schedule_is_feasible = false; - for (int i = 0; i < num_chain_tasks; ++i) { - if (!tasks->is_preemptible[i]) continue; - // Estimate span min if tasks is performed during i. - // For all possible minimal-span schedules, there is a schedule where task i - // and nonchain task form a single block. Thus, we only consider those. - const int64_t block_start_min = - std::max(tasks->start_min[i], - tasks->start_min[task_index] - tasks->duration_min[i]); - const int64_t block_start_max = - std::min(tasks->start_max[task_index], - tasks->start_max[i] - tasks->duration_min[task_index]); - if (block_start_min > block_start_max) continue; - - // Compute the block start that yields the minimal span. - // Given a feasible block start, a chain of minimum span constrained to - // this particular block start can be obtained by scheduling all tasks after - // the block at their earliest, and all tasks before it at their latest. 
- // The span can be decomposed into two parts: the head, which are the - // tasks that are before the block, and the tail, which are the block and - // the tasks after it. - // When the block start varies, the head length of the optimal schedule - // described above decreases as much as the block start decreases, until - // an inflection point at which it stays constant. That inflection value - // is the one where the precedence constraints force the chain start to - // decrease because of durations. - const int64_t head_inflection = - max_possible_chain_start + total_duration_before_[i]; - // The map from block start to minimal tail length also has an inflection - // point, that additionally depends on the nonchain task's duration. - const int64_t tail_inflection = - min_possible_chain_end - (total_duration - total_duration_before_[i]) - - tasks->duration_min[task_index]; - // All block start values between these two yield the same minimal span. - // Indeed, first, mind that the inflection points might be in any order. - // - if head_inflection < tail_inflection, then inside the interval - // [head_inflection, tail_inflection], increasing the block start by delta - // decreases the tail length by delta and increases the head length by - // delta too. - // - if tail_inflection < head_inflection, then inside the interval - // [tail_inflection, head_inflection], head length is constantly at - // total_duration_before_[i], and tail length is also constant. - // In both cases, outside of the interval, one part is constant and the - // other increases as much as the distance to the interval. - // We can abstract inflection point to the interval they form. 
- const int64_t optimal_interval_min_start = - std::min(head_inflection, tail_inflection); - const int64_t optimal_interval_max_start = - std::max(head_inflection, tail_inflection); - // If the optimal interval for block start intersects the feasible interval, - // we can select any point within it, for instance the earliest one. - int64_t block_start = std::max(optimal_interval_min_start, block_start_min); - // If the intervals do not intersect, the feasible value closest to the - // optimal interval has the minimal span, because the span increases as - // much as the distance to the optimal interval. - if (optimal_interval_max_start < block_start_min) { - // Optimal interval is before feasible interval, closest is feasible min. - block_start = block_start_min; - } else if (block_start_max < optimal_interval_min_start) { - // Optimal interval is after feasible interval, closest is feasible max. - block_start = block_start_max; - } - // Compute span for the chosen block start. - const int64_t head_duration = - std::max(block_start, head_inflection) - max_possible_chain_start; - const int64_t tail_duration = - min_possible_chain_end - std::min(block_start, tail_inflection); - const int64_t optimal_span_at_i = head_duration + tail_duration; - span_min = std::min(span_min, optimal_span_at_i); - schedule_is_feasible = true; - } - if (!schedule_is_feasible || span_min > tasks->span_max) { - return false; - } else { - tasks->span_min = std::max(tasks->span_min, span_min); - return true; - } -} - -void AppendTasksFromPath(absl::Span path, - const TravelBounds& travel_bounds, - const RoutingDimension& dimension, - DisjunctivePropagator::Tasks* tasks) { - const int num_nodes = path.size(); - DCHECK_EQ(travel_bounds.pre_travels.size(), num_nodes - 1); - DCHECK_EQ(travel_bounds.post_travels.size(), num_nodes - 1); - for (int i = 0; i < num_nodes; ++i) { - const int64_t cumul_min = dimension.CumulVar(path[i])->Min(); - const int64_t cumul_max = 
dimension.CumulVar(path[i])->Max(); - // Add task associated to visit i. - // Visits start at Cumul(path[i]) - before_visit - // and end at Cumul(path[i]) + after_visit - { - const int64_t before_visit = - (i == 0) ? 0 : travel_bounds.post_travels[i - 1]; - const int64_t after_visit = - (i == num_nodes - 1) ? 0 : travel_bounds.pre_travels[i]; - - tasks->start_min.push_back(CapSub(cumul_min, before_visit)); - tasks->start_max.push_back(CapSub(cumul_max, before_visit)); - tasks->duration_min.push_back(CapAdd(before_visit, after_visit)); - tasks->duration_max.push_back(CapAdd(before_visit, after_visit)); - tasks->end_min.push_back(CapAdd(cumul_min, after_visit)); - tasks->end_max.push_back(CapAdd(cumul_max, after_visit)); - tasks->is_preemptible.push_back(false); - } - if (i == num_nodes - 1) break; - - // Tasks from travels. - // A travel task starts at Cumul(path[i]) + pre_travel, - // last for FixedTransitVar(path[i]) - pre_travel - post_travel, - // and must end at the latest at Cumul(path[i+1]) - post_travel. - { - const int64_t pre_travel = travel_bounds.pre_travels[i]; - const int64_t post_travel = travel_bounds.post_travels[i]; - tasks->start_min.push_back(CapAdd(cumul_min, pre_travel)); - tasks->start_max.push_back(CapAdd(cumul_max, pre_travel)); - tasks->duration_min.push_back( - std::max(0, CapSub(travel_bounds.min_travels[i], - CapAdd(pre_travel, post_travel)))); - tasks->duration_max.push_back( - travel_bounds.max_travels[i] == std::numeric_limits::max() - ? 
std::numeric_limits::max() - : std::max(0, CapSub(travel_bounds.max_travels[i], - CapAdd(pre_travel, post_travel)))); - tasks->end_min.push_back( - CapSub(dimension.CumulVar(path[i + 1])->Min(), post_travel)); - tasks->end_max.push_back( - CapSub(dimension.CumulVar(path[i + 1])->Max(), post_travel)); - tasks->is_preemptible.push_back(true); - } - } -} - -void FillTravelBoundsOfVehicle(int vehicle, absl::Span path, - const RoutingDimension& dimension, - TravelBounds* travel_bounds) { - // Fill path and min/max/pre/post travel bounds. - FillPathEvaluation(path, dimension.transit_evaluator(vehicle), - &travel_bounds->min_travels); - const int num_travels = travel_bounds->min_travels.size(); - travel_bounds->max_travels.assign(num_travels, - std::numeric_limits::max()); - { - const int index = dimension.GetPreTravelEvaluatorOfVehicle(vehicle); - if (index == -1) { - travel_bounds->pre_travels.assign(num_travels, 0); - } else { - FillPathEvaluation(path, dimension.model()->TransitCallback(index), - &travel_bounds->pre_travels); - } - } - { - const int index = dimension.GetPostTravelEvaluatorOfVehicle(vehicle); - if (index == -1) { - travel_bounds->post_travels.assign(num_travels, 0); - } else { - FillPathEvaluation(path, dimension.model()->TransitCallback(index), - &travel_bounds->post_travels); - } - } -} - -void AppendTasksFromIntervals(const std::vector& intervals, - DisjunctivePropagator::Tasks* tasks) { - for (IntervalVar* interval : intervals) { - if (!interval->MustBePerformed()) continue; - tasks->start_min.push_back(interval->StartMin()); - tasks->start_max.push_back(interval->StartMax()); - tasks->duration_min.push_back(interval->DurationMin()); - tasks->duration_max.push_back(interval->DurationMax()); - tasks->end_min.push_back(interval->EndMin()); - tasks->end_max.push_back(interval->EndMax()); - tasks->is_preemptible.push_back(false); - } -} - -GlobalVehicleBreaksConstraint::GlobalVehicleBreaksConstraint( - const RoutingDimension* dimension) - : 
Constraint(dimension->model()->solver()), - model_(dimension->model()), - dimension_(dimension) { - vehicle_demons_.resize(model_->vehicles()); -} - -void GlobalVehicleBreaksConstraint::Post() { - for (int vehicle = 0; vehicle < model_->vehicles(); vehicle++) { - if (dimension_->GetBreakIntervalsOfVehicle(vehicle).empty() && - dimension_->GetBreakDistanceDurationOfVehicle(vehicle).empty()) { - continue; - } - vehicle_demons_[vehicle] = MakeDelayedConstraintDemon1( - solver(), this, &GlobalVehicleBreaksConstraint::PropagateVehicle, - "PropagateVehicle", vehicle); - for (IntervalVar* interval : - dimension_->GetBreakIntervalsOfVehicle(vehicle)) { - interval->WhenAnything(vehicle_demons_[vehicle]); - } - } - const int num_cumuls = dimension_->cumuls().size(); - const int num_nexts = model_->Nexts().size(); - for (int node = 0; node < num_cumuls; node++) { - Demon* dimension_demon = MakeConstraintDemon1( - solver(), this, &GlobalVehicleBreaksConstraint::PropagateNode, - "PropagateNode", node); - if (node < num_nexts) { - model_->NextVar(node)->WhenBound(dimension_demon); - dimension_->SlackVar(node)->WhenRange(dimension_demon); - } - model_->VehicleVar(node)->WhenBound(dimension_demon); - dimension_->CumulVar(node)->WhenRange(dimension_demon); - } -} - -void GlobalVehicleBreaksConstraint::InitialPropagate() { - for (int vehicle = 0; vehicle < model_->vehicles(); vehicle++) { - if (!dimension_->GetBreakIntervalsOfVehicle(vehicle).empty() || - !dimension_->GetBreakDistanceDurationOfVehicle(vehicle).empty()) { - PropagateVehicle(vehicle); - } - } -} - -// This dispatches node events to the right vehicle propagator. -// It also filters out a part of uninteresting events, on which the vehicle -// propagator will not find anything new. 
-void GlobalVehicleBreaksConstraint::PropagateNode(int node) { - if (!model_->VehicleVar(node)->Bound()) return; - const int vehicle = model_->VehicleVar(node)->Min(); - if (vehicle < 0 || vehicle_demons_[vehicle] == nullptr) return; - EnqueueDelayedDemon(vehicle_demons_[vehicle]); -} - -void GlobalVehicleBreaksConstraint::FillPartialPathOfVehicle(int vehicle) { - path_.clear(); - int current = model_->Start(vehicle); - while (!model_->IsEnd(current)) { - path_.push_back(current); - current = model_->NextVar(current)->Bound() - ? model_->NextVar(current)->Min() - : model_->End(vehicle); - } - path_.push_back(current); -} - -void GlobalVehicleBreaksConstraint::FillPathTravels( - absl::Span path) { - const int num_travels = path.size() - 1; - travel_bounds_.min_travels.resize(num_travels); - travel_bounds_.max_travels.resize(num_travels); - for (int i = 0; i < num_travels; ++i) { - travel_bounds_.min_travels[i] = dimension_->FixedTransitVar(path[i])->Min(); - travel_bounds_.max_travels[i] = dimension_->FixedTransitVar(path[i])->Max(); - } -} - -// First, perform energy-based reasoning on intervals and cumul variables. -// Then, perform reasoning on slack variables. -void GlobalVehicleBreaksConstraint::PropagateVehicle(int vehicle) { - // Fill path and pre/post travel information. 
- FillPartialPathOfVehicle(vehicle); - const int num_nodes = path_.size(); - FillPathTravels(path_); - { - const int index = dimension_->GetPreTravelEvaluatorOfVehicle(vehicle); - if (index == -1) { - travel_bounds_.pre_travels.assign(num_nodes - 1, 0); - } else { - FillPathEvaluation(path_, model_->TransitCallback(index), - &travel_bounds_.pre_travels); - } - } - { - const int index = dimension_->GetPostTravelEvaluatorOfVehicle(vehicle); - if (index == -1) { - travel_bounds_.post_travels.assign(num_nodes - 1, 0); - } else { - FillPathEvaluation(path_, model_->TransitCallback(index), - &travel_bounds_.post_travels); - } - } - // The last travel might not be fixed: in that case, relax its information. - if (!model_->NextVar(path_[num_nodes - 2])->Bound()) { - travel_bounds_.min_travels.back() = 0; - travel_bounds_.max_travels.back() = std::numeric_limits::max(); - travel_bounds_.pre_travels.back() = 0; - travel_bounds_.post_travels.back() = 0; - } - - // Fill tasks from path, break intervals, and break constraints. - tasks_.Clear(); - AppendTasksFromPath(path_, travel_bounds_, *dimension_, &tasks_); - tasks_.num_chain_tasks = tasks_.start_min.size(); - AppendTasksFromIntervals(dimension_->GetBreakIntervalsOfVehicle(vehicle), - &tasks_); - tasks_.distance_duration = - dimension_->GetBreakDistanceDurationOfVehicle(vehicle); - - // Do the actual reasoning, no need to continue if infeasible. - if (!disjunctive_propagator_.Propagate(&tasks_)) solver()->Fail(); - - // Make task translators to help set new bounds of CP variables. - task_translators_.clear(); - for (int i = 0; i < num_nodes; ++i) { - const int64_t before_visit = - (i == 0) ? 0 : travel_bounds_.post_travels[i - 1]; - const int64_t after_visit = - (i == num_nodes - 1) ? 0 : travel_bounds_.pre_travels[i]; - task_translators_.emplace_back(dimension_->CumulVar(path_[i]), before_visit, - after_visit); - if (i == num_nodes - 1) break; - task_translators_.emplace_back(); // Dummy translator for travel tasks. 
- } - for (IntervalVar* interval : - dimension_->GetBreakIntervalsOfVehicle(vehicle)) { - if (!interval->MustBePerformed()) continue; - task_translators_.emplace_back(interval); - } - - // Push new bounds to CP variables. - const int num_tasks = tasks_.start_min.size(); - for (int task = 0; task < num_tasks; ++task) { - task_translators_[task].SetStartMin(tasks_.start_min[task]); - task_translators_[task].SetStartMax(tasks_.start_max[task]); - task_translators_[task].SetDurationMin(tasks_.duration_min[task]); - task_translators_[task].SetEndMin(tasks_.end_min[task]); - task_translators_[task].SetEndMax(tasks_.end_max[task]); - } - - // Reasoning on slack variables: when intervals must be inside an arc, - // that arc's slack must be large enough to accommodate for those. - // TODO(user): Make a version more efficient than O(n^2). - if (dimension_->GetBreakIntervalsOfVehicle(vehicle).empty()) return; - // If the last arc of the path was not bound, do not change slack. - const int64_t last_bound_arc = - num_nodes - 2 - (model_->NextVar(path_[num_nodes - 2])->Bound() ? 0 : 1); - for (int i = 0; i <= last_bound_arc; ++i) { - const int64_t arc_start_max = - CapSub(dimension_->CumulVar(path_[i])->Max(), - i > 0 ? travel_bounds_.post_travels[i - 1] : 0); - const int64_t arc_end_min = - CapAdd(dimension_->CumulVar(path_[i + 1])->Min(), - i < num_nodes - 2 ? travel_bounds_.pre_travels[i + 1] : 0); - int64_t total_break_inside_arc = 0; - for (IntervalVar* interval : - dimension_->GetBreakIntervalsOfVehicle(vehicle)) { - if (!interval->MustBePerformed()) continue; - const int64_t interval_start_max = interval->StartMax(); - const int64_t interval_end_min = interval->EndMin(); - const int64_t interval_duration_min = interval->DurationMin(); - // If interval cannot end before the arc's from node and - // cannot start after the 'to' node, then it must be inside the arc. 
- if (arc_start_max < interval_end_min && - interval_start_max < arc_end_min) { - total_break_inside_arc += interval_duration_min; - } - } - dimension_->SlackVar(path_[i])->SetMin(total_break_inside_arc); - } - // Reasoning on optional intervals. - // TODO(user): merge this with energy-based reasoning. - // If there is no optional interval, skip the rest of this function. - { - bool has_optional = false; - for (const IntervalVar* interval : - dimension_->GetBreakIntervalsOfVehicle(vehicle)) { - if (interval->MayBePerformed() && !interval->MustBePerformed()) { - has_optional = true; - break; - } - } - if (!has_optional) return; - } - const std::vector& break_intervals = - dimension_->GetBreakIntervalsOfVehicle(vehicle); - for (int pos = 0; pos < num_nodes - 1; ++pos) { - const int64_t current_slack_max = dimension_->SlackVar(path_[pos])->Max(); - const int64_t visit_start_offset = - pos > 0 ? travel_bounds_.post_travels[pos - 1] : 0; - const int64_t visit_start_max = - CapSub(dimension_->CumulVar(path_[pos])->Max(), visit_start_offset); - const int64_t visit_end_offset = - (pos < num_nodes - 1) ? travel_bounds_.pre_travels[pos] : 0; - const int64_t visit_end_min = - CapAdd(dimension_->CumulVar(path_[pos])->Min(), visit_end_offset); - - for (IntervalVar* interval : break_intervals) { - if (!interval->MayBePerformed()) continue; - const bool interval_is_performed = interval->MustBePerformed(); - const int64_t interval_start_max = interval->StartMax(); - const int64_t interval_end_min = interval->EndMin(); - const int64_t interval_duration_min = interval->DurationMin(); - // When interval cannot fit inside current arc, - // do disjunctive reasoning on full arc. - if (pos < num_nodes - 1 && interval_duration_min > current_slack_max) { - // The arc lasts from CumulVar(path_[pos]) - post_travel_[pos] to - // CumulVar(path_[pos+1]) + pre_travel_[pos+1]. - const int64_t arc_start_offset = - pos > 0 ? 
travel_bounds_.post_travels[pos - 1] : 0; - const int64_t arc_start_max = visit_start_max; - const int64_t arc_end_offset = - (pos < num_nodes - 2) ? travel_bounds_.pre_travels[pos + 1] : 0; - const int64_t arc_end_min = - CapAdd(dimension_->CumulVar(path_[pos + 1])->Min(), arc_end_offset); - // Interval not before. - if (arc_start_max < interval_end_min) { - interval->SetStartMin(arc_end_min); - if (interval_is_performed) { - dimension_->CumulVar(path_[pos + 1]) - ->SetMax(CapSub(interval_start_max, arc_end_offset)); + std::sort(usage_events_.begin(), usage_events_.end()); + // Main loop: sweep over events, maintain max profile height. + // When sweeping over time, we cross some time intervals of duration > 0: + // - if profile height is 0, no break can cover the interval. Infeasible. + // - if profile height is 1, the only active break must cover the interval. + // When num_active_breaks == 1, the xor of all active breaks is the only + // active break. + int num_active_breaks = 0; + int xor_active_breaks = 0; + int64_t previous_time = kint64min; + for (const UsageEvent& event : usage_events_) { + if (event.time != previous_time) { + DCHECK_GT(event.time, previous_time); + // Time changed: check covering condition. 
+ if (num_active_breaks == 0) return kInfeasible; + if (num_active_breaks == 1) { + VehicleBreak& br = vehicle_breaks[xor_active_breaks]; + const int64_t new_start_max = + std::min(previous_time, CapSub(br.end.max, min_break_duration)); + const int64_t new_end_min = + std::max(CapSub(event.time, limit), + CapAdd(br.start.min, min_break_duration)); + if (!DecreaseMax(new_start_max, &br.start, &result) || + !IncreaseMin(new_end_min, &br.end, &result)) { + return kInfeasible; + } + if (xor_active_breaks < num_breaks - 2) { + const int64_t new_duration_min = std::max( + min_break_duration, CapSub(new_end_min, new_start_max)); + if (!IncreaseMin(1, &br.is_performed, &result) || + !IncreaseMin(new_duration_min, &br.duration, &result)) { + return kInfeasible; + } } } - // Interval not after. - if (interval_start_max < arc_end_min) { - interval->SetEndMax(arc_start_max); - if (interval_is_performed) { - dimension_->CumulVar(path_[pos]) - ->SetMin(CapAdd(interval_end_min, arc_start_offset)); - } - } - continue; - } - // Interval could fit inside arc: do disjunctive reasoning between - // interval and visit. - // Interval not before. - if (visit_start_max < interval_end_min) { - interval->SetStartMin(visit_end_min); - if (interval_is_performed) { - dimension_->CumulVar(path_[pos]) - ->SetMax(CapSub(interval_start_max, visit_end_offset)); - } - } - // Interval not after. - if (interval_start_max < visit_end_min) { - interval->SetEndMax(visit_start_max); - if (interval_is_performed) { - dimension_->CumulVar(path_[pos]) - ->SetMin(CapAdd(interval_end_min, visit_start_offset)); - } } + // Update the set of active intervals. + num_active_breaks += event.is_start ? 1 : -1; + xor_active_breaks ^= event.index; + previous_time = event.time; + } + // Propagate fake start/end information to actual start/end. 
+ const Interval& new_start = vehicle_breaks[num_breaks - 2].end; + const Interval& new_end = vehicle_breaks[num_breaks - 1].start; + if (!IntersectWith(new_start, &cumuls.front(), &result) || + !IntersectWith(new_end, &cumuls.back(), &result)) { + vehicle_breaks.resize(num_breaks - 2); + return kInfeasible; } } -} - -namespace { -class VehicleBreaksFilter : public BasePathFilter { - public: - VehicleBreaksFilter(const RoutingModel& routing_model, - const RoutingDimension& dimension); - std::string DebugString() const override { return "VehicleBreaksFilter"; } - bool AcceptPath(int64_t path_start, int64_t chain_start, - int64_t chain_end) override; - - private: - // Fills path_ with the path of vehicle, start to end. - void FillPathOfVehicle(int64_t vehicle); - std::vector path_; - // Handles to model. - const RoutingModel& model_; - const RoutingDimension& dimension_; - // Strong energy-based filtering algorithm. - DisjunctivePropagator disjunctive_propagator_; - DisjunctivePropagator::Tasks tasks_; - // Used to check whether propagation changed a vector. 
- std::vector old_start_min_; - std::vector old_start_max_; - std::vector old_end_min_; - std::vector old_end_max_; - - std::vector start_to_vehicle_; - TravelBounds travel_bounds_; -}; - -VehicleBreaksFilter::VehicleBreaksFilter(const RoutingModel& routing_model, - const RoutingDimension& dimension) - : BasePathFilter(routing_model.Nexts(), - routing_model.Size() + routing_model.vehicles(), - routing_model.GetPathsMetadata()), - model_(routing_model), - dimension_(dimension) { - DCHECK(dimension_.HasBreakConstraints()); - start_to_vehicle_.resize(Size(), -1); - for (int i = 0; i < routing_model.vehicles(); ++i) { - start_to_vehicle_[routing_model.Start(i)] = i; - } -} - -void VehicleBreaksFilter::FillPathOfVehicle(int64_t vehicle) { - path_.clear(); - int current = model_.Start(vehicle); - while (!model_.IsEnd(current)) { - path_.push_back(current); - current = GetNext(current); - } - path_.push_back(current); -} - -bool VehicleBreaksFilter::AcceptPath(int64_t path_start, int64_t chain_start, - int64_t chain_end) { - const int vehicle = start_to_vehicle_[path_start]; - if (dimension_.GetBreakIntervalsOfVehicle(vehicle).empty() && - dimension_.GetBreakDistanceDurationOfVehicle(vehicle).empty()) { - return true; - } - // Fill path and pre/post travel information. - FillPathOfVehicle(vehicle); - FillTravelBoundsOfVehicle(vehicle, path_, dimension_, &travel_bounds_); - // Fill tasks from path, forbidden intervals, breaks and break constraints. - tasks_.Clear(); - AppendTasksFromPath(path_, travel_bounds_, dimension_, &tasks_); - tasks_.num_chain_tasks = tasks_.start_min.size(); - AppendTasksFromIntervals(dimension_.GetBreakIntervalsOfVehicle(vehicle), - &tasks_); - // Add forbidden intervals only if a node has some. 
- tasks_.forbidden_intervals.clear(); - if (std::any_of(path_.begin(), path_.end(), [this](int64_t node) { - return dimension_.forbidden_intervals()[node].NumIntervals() > 0; - })) { - tasks_.forbidden_intervals.assign(tasks_.start_min.size(), nullptr); - for (int i = 0; i < path_.size(); ++i) { - tasks_.forbidden_intervals[2 * i] = - &(dimension_.forbidden_intervals()[path_[i]]); - } - } - // Max distance duration constraint. - tasks_.distance_duration = - dimension_.GetBreakDistanceDurationOfVehicle(vehicle); - - // Reduce bounds until failure or fixed point is reached. - // We set a maximum amount of iterations to avoid slow propagation. - bool is_feasible = true; - int maximum_num_iterations = 8; - while (--maximum_num_iterations >= 0) { - old_start_min_ = tasks_.start_min; - old_start_max_ = tasks_.start_max; - old_end_min_ = tasks_.end_min; - old_end_max_ = tasks_.end_max; - is_feasible = disjunctive_propagator_.Propagate(&tasks_); - if (!is_feasible) break; - // If fixed point reached, stop. - if ((old_start_min_ == tasks_.start_min) && - (old_start_max_ == tasks_.start_max) && - (old_end_min_ == tasks_.end_min) && (old_end_max_ == tasks_.end_max)) { - break; - } - } - return is_feasible; -} - -} // namespace - -IntVarLocalSearchFilter* MakeVehicleBreaksFilter( - const RoutingModel& routing_model, const RoutingDimension& dimension) { - return routing_model.solver()->RevAlloc( - new VehicleBreaksFilter(routing_model, dimension)); + // Remove fake path start/end breaks. + vehicle_breaks.resize(num_breaks - 2); + return result; } } // namespace operations_research::routing diff --git a/ortools/routing/breaks.h b/ortools/routing/breaks.h new file mode 100644 index 0000000000..8331572c1f --- /dev/null +++ b/ortools/routing/breaks.h @@ -0,0 +1,110 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_ROUTING_BREAKS_H_ +#define OR_TOOLS_ROUTING_BREAKS_H_ + +#include +#include +#include + +#include "absl/types/span.h" +#include "ortools/routing/filter_committables.h" + +namespace operations_research::routing { + +class BreakPropagator { + public: + explicit BreakPropagator(int num_nodes); + + // Result of a propagation: kInfeasible means some infeasibility was found, + // kChanged means that the propagation tightened the bounds of some intervals, + // kUnchanged means that the propagation did not change anything. + enum class PropagationResult { kInfeasible, kChanged, kUnchanged }; + // TODO(user): when the OSS version is at C++20, replace this by + // using enum PropagationResult; + static constexpr PropagationResult kInfeasible = + PropagationResult::kInfeasible; + static constexpr PropagationResult kChanged = PropagationResult::kChanged; + static constexpr PropagationResult kUnchanged = PropagationResult::kUnchanged; + + // Applies fast propagations, O(log |path|) per break, to the given path. + PropagationResult FastPropagations(int path, + DimensionValues& dimension_values, + const PrePostVisitValues& visits); + // Propagates interbreak rules on a given path, with a covering reasoning. + // Each interbreak is a pair (interbreak_limit, min_break_duration). 
+ PropagationResult PropagateInterbreak( + int path, DimensionValues& dimension, + absl::Span> interbreaks); + + private: + using Interval = DimensionValues::Interval; + using VehicleBreak = DimensionValues::VehicleBreak; + + static bool IncreaseMin(int64_t new_min, Interval* interval, + PropagationResult* propagation_result) { + if (interval->min >= new_min) return true; + if (!interval->IncreaseMin(new_min)) { + *propagation_result = kInfeasible; + return false; + } + *propagation_result = kChanged; + return true; + } + static bool DecreaseMax(int64_t new_max, Interval* interval, + PropagationResult* propagation_result) { + if (interval->max <= new_max) return true; + if (!interval->DecreaseMax(new_max)) { + *propagation_result = kInfeasible; + return false; + } + *propagation_result = kChanged; + return true; + } + static bool IntersectWith(Interval source, Interval* target, + PropagationResult* propagation_result) { + if (!source.IntersectWith(*target)) { + *propagation_result = kInfeasible; + } else if (source != *target) { + *propagation_result = kChanged; + } + *target = source; + return *propagation_result != kInfeasible; + } + // In cases where propagators expect some property of variables to hold, + // for instance "cumuls[i].min should be weakly increasing in i", + // it is necessary to delay modification of the variables until after all + // propagations are done. + // This struct can be used to store such delayed propagations. + struct DelayedPropagation { + int64_t value; // New bound of the variable. + int index; // Some information on which variable to modify. + bool is_min; // The bound is a min if this is true, otherwise a max. + }; + std::vector delayed_propagations_; + // Events used in PropagateInterbreak(). + struct UsageEvent { + int64_t time; + int index; + bool is_start; + bool operator<(const UsageEvent& other) const { return time < other.time; } + }; + std::vector usage_events_; + // Per-transition reasoning. 
+ CommittableArray break_duration_on_transition_; +}; + +} // namespace operations_research::routing + +#endif // OR_TOOLS_ROUTING_BREAKS_H_ diff --git a/ortools/routing/constraints.cc b/ortools/routing/constraints.cc index 8c3b66e33a..da13afb0b4 100644 --- a/ortools/routing/constraints.cc +++ b/ortools/routing/constraints.cc @@ -23,10 +23,15 @@ #include #include "absl/container/flat_hash_set.h" +#include "absl/functional/any_invocable.h" #include "absl/log/check.h" +#include "absl/types/span.h" #include "ortools/base/strong_vector.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/constraint_solver/constraint_solveri.h" +#include "ortools/routing/breaks.h" +#include "ortools/routing/filter_committables.h" +#include "ortools/routing/filters.h" #include "ortools/routing/lp_scheduling.h" #include "ortools/routing/routing.h" #include "ortools/routing/search.h" @@ -795,4 +800,236 @@ Constraint* MakeRouteConstraint( model, std::move(route_cost_vars), std::move(route_evaluator))); } +namespace { + +/// GlobalVehicleBreaksConstraint ensures breaks constraints are enforced on +/// all vehicles in the dimension passed to its constructor. +/// It is intended to be used for dimensions representing time. +/// A break constraint ensures break intervals fit on the route of a vehicle. +/// For a given vehicle, it forces break intervals to be disjoint from visit +/// intervals, where visit intervals start at CumulVar(node) and last for +/// node_visit_transit[node]. Moreover, it ensures that there is enough time +/// between two consecutive nodes of a route to do transit and vehicle breaks, +/// i.e. if Next(nodeA) = nodeB, CumulVar(nodeA) = tA and CumulVar(nodeB) = tB, +/// then SlackVar(nodeA) >= sum_{breaks \subseteq [tA, tB)} duration(break). 
+class GlobalVehicleBreaksConstraint : public Constraint { + public: + explicit GlobalVehicleBreaksConstraint(const RoutingDimension* dimension); + std::string DebugString() const override { + return "GlobalVehicleBreaksConstraint"; + } + + void Post() override; + void InitialPropagate() override; + + private: + void PropagateNode(int node); + void PropagateVehicle(int vehicle); + + const RoutingModel* model_; + const RoutingDimension* const dimension_; + std::vector vehicle_demons_; + + DimensionValues dimension_values_; + PrePostVisitValues visits_; + std::vector cumul_intervals_; + std::vector slack_intervals_; + BreakPropagator break_propagator_; +}; + +GlobalVehicleBreaksConstraint::GlobalVehicleBreaksConstraint( + const RoutingDimension* dimension) + : Constraint(dimension->model()->solver()), + model_(dimension->model()), + dimension_(dimension), + dimension_values_(dimension->model()->vehicles(), + dimension->cumuls().size()), + visits_(dimension->model()->vehicles(), dimension->cumuls().size()), + cumul_intervals_(dimension->cumuls().size()), + slack_intervals_(dimension->cumuls().size()), + break_propagator_(dimension->cumuls().size()) { + vehicle_demons_.resize(model_->vehicles()); +} + +void GlobalVehicleBreaksConstraint::Post() { + for (int vehicle = 0; vehicle < model_->vehicles(); vehicle++) { + if (dimension_->GetBreakIntervalsOfVehicle(vehicle).empty() && + dimension_->GetBreakDistanceDurationOfVehicle(vehicle).empty()) { + continue; + } + vehicle_demons_[vehicle] = MakeDelayedConstraintDemon1( + solver(), this, &GlobalVehicleBreaksConstraint::PropagateVehicle, + "PropagateVehicle", vehicle); + for (IntervalVar* interval : + dimension_->GetBreakIntervalsOfVehicle(vehicle)) { + interval->WhenAnything(vehicle_demons_[vehicle]); + } + } + const int num_cumuls = dimension_->cumuls().size(); + const int num_nexts = model_->Nexts().size(); + for (int node = 0; node < num_cumuls; node++) { + Demon* dimension_demon = MakeConstraintDemon1( + solver(), this, 
&GlobalVehicleBreaksConstraint::PropagateNode, + "PropagateNode", node); + if (node < num_nexts) { + model_->NextVar(node)->WhenBound(dimension_demon); + dimension_->SlackVar(node)->WhenRange(dimension_demon); + } + model_->VehicleVar(node)->WhenBound(dimension_demon); + dimension_->CumulVar(node)->WhenRange(dimension_demon); + } +} + +void GlobalVehicleBreaksConstraint::InitialPropagate() { + for (int vehicle = 0; vehicle < model_->vehicles(); vehicle++) { + if (!dimension_->GetBreakIntervalsOfVehicle(vehicle).empty() || + !dimension_->GetBreakDistanceDurationOfVehicle(vehicle).empty()) { + PropagateVehicle(vehicle); + } + } +} + +// This dispatches node events to the right vehicle propagator. +// It also filters out a part of uninteresting events, on which the vehicle +// propagator will not find anything new. +void GlobalVehicleBreaksConstraint::PropagateNode(int node) { + if (!model_->VehicleVar(node)->Bound()) return; + const int vehicle = model_->VehicleVar(node)->Min(); + if (vehicle < 0 || vehicle_demons_[vehicle] == nullptr) return; + EnqueueDelayedDemon(vehicle_demons_[vehicle]); +} + +// First, perform energy-based reasoning on intervals and cumul variables. +// Then, perform reasoning on slack variables. +void GlobalVehicleBreaksConstraint::PropagateVehicle(int vehicle) { + dimension_values_.Revert(); + visits_.Revert(); + + // Fill dimension_values_ from the path. + // If the path is not a complete start -> end, return. + // This leverages travel caching in FillDimensionValuesFromRoutingDimension(). + int node = model_->Start(vehicle); + while (!model_->IsEnd(node)) { + dimension_values_.PushNode(node); + if (model_->NextVar(node)->Bound()) { + node = model_->NextVar(node)->Min(); + } else { + return; + } + } + dimension_values_.PushNode(node); + dimension_values_.MakePathFromNewNodes(vehicle); + // Translate CP variables to Intervals, and fill dimension_values_. 
+ const auto& cp_cumuls = dimension_->cumuls(); + const auto& cp_slacks = dimension_->slacks(); + for (const int node : dimension_values_.Nodes(vehicle)) { + cumul_intervals_[node] = {.min = cp_cumuls[node]->Min(), + .max = cp_cumuls[node]->Max()}; + if (dimension_->model()->IsEnd(node)) { + slack_intervals_[node] = {.min = 0, .max = 0}; + } else { + slack_intervals_[node] = {.min = cp_slacks[node]->Min(), + .max = cp_slacks[node]->Max()}; + } + } + if (!FillDimensionValuesFromRoutingDimension( + vehicle, dimension_->vehicle_capacities()[vehicle], + dimension_->vehicle_span_upper_bounds()[vehicle], cumul_intervals_, + slack_intervals_, dimension_->transit_evaluator(vehicle), + dimension_values_)) { + solver()->Fail(); + } + if (!PropagateTransitAndSpan(vehicle, dimension_values_)) { + solver()->Fail(); + } + // Extract pre/post visit data. + auto any_invocable = [this](int evaluator_index) + -> std::optional> { + const auto& evaluator = + evaluator_index == -1 + ? nullptr + : dimension_->model()->TransitCallback(evaluator_index); + if (evaluator == nullptr) return std::nullopt; + return evaluator; + }; + FillPrePostVisitValues( + vehicle, dimension_values_, + any_invocable(dimension_->GetPreTravelEvaluatorOfVehicle(vehicle)), + any_invocable(dimension_->GetPostTravelEvaluatorOfVehicle(vehicle)), + visits_); + // Copy break data into dimension_values_. 
+ using VehicleBreak = DimensionValues::VehicleBreak; + const std::vector& cp_breaks = + dimension_->GetBreakIntervalsOfVehicle(vehicle); + std::vector& dv_breaks = + dimension_values_.MutableVehicleBreaks(vehicle); + dv_breaks.clear(); + for (const IntervalVar* cp_break : cp_breaks) { + if (cp_break->MayBePerformed()) { + dv_breaks.push_back( + {.start = {.min = cp_break->StartMin(), .max = cp_break->StartMax()}, + .end = {.min = cp_break->EndMin(), .max = cp_break->EndMax()}, + .duration = {.min = cp_break->DurationMin(), + .max = cp_break->DurationMax()}, + .is_performed = {.min = cp_break->MustBePerformed(), .max = 1}}); + } else { + dv_breaks.push_back({.start = {.min = 0, .max = 0}, + .end = {.min = 0, .max = 0}, + .duration = {.min = 0, .max = 0}, + .is_performed = {.min = 0, .max = 0}}); + } + } + // Propagate inside dimension_values_, fail if infeasible. + if (break_propagator_.FastPropagations(vehicle, dimension_values_, visits_) == + BreakPropagator::kInfeasible) { + solver()->Fail(); + } + const auto& interbreaks = + dimension_->GetBreakDistanceDurationOfVehicle(vehicle); + if (break_propagator_.PropagateInterbreak(vehicle, dimension_values_, + interbreaks) == + BreakPropagator::kInfeasible) { + solver()->Fail(); + } + if (!PropagateTransitAndSpan(vehicle, dimension_values_)) { + solver()->Fail(); + } + // Copy changes back to CP variables. 
+ using Interval = DimensionValues::Interval; + const int num_nodes = dimension_values_.NumNodes(vehicle); + const absl::Span nodes = dimension_values_.Nodes(vehicle); + const absl::Span dv_cumuls = + dimension_values_.Cumuls(vehicle); + for (int r = 0; r < num_nodes; ++r) { + const int node = nodes[r]; + cp_cumuls[node]->SetRange(dv_cumuls[r].min, dv_cumuls[r].max); + } + const int num_breaks = cp_breaks.size(); + for (int b = 0; b < num_breaks; ++b) { + IntervalVar* cp_break = cp_breaks[b]; + if (!cp_break->MayBePerformed()) continue; + const VehicleBreak& dv_break = dv_breaks[b]; + cp_break->SetStartRange(dv_break.start.min, dv_break.start.max); + cp_break->SetEndRange(dv_break.end.min, dv_break.end.max); + cp_break->SetDurationRange(dv_break.duration.min, dv_break.duration.max); + if (dv_break.is_performed.min == 1) { + cp_break->SetPerformed(true); + } else if (dv_break.is_performed.max == 0) { + cp_break->SetPerformed(false); + } + } + // If everything went fine, we can save dimension state. + // Saving is only done for caching reasons, this allows subsequent calls to + // FillDimensionValuesFromRoutingDimension() to re-use travel evaluations. 
+ dimension_values_.Commit(); + visits_.Commit(); +} + +} // namespace + +Constraint* MakeGlobalVehicleBreaksConstraint( + Solver* solver, const RoutingDimension* dimension) { + return solver->RevAlloc(new GlobalVehicleBreaksConstraint(dimension)); +} + } // namespace operations_research::routing diff --git a/ortools/routing/constraints.h b/ortools/routing/constraints.h index c681523683..4d7e1759b8 100644 --- a/ortools/routing/constraints.h +++ b/ortools/routing/constraints.h @@ -49,6 +49,9 @@ Constraint* MakeRouteConstraint( std::function(const std::vector&)> route_evaluator); +Constraint* MakeGlobalVehicleBreaksConstraint( + Solver* solver, const RoutingDimension* dimension); + } // namespace operations_research::routing #endif // OR_TOOLS_ROUTING_CONSTRAINTS_H_ diff --git a/ortools/routing/decision_builders.cc b/ortools/routing/decision_builders.cc index 4e1cd43e29..65d2a17013 100644 --- a/ortools/routing/decision_builders.cc +++ b/ortools/routing/decision_builders.cc @@ -913,16 +913,6 @@ void FinalizerVariables::AddWeightedVariableTarget(IntVar* var, int64_t target, } } -void FinalizerVariables::AddWeightedVariableToMinimize(IntVar* var, - int64_t cost) { - AddWeightedVariableTarget(var, std::numeric_limits::min(), cost); -} - -void FinalizerVariables::AddWeightedVariableToMaximize(IntVar* var, - int64_t cost) { - AddWeightedVariableTarget(var, std::numeric_limits::max(), cost); -} - void FinalizerVariables::AddVariableTarget(IntVar* var, int64_t target) { CHECK(var != nullptr); if (finalizer_variable_target_set_.contains(var)) return; @@ -930,14 +920,6 @@ void FinalizerVariables::AddVariableTarget(IntVar* var, int64_t target) { finalizer_variable_targets_.push_back({var, target}); } -void FinalizerVariables::AddVariableToMaximize(IntVar* var) { - AddVariableTarget(var, std::numeric_limits::max()); -} - -void FinalizerVariables::AddVariableToMinimize(IntVar* var) { - AddVariableTarget(var, std::numeric_limits::min()); -} - DecisionBuilder* 
FinalizerVariables::CreateFinalizer() { std::stable_sort(weighted_finalizer_variable_targets_.begin(), weighted_finalizer_variable_targets_.end(), diff --git a/ortools/routing/decision_builders.h b/ortools/routing/decision_builders.h index 096ef46561..8cefd90393 100644 --- a/ortools/routing/decision_builders.h +++ b/ortools/routing/decision_builders.h @@ -70,27 +70,18 @@ DecisionBuilder* MakeRestoreDimensionValuesForUnchangedRoutes( class FinalizerVariables { public: explicit FinalizerVariables(Solver* solver) : solver_(solver) {} - /// Adds a variable to minimize in the solution finalizer. The solution - /// finalizer is called each time a solution is found during the search and - /// allows to instantiate secondary variables (such as dimension cumul - /// variables). - void AddVariableToMinimize(IntVar* var); - /// Adds a variable to maximize in the solution finalizer (see above for - /// information on the solution finalizer). - void AddVariableToMaximize(IntVar* var); - /// Adds a variable to minimize in the solution finalizer, with a weighted - /// priority: the higher the more priority it has. - void AddWeightedVariableToMinimize(IntVar* var, int64_t cost); - /// Adds a variable to maximize in the solution finalizer, with a weighted - /// priority: the higher the more priority it has. - void AddWeightedVariableToMaximize(IntVar* var, int64_t cost); /// Add a variable to set the closest possible to the target value in the - /// solution finalizer. + /// solution finalizer. The solution finalizer is called each time a solution + /// is found during the search and allows to instantiate secondary variables + /// (such as dimension cumul variables). void AddVariableTarget(IntVar* var, int64_t target); /// Same as above with a weighted priority: the higher the cost, the more /// priority it has to be set close to the target value. 
void AddWeightedVariableTarget(IntVar* var, int64_t target, int64_t cost); - /// + /// Returns a DecisionBuilder* that sets the variables passed through + /// AddVariableTarget and AddWeightedVariableTarget towards their target, + /// setting weighted variables by decreasing weight first, then unweighted + /// variables in the order they were added. DecisionBuilder* CreateFinalizer(); private: diff --git a/ortools/routing/docs/ROUTING.md b/ortools/routing/docs/ROUTING.md index b8b23b2270..4f517df89f 100644 --- a/ortools/routing/docs/ROUTING.md +++ b/ortools/routing/docs/ROUTING.md @@ -19,15 +19,16 @@ and .Net. Each language have different requirements for the code samples. ### C++ code samples ```cpp +// Snippet from ortools/routing/samples/simple_routing_program.cc #include #include #include #include +#include "ortools/base/init_google.h" #include "absl/base/log_severity.h" #include "absl/log/globals.h" #include "absl/log/log.h" -#include "ortools/base/init_google.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/enums.pb.h" #include "ortools/routing/index_manager.h" @@ -98,6 +99,7 @@ int main(int argc, char* argv[]) { ```python #!/usr/bin/env python3 +# Snippet from ortools/routing/samples/simple_routing_program.py """Vehicle Routing example.""" from ortools.routing import enums_pb2 @@ -105,72 +107,75 @@ from ortools.routing import pywraprouting def main(): - """Entry point of the program.""" - # Instantiate the data problem. - num_locations = 5 - num_vehicles = 1 - depot = 0 + """Entry point of the program.""" + # Instantiate the data problem. + num_locations = 5 + num_vehicles = 1 + depot = 0 - # Create the routing index manager. - manager = pywraprouting.RoutingIndexManager(num_locations, num_vehicles, depot) + # Create the routing index manager. + manager = pywraprouting.RoutingIndexManager( + num_locations, num_vehicles, depot + ) - # Create Routing Model. 
- routing = pywraprouting.RoutingModel(manager) + # Create Routing Model. + routing = pywraprouting.RoutingModel(manager) - # Create and register a transit callback. - def distance_callback(from_index, to_index): - """Returns the absolute difference between the two nodes.""" - # Convert from routing variable Index to user NodeIndex. - from_node = int(manager.IndexToNode(from_index)) - to_node = int(manager.IndexToNode(to_index)) - return abs(to_node - from_node) + # Create and register a transit callback. + def distance_callback(from_index, to_index): + """Returns the absolute difference between the two nodes.""" + # Convert from routing variable Index to user NodeIndex. + from_node = int(manager.IndexToNode(from_index)) + to_node = int(manager.IndexToNode(to_index)) + return abs(to_node - from_node) - transit_callback_index = routing.RegisterTransitCallback(distance_callback) + transit_callback_index = routing.RegisterTransitCallback(distance_callback) - # Define cost of each arc. - routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index) + # Define cost of each arc. + routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index) - # Setting first solution heuristic. - search_parameters = pywraprouting.DefaultRoutingSearchParameters() - search_parameters.first_solution_strategy = ( - enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC - ) # pylint: disable=no-member + # Setting first solution heuristic. + search_parameters = pywraprouting.DefaultRoutingSearchParameters() + search_parameters.first_solution_strategy = ( + enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC + ) # pylint: disable=no-member - # Solve the problem. - assignment = routing.SolveWithParameters(search_parameters) + # Solve the problem. + assignment = routing.SolveWithParameters(search_parameters) - # Print solution on console. 
- print(f"Objective: {assignment.ObjectiveValue()}") - index = routing.Start(0) - plan_output = "Route for vehicle 0:\n" - route_distance = 0 - while not routing.IsEnd(index): - plan_output += f"{manager.IndexToNode(index)} -> " - previous_index = index - index = assignment.Value(routing.NextVar(index)) - route_distance += routing.GetArcCostForVehicle(previous_index, index, 0) - plan_output += f"{manager.IndexToNode(index)}\n" - plan_output += f"Distance of the route: {route_distance}m\n" - print(plan_output) + # Print solution on console. + print(f"Objective: {assignment.ObjectiveValue()}") + index = routing.Start(0) + plan_output = "Route for vehicle 0:\n" + route_distance = 0 + while not routing.IsEnd(index): + plan_output += f"{manager.IndexToNode(index)} -> " + previous_index = index + index = assignment.Value(routing.NextVar(index)) + route_distance += routing.GetArcCostForVehicle(previous_index, index, 0) + plan_output += f"{manager.IndexToNode(index)}\n" + plan_output += f"Distance of the route: {route_distance}m\n" + print(plan_output) if __name__ == "__main__": - main() + main() ``` ### Java code samples ```java +// Snippet from ortools/routing/samples/SimpleRoutingProgram.java package com.google.ortools.routing.samples; import static java.lang.Math.abs; import com.google.ortools.Loader; -import com.google.ortools.constraintsolver.Assignment; import com.google.ortools.routing.FirstSolutionStrategy; +import com.google.ortools.routing.RoutingSearchParameters; +import com.google.ortools.constraintsolver.Assignment; import com.google.ortools.routing.Globals; import com.google.ortools.routing.RoutingIndexManager; import com.google.ortools.routing.RoutingModel; -import com.google.ortools.routing.RoutingSearchParameters; import java.util.logging.Logger; /** Minimal Routing example to showcase calling the solver.*/ @@ -234,7 +239,8 @@ public class SimpleRoutingProgram { ### .Net code samples -```cs +```csharp +// Snippet from 
ortools/routing/samples/SimpleRoutingProgram.cs using System; using Google.OrTools.ConstraintSolver; using Google.OrTools.Routing; diff --git a/ortools/routing/filter_committables.cc b/ortools/routing/filter_committables.cc new file mode 100644 index 0000000000..dee45e1787 --- /dev/null +++ b/ortools/routing/filter_committables.cc @@ -0,0 +1,73 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/routing/filter_committables.h" + +#include "absl/log/check.h" +#include "absl/types/span.h" + +namespace operations_research::routing { + +bool PropagateTransitAndSpan(int path, DimensionValues& dimension_values) { + DCHECK_GT(dimension_values.NumNodes(path), 0); + using Interval = DimensionValues::Interval; + const absl::Span cumuls = dimension_values.MutableCumuls(path); + const absl::Span transits = dimension_values.Transits(path); + const int num_nodes = dimension_values.NumNodes(path); + const Interval span = dimension_values.Span(path); + // Span -> cumul front/back. + if (!cumuls.back().IntersectWith(cumuls[0] + span)) return false; + if (!cumuls[0].IntersectWith(cumuls.back() - span)) return false; + // Propagate from start to end. 
+ Interval cumul = cumuls[0]; + for (int t = 0; t < num_nodes - 1; ++t) { + cumul.Add(transits[t]); + if (!cumul.IntersectWith(cumuls[t + 1])) return false; + cumuls[t + 1] = cumul; + } + // Propagate span to cumul front, then re-propagate from start to end + // as long as there are changes. + cumul = cumuls.back() - span; + for (int t = 0; t < num_nodes; ++t) { + if (!cumul.IntersectWith(cumuls[t])) return false; + if (cumul == cumuls[t]) break; + cumuls[t] = cumul; + if (t == num_nodes - 1) break; + cumul.Add(transits[t]); + } + // Propagate from end to start. + cumul = cumuls.back(); + for (int t = num_nodes - 2; t >= 0; --t) { + cumul.Subtract(transits[t]); + if (!cumul.IntersectWith(cumuls[t])) return false; + cumuls[t] = cumul; + } + // Propagate span to cumul back, then re-propagate from end to start + // as long as there are changes. + cumul = cumuls[0] + span; + for (int t = num_nodes - 1; t >= 0; --t) { + if (!cumul.IntersectWith(cumuls[t])) return false; + if (cumul == cumuls[t]) break; + cumuls[t] = cumul; + if (t == 0) break; + cumul.Subtract(transits[t - 1]); + } + // Cumul front/back -> span. + if (!dimension_values.MutableSpan(path).IntersectWith(cumuls.back() - + cumuls[0])) { + return false; + } + return true; +} + +} // namespace operations_research::routing diff --git a/ortools/routing/filter_committables.h b/ortools/routing/filter_committables.h index 536a2a38f6..fa193fcbd5 100644 --- a/ortools/routing/filter_committables.h +++ b/ortools/routing/filter_committables.h @@ -254,22 +254,26 @@ class DimensionValues { max = std::min(max, other.max); return min <= max; } - // A set addition, with intervals: adds other.min to the min, other.max to + // Set addition of intervals: adds other.min to the min, other.max to the + // max, with CapAdd(). 
+ Interval operator+(const Interval& other) const { + DCHECK(!IsEmpty()); + DCHECK(!other.IsEmpty()); + return {.min = CapAdd(min, other.min), .max = CapAdd(max, other.max)}; + } + // Set addition, with intervals: adds other.min to the min, other.max to // the max, with CapAdd(). - void Add(const Interval& other) { - DCHECK(!IsEmpty()); - DCHECK(!other.IsEmpty()); - min = CapAdd(min, other.min); - max = CapAdd(max, other.max); - } - // A set subtraction, with intervals: subtracts other.max from the min, + void Add(const Interval& other) { *this = *this + other; } + // Set subtraction, with intervals: subtracts other.max from the min, // other.min from the max, with CapSub(). - void Subtract(const Interval& other) { + Interval operator-(const Interval& other) const { DCHECK(!IsEmpty()); DCHECK(!other.IsEmpty()); - min = CapSub(min, other.max); - max = CapSub(max, other.min); + return {.min = CapSub(min, other.max), .max = CapSub(max, other.min)}; } + // Set subtraction, with intervals: subtracts other.max from the min, + // other.min from the max, with CapSub(). + void Subtract(const Interval& other) { *this = *this - other; } // Returns an interval containing all integers: {kint64min, kint64max}. 
static Interval AllIntegers() { return {.min = std::numeric_limits::min(), @@ -505,6 +509,8 @@ class DimensionValues { CommittableValue num_elements_; }; +bool PropagateTransitAndSpan(int path, DimensionValues& dimension_values); + class PrePostVisitValues { public: PrePostVisitValues(int num_paths, int num_nodes) diff --git a/ortools/routing/filters.cc b/ortools/routing/filters.cc index 6a39419513..90e9be3f43 100644 --- a/ortools/routing/filters.cc +++ b/ortools/routing/filters.cc @@ -45,6 +45,7 @@ #include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/constraint_solver/constraint_solveri.h" +#include "ortools/routing/breaks.h" #include "ortools/routing/filter_committables.h" #include "ortools/routing/lp_scheduling.h" #include "ortools/routing/parameters.pb.h" @@ -839,6 +840,135 @@ IntVarLocalSearchFilter* MakeVehicleAmortizedCostFilter( namespace { +// TODO(user): Make this filter use PathStates. +// TODO(user): Optimize the case where same vehicle groups are disjoint and +// deltas are not "splitting" the groups. 
+class SameVehicleCostFilter : public BasePathFilter { + public: + explicit SameVehicleCostFilter(const RoutingModel& model) + : BasePathFilter(model.Nexts(), model.Size() + model.vehicles(), + model.GetPathsMetadata()), + model_(model), + same_vehicle_costs_per_node_(model.Size()), + nodes_per_vehicle_(model.GetNumberOfSoftSameVehicleConstraints()), + new_nodes_per_vehicle_(model.GetNumberOfSoftSameVehicleConstraints()), + current_vehicle_per_node_(model.Size()), + current_cost_(0) { + for (int i = 0; i < model.GetNumberOfSoftSameVehicleConstraints(); ++i) { + for (int node : model.GetSoftSameVehicleIndices(i)) { + same_vehicle_costs_per_node_[node].push_back(i); + } + } + start_to_vehicle_.resize(Size(), -1); + for (int v = 0; v < model.vehicles(); v++) { + const int64_t start = model.Start(v); + start_to_vehicle_[start] = v; + } + } + int64_t GetSynchronizedObjectiveValue() const override { + return current_cost_; + } + int64_t GetAcceptedObjectiveValue() const override { + return lns_detected() ? 
0 : delta_cost_; + } + std::string DebugString() const override { return "SameVehicleCostFilter"; } + + private: + bool InitializeAcceptPath() override { + delta_cost_ = current_cost_; + for (int same_vehicle_cost_index : touched_) { + new_nodes_per_vehicle_[same_vehicle_cost_index] = + nodes_per_vehicle_[same_vehicle_cost_index]; + } + touched_.clear(); + return true; + } + bool AcceptPath(int64_t path_start, int64_t chain_start, + int64_t chain_end) override { + const int64_t vehicle = start_to_vehicle_[path_start]; + DCHECK_GE(vehicle, 0); + if (chain_start == chain_end) return true; + for (int64_t node = GetNext(chain_start); node != chain_end; + node = GetNext(node)) { + if (vehicle == current_vehicle_per_node_[node]) continue; + for (int same_vehicle_cost_index : same_vehicle_costs_per_node_[node]) { + auto& new_nodes = new_nodes_per_vehicle_[same_vehicle_cost_index]; + new_nodes[vehicle]++; + new_nodes[current_vehicle_per_node_[node]]--; + if (new_nodes[current_vehicle_per_node_[node]] == 0) { + new_nodes.erase(current_vehicle_per_node_[node]); + } + touched_.insert(same_vehicle_cost_index); + } + } + return true; + } + bool FinalizeAcceptPath(int64_t /*objective_min*/, + int64_t objective_max) override { + for (int same_vehicle_cost_index : touched_) { + CapAddTo(CapSub(GetCost(same_vehicle_cost_index, new_nodes_per_vehicle_), + GetCost(same_vehicle_cost_index, nodes_per_vehicle_)), + &delta_cost_); + } + return delta_cost_ <= objective_max; + } + + void OnAfterSynchronizePaths() override { + current_cost_ = 0; + touched_.clear(); + for (int same_vehicle_cost_index = 0; + same_vehicle_cost_index < nodes_per_vehicle_.size(); + ++same_vehicle_cost_index) { + nodes_per_vehicle_[same_vehicle_cost_index].clear(); + touched_.insert(same_vehicle_cost_index); + } + current_vehicle_per_node_.assign(model_.Size(), -1); + for (int v = 0; v < model_.vehicles(); ++v) { + int64_t node = GetNext(model_.Start(v)); + DCHECK(model_.IsEnd(node) || IsVarSynced(node)); + while 
(!model_.IsEnd(node)) { + for (int same_vehicle_cost_index : same_vehicle_costs_per_node_[node]) { + nodes_per_vehicle_[same_vehicle_cost_index][v]++; + } + current_vehicle_per_node_[node] = v; + node = GetNext(node); + } + } + for (int same_vehicle_cost_index = 0; + same_vehicle_cost_index < nodes_per_vehicle_.size(); + ++same_vehicle_cost_index) { + CapAddTo(GetCost(same_vehicle_cost_index, nodes_per_vehicle_), + &current_cost_); + } + } + int64_t GetCost(int index, const std::vector>& + nodes_per_vehicle) const { + const int num_vehicles_used = nodes_per_vehicle[index].size(); + if (num_vehicles_used <= 1) return 0; + return CapProd(num_vehicles_used - 1, model_.GetSoftSameVehicleCost(index)); + } + + const RoutingModel& model_; + std::vector start_to_vehicle_; + std::vector> same_vehicle_costs_per_node_; + std::vector> nodes_per_vehicle_; + std::vector> new_nodes_per_vehicle_; + absl::flat_hash_set touched_; + std::vector current_vehicle_per_node_; + int64_t current_cost_; + int64_t delta_cost_; +}; + +} // namespace + +IntVarLocalSearchFilter* MakeSameVehicleCostFilter( + const RoutingModel& routing_model) { + return routing_model.solver()->RevAlloc( + new SameVehicleCostFilter(routing_model)); +} + +namespace { + class TypeRegulationsFilter : public BasePathFilter { public: explicit TypeRegulationsFilter(const RoutingModel& model); @@ -1189,29 +1319,31 @@ bool FillDimensionValuesFromRoutingDimension( // TODO(user): use committed values as a cache to avoid calling evaluators. 
void FillPrePostVisitValues( int path, const DimensionValues& dimension_values, - absl::AnyInvocable pre_travel_evaluator, - absl::AnyInvocable post_travel_evaluator, + std::optional> + pre_travel_evaluator, + std::optional> + post_travel_evaluator, PrePostVisitValues& visit_values) { const int num_nodes = dimension_values.NumNodes(path); visit_values.ChangePathSize(path, num_nodes); absl::Span pre_visits = visit_values.MutablePreVisits(path); absl::Span post_visits = visit_values.MutablePostVisits(path); absl::Span nodes = dimension_values.Nodes(path); - if (pre_travel_evaluator == nullptr) { - absl::c_fill(post_visits, 0); - } else { + if (pre_travel_evaluator.has_value()) { for (int i = 0; i < num_nodes - 1; ++i) { - post_visits[i] = pre_travel_evaluator(nodes[i], nodes[i + 1]); + post_visits[i] = (*pre_travel_evaluator)(nodes[i], nodes[i + 1]); } post_visits.back() = 0; - } - if (post_travel_evaluator == nullptr) { - absl::c_fill(pre_visits, 0); } else { + absl::c_fill(post_visits, 0); + } + if (post_travel_evaluator.has_value()) { pre_visits[0] = 0; for (int i = 1; i < num_nodes; ++i) { - pre_visits[i] = post_travel_evaluator(nodes[i - 1], nodes[i]); + pre_visits[i] = (*post_travel_evaluator)(nodes[i - 1], nodes[i]); } + } else { + absl::c_fill(pre_visits, 0); } } @@ -1366,10 +1498,7 @@ class PathCumulFilter : public BasePathFilter { bool FillDimensionValues(int path); // Propagates the transit constraint, cumul[r] + transit[r] == cumul[r+1], // in dimension_values_'s current path data. - bool PropagateTransits(int path); - // Propagates the transit constraint supposing that there are no forbidden - // intervals for cumuls. This is faster than considering the intervals. - bool PropagateTransitsWithoutForbiddenIntervals(int path); + bool PropagateTransitsAndSpans(int path); // Propagates both the transit constraint and cumul forbidden intervals. 
bool PropagateTransitsWithForbiddenIntervals(int path); // Propagates the span constraint, cumul[start] + span == cumul[end]. @@ -1500,6 +1629,9 @@ class PathCumulFilter : public BasePathFilter { // Data reflecting information on path variables for the committed and the // current solution. DimensionValues dimension_values_; + PrePostVisitValues visits_; + BreakPropagator break_propagator_; + // Maps each path to the sum of its path-only costs: span/slack cost, // soft cumul costs, soft span limits. CommittableArray cost_of_path_; @@ -1521,6 +1653,7 @@ class PathCumulFilter : public BasePathFilter { // This vector is empty if there are no precedences on the dimension_. const std::vector> node_index_to_precedences_; + absl::flat_hash_map, int64_t> precedence_offsets_; struct PathAndRank { int path = -1; int rank = -1; @@ -1699,6 +1832,8 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, })), dimension_values_(routing_model.vehicles(), dimension.cumuls().size()), + visits_(routing_model.vehicles(), dimension.cumuls().size()), + break_propagator_(dimension.cumuls().size()), cost_of_path_(NumPaths(), 0), synchronized_objective_value_(0), accepted_objective_value_(0), @@ -1713,6 +1848,15 @@ PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, filter_objective_cost_(filter_objective_cost), may_use_optimizers_(may_use_optimizers), propagate_own_objective_value_(propagate_own_objective_value) { + for (int node = 0; node < node_index_to_precedences_.size(); ++node) { + for (const auto [first_node, second_node, offset, + unused_performed_constraint] : + node_index_to_precedences_[node]) { + int64_t& current_offset = gtl::LookupOrInsert( + &precedence_offsets_, {first_node, second_node}, offset); + current_offset = std::max(current_offset, offset); + } + } #ifndef NDEBUG for (int vehicle = 0; vehicle < routing_model.vehicles(); vehicle++) { if (FilterWithDimensionCumulOptimizerForVehicle(vehicle)) { @@ -1723,11 +1867,12 @@ 
PathCumulFilter::PathCumulFilter(const RoutingModel& routing_model, #endif // NDEBUG } -bool PathCumulFilter::PropagateTransits(int path) { +bool PathCumulFilter::PropagateTransitsAndSpans(int path) { if (has_forbidden_intervals_) { - return PropagateTransitsWithForbiddenIntervals(path); + return PropagateSpan(path) && + PropagateTransitsWithForbiddenIntervals(path) && PropagateSpan(path); } else { - return PropagateTransitsWithoutForbiddenIntervals(path); + return PropagateTransitAndSpan(path, dimension_values_); } } @@ -1766,27 +1911,6 @@ bool PathCumulFilter::PropagateTransitsWithForbiddenIntervals(int path) { return true; } -bool PathCumulFilter::PropagateTransitsWithoutForbiddenIntervals(int path) { - DCHECK_LT(0, dimension_values_.NumNodes(path)); - absl::Span cumuls = dimension_values_.MutableCumuls(path); - absl::Span transits = dimension_values_.Transits(path); - const int num_nodes = dimension_values_.NumNodes(path); - // Propagate from start to end. - Interval cumul = cumuls.front(); - for (int r = 1; r < num_nodes; ++r) { - cumul.Add(transits[r - 1]); - if (!cumul.IntersectWith(cumuls[r])) return false; - cumuls[r] = cumul; - } - // Propagate from end to start. - for (int r = num_nodes - 2; r >= 0; --r) { - cumul.Subtract(transits[r]); - if (!cumul.IntersectWith(cumuls[r])) return false; - cumuls[r] = cumul; - } - return true; -} - bool PathCumulFilter::PropagateSpan(int path) { absl::Span travel_sums = dimension_values_.TravelSums(path); absl::Span cumuls = dimension_values_.MutableCumuls(path); @@ -1904,19 +2028,58 @@ bool PathCumulFilter::AcceptPath(int64_t path_start, int64_t /*chain_start*/, // Filter feasibility: cumul windows, transit, span, breaks. // The first PropagateSpan() is mostly used to check feasibility of total // travel within span max, the second can tighten all start/end/span bounds. 
- if (!PropagateSpan(path) || !PropagateTransits(path) || - !PropagateSpan(path)) { - return false; - } + if (!PropagateTransitsAndSpans(path)) return false; if (dimension_.HasPickupToDeliveryLimits()) { if (!PropagatePickupToDeliveryLimits(path)) return false; // Re-propagate span and transits. - if (!PropagateSpan(path) || !PropagateTransits(path) || - !PropagateSpan(path)) { + if (!PropagateTransitsAndSpans(path)) return false; + } + if (FilterVehicleBreaks(path)) { + // TODO(user) using enum BreakPropagator::PropagationResult once C++20 + // support is available in OSS. + const auto& interbreaks = + dimension_.GetBreakDistanceDurationOfVehicle(path); + if (!PropagateVehicleBreaks(path) || + break_propagator_.PropagateInterbreak(path, dimension_values_, + interbreaks) == + BreakPropagator::PropagationResult::kInfeasible || + !PropagateTransitsAndSpans(path)) { return false; } + // Fill pre/post travel data. + visits_.Revert(); + auto any_invocable = [this](int evaluator_index) + -> std::optional> { + const auto& evaluator = + evaluator_index == -1 + ? nullptr + : dimension_.model()->TransitCallback(evaluator_index); + if (evaluator == nullptr) return std::nullopt; + return evaluator; + }; + FillPrePostVisitValues( + path, dimension_values_, + any_invocable(dimension_.GetPreTravelEvaluatorOfVehicle(path)), + any_invocable(dimension_.GetPostTravelEvaluatorOfVehicle(path)), + visits_); + // Loop until there are no changes, capped at a small number of iterations. 
+ BreakPropagator::PropagationResult result = BreakPropagator::kChanged; + int num_iterations = 2; + while (result == BreakPropagator::kChanged && num_iterations-- > 0) { + result = + break_propagator_.FastPropagations(path, dimension_values_, visits_); + if (result == BreakPropagator::kChanged) { + if (!PropagateVehicleBreaks(path) || + break_propagator_.PropagateInterbreak(path, dimension_values_, + interbreaks) == + BreakPropagator::PropagationResult::kInfeasible || + !PropagateTransitsAndSpans(path)) { + return false; + } + } + } + if (result == BreakPropagator::kInfeasible) return false; } - if (FilterVehicleBreaks(path) && !PropagateVehicleBreaks(path)) return false; // Filter costs: span (linear/quadratic/piecewise), // soft cumul windows (linear/piecewise). @@ -1991,6 +2154,28 @@ bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/, int64_t objective_max) { if (lns_detected()) return true; if (FilterPrecedences()) { + // Fast pass on consecutive nodes of changed paths, useful when the number + // of precedences is much larger than the number of nodes. + // TODO(user): Remove this when we have a better way to express + // precedence chains, which does not require a quadratic number of + // precedences. + for (const int path : dimension_values_.ChangedPaths()) { + const absl::Span travel_sums = + dimension_values_.TravelSums(path); + int prev = -1; + int rank = -1; + for (const int node : dimension_values_.Nodes(path)) { + int64_t offset = std::numeric_limits::min(); + // Check the "opposite" precedence constraint. + if (gtl::FindCopy(precedence_offsets_, std::pair{node, prev}, + &offset) && + CapSub(travel_sums[rank], travel_sums[rank + 1]) < offset) { + return false; + } + prev = node; + ++rank; + } + } // Find location of all nodes: remove committed nodes of changed paths, // then add nodes of changed paths. This removes nodes that became loops. 
for (const int path : dimension_values_.ChangedPaths()) { @@ -2030,6 +2215,17 @@ bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/, DCHECK(node == first_node || node == second_node); DCHECK_EQ(first_node, dimension_values_.Nodes(path1)[rank1]); DCHECK_EQ(second_node, dimension_values_.Nodes(path2)[rank2]); + // Check the compatibility between the precedence and the implicit + // precedence induced from the route sequence. + if (path1 == path2 && rank2 < rank1) { + absl::Span travel_sums = + dimension_values_.TravelSums(path); + // Check that travel(second_node, first_node) <= -offset, + // (equivalent to -travel(second_node, first_node) >= offset). + if (CapSub(travel_sums[rank2], travel_sums[rank1]) < offset) { + return false; + } + } // Check that cumul1 + offset <= cumul2 is feasible. if (CapAdd(dimension_values_.Cumuls(path1)[rank1].min, offset) > dimension_values_.Cumuls(path2)[rank2].max) @@ -2100,9 +2296,7 @@ bool PathCumulFilter::FinalizeAcceptPath(int64_t /*objective_min*/, path_accessor_, /*resource=*/nullptr, filter_objective_cost_ ? &path_cost_with_lp : nullptr); solve_duration_shares--; - if (status == DimensionSchedulingStatus::INFEASIBLE) { - return false; - } + if (status == DimensionSchedulingStatus::INFEASIBLE) return false; // Replace previous path cost with the LP optimizer cost. 
if (filter_objective_cost_ && (status == DimensionSchedulingStatus::OPTIMAL || diff --git a/ortools/routing/filters.h b/ortools/routing/filters.h index 6837dca98a..7d27414fea 100644 --- a/ortools/routing/filters.h +++ b/ortools/routing/filters.h @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -52,8 +53,10 @@ bool FillDimensionValuesFromRoutingDimension( void FillPrePostVisitValues( int path, const DimensionValues& dimension_values, - absl::AnyInvocable pre_travel_evaluator, - absl::AnyInvocable post_travel_evaluator, + std::optional> + pre_travel_evaluator, + std::optional> + post_travel_evaluator, PrePostVisitValues& visit_values); // Propagates vehicle break constraints in dimension_values. @@ -86,6 +89,10 @@ IntVarLocalSearchFilter* MakeNodeDisjunctionFilter( IntVarLocalSearchFilter* MakeVehicleAmortizedCostFilter( const RoutingModel& routing_model); +/// Returns a filter computing same vehicle costs. +IntVarLocalSearchFilter* MakeSameVehicleCostFilter( + const RoutingModel& routing_model); + /// Returns a filter ensuring type regulation constraints are enforced. IntVarLocalSearchFilter* MakeTypeRegulationsFilter( const RoutingModel& routing_model); diff --git a/ortools/routing/flow.cc b/ortools/routing/flow.cc index 7ba53c461c..110f5b70bf 100644 --- a/ortools/routing/flow.cc +++ b/ortools/routing/flow.cc @@ -24,7 +24,6 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/types/span.h" -#include "ortools/base/int_type.h" #include "ortools/base/logging.h" #include "ortools/base/map_util.h" #include "ortools/constraint_solver/constraint_solver.h" @@ -120,7 +119,7 @@ bool RoutingModel::IsMatchingModel() const { return false; } -// Solve matching model using a min-cost flow. Here is the underlyihg flow: +// Solve matching model using a min-cost flow. 
Here is the underlying flow: // // ---------- Source ------------- // | (1,0) | (N,0) @@ -167,8 +166,8 @@ bool RoutingModel::SolveMatchingModel( std::vector optimizers; optimizers.reserve(dimensions.size()); for (RoutingDimension* dimension : dimensions) { - optimizers.emplace_back(dimension, - parameters.continuous_scheduling_solver()); + optimizers.emplace_back( + dimension, parameters.continuous_scheduling_solver(), &search_stats_); } int num_flow_nodes = 0; @@ -389,6 +388,7 @@ bool RoutingModel::SolveMatchingModel( flow.SetNodeSupply(sink, -flow_supply); // TODO(user): Take time limit into account. + search_stats_.num_min_cost_flow_calls++; if (flow.Solve() != SimpleMinCostFlow::OPTIMAL) { return false; } diff --git a/ortools/routing/heuristic_parameters.proto b/ortools/routing/heuristic_parameters.proto new file mode 100644 index 0000000000..ed950db6cc --- /dev/null +++ b/ortools/routing/heuristic_parameters.proto @@ -0,0 +1,101 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Protocol buffer used to parametrize the local cheapest insertion heuristics. + +syntax = "proto3"; + +package operations_research.routing; + +// Parameters used to configure local cheapest insertion heuristics. +message LocalCheapestInsertionParameters { + // In insertion-based heuristics, describes what positions must be considered + // when inserting a pickup/delivery pair, and in what order they are + // considered. 
+ enum PairInsertionStrategy { + // Let the solver decide the set of positions and its ordering. + AUTOMATIC = 0; + // Consider all positions, by increasing (cost(pickup), cost(delivery)). + BEST_PICKUP_THEN_BEST_DELIVERY = 1; + // Consider all positions, by increasing cost(pickup) + cost(delivery). + BEST_PICKUP_DELIVERY_PAIR = 2; + // Only consider insertion positions that are compatible with the multitour + // property, meaning a series of pickups may only start when the vehicle + // is not carrying any delivery. This setting is designed to explore far + // fewer possibilities than the full BEST_PICKUP_DELIVERY_PAIR. + // Order by increasing cost(pickup) + cost(delivery). + BEST_PICKUP_DELIVERY_PAIR_MULTITOUR = 3; + } + + // Choice of insertion strategy for pickup/delivery pairs, used in local + // cheapest insertion, both first solution heuristic and LNS. + PairInsertionStrategy pickup_delivery_strategy = 1; + + // Properties used to select in which order nodes or node pairs are considered + // in insertion heuristics. + enum InsertionSortingProperty { + // Invalid property. + SORTING_PROPERTY_UNSPECIFIED = 0; + // Selects nodes with the least number of allowed vehicles. + SORTING_PROPERTY_ALLOWED_VEHICLES = 1; + // Selects nodes with the highest penalty. + SORTING_PROPERTY_PENALTY = 2; + // Selects nodes with the highest penalty / number of allowed vehicles + // ratio. + SORTING_PROPERTY_PENALTY_OVER_ALLOWED_VEHICLES_RATIO = 3; + // Selects nodes that are on average the farthest from vehicles. + SORTING_PROPERTY_HIGHEST_AVG_ARC_COST_TO_VEHICLE_START_ENDS = 4; + // Selects nodes that are on average the closest to vehicles. + SORTING_PROPERTY_LOWEST_AVG_ARC_COST_TO_VEHICLE_START_ENDS = 5; + // Selects nodes with the smallest distance to the closest vehicle. 
+ SORTING_PROPERTY_LOWEST_MIN_ARC_COST_TO_VEHICLE_START_ENDS = 6; + // Selects nodes that have a higher dimension usage on average, where the + // usage is determined as the ratio of node demand over vehicle capacity. + // Currently, this property only supports unary dimensions. + SORTING_PROPERTY_HIGHEST_DIMENSION_USAGE = 7; + // Selects nodes in random order. + // This property cannot be used in conjunction with other properties. + SORTING_PROPERTY_RANDOM = 8; + } + + // The properties used to sort insertion entries in the local cheapest + // insertion heuristic, in *decreasing* order of priority. The properties + // listed here are applied hierarchically, from highest to lowest priority. + // When no properties are provided + // (SORTING_PROPERTY_ALLOWED_VEHICLES, SORTING_PROPERTY_PENALTY) + // is used by default. + repeated InsertionSortingProperty insertion_sorting_properties = 2; +} + +// Parameters used to configure savings heuristics. +message SavingsParameters { + // Ratio (in ]0, 1]) of neighbors to consider for each node when constructing + // the savings. If unspecified, its value is considered to be 1.0. + double neighbors_ratio = 1; + // The number of neighbors considered for each node in the Savings heuristic + // is chosen so that the space used to store the savings doesn't exceed + // max_memory_usage_bytes, which must be in ]0, 1e10]. + // NOTE: If both neighbors_ratio and max_memory_usage_bytes + // are specified, the number of neighbors considered for each node will be the + // minimum of the two numbers determined by these parameters. + double max_memory_usage_bytes = 2; + // Add savings related to reverse arcs when finding the nearest neighbors + // of the nodes. + bool add_reverse_arcs = 3; + // Coefficient of the cost of the arc for which the saving value is being + // computed: + // Saving(a-->b) = Cost(a-->end) + Cost(start-->b) + // - arc_coefficient * Cost(a-->b) + // This parameter must be greater than 0, and its default value is 1. 
+ double arc_coefficient = 4; +} diff --git a/ortools/routing/ils.cc b/ortools/routing/ils.cc index 454747a239..d8dae1e2c1 100644 --- a/ortools/routing/ils.cc +++ b/ortools/routing/ils.cc @@ -34,7 +34,6 @@ #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/routing/ils.pb.h" #include "ortools/routing/parameters.pb.h" -#include "ortools/routing/parameters_utils.h" #include "ortools/routing/routing.h" #include "ortools/routing/search.h" #include "ortools/routing/types.h" @@ -65,20 +64,15 @@ MakeGlobalCheapestInsertionParameters( return gci_parameters; } -// Returns savings parameters based on the given search parameters. -// TODO(user): consider having an ILS specific set of parameters. -SavingsFilteredHeuristic::SavingsParameters MakeSavingsParameters( - const RoutingSearchParameters& search_parameters) { - SavingsFilteredHeuristic::SavingsParameters savings_parameters; - savings_parameters.neighbors_ratio = - search_parameters.savings_neighbors_ratio(); - savings_parameters.max_memory_usage_bytes = - search_parameters.savings_max_memory_usage_bytes(); - savings_parameters.add_reverse_arcs = - search_parameters.savings_add_reverse_arcs(); - savings_parameters.arc_coefficient = - search_parameters.savings_arc_coefficient(); - return savings_parameters; +// Returns local cheapest insertion parameters based on the given recreate +// strategy if available. Returns default parameters otherwise. +LocalCheapestInsertionParameters GetLocalCheapestInsertionParameters( + const RecreateStrategy& recreate_strategy, + const LocalCheapestInsertionParameters& default_parameters) { + return recreate_strategy.has_parameters() && + recreate_strategy.parameters().has_local_cheapest_insertion() + ? recreate_strategy.parameters().local_cheapest_insertion() + : default_parameters; } // Returns a ruin procedure based on the given ruin strategy. 
@@ -226,28 +220,27 @@ std::unique_ptr MakeRecreateProcedure( const RoutingSearchParameters& parameters, RoutingModel* model, std::function stop_search, LocalSearchFilterManager* filter_manager) { - switch (parameters.iterated_local_search_parameters() - .ruin_recreate_parameters() - .recreate_strategy()) { + const RecreateStrategy& recreate_strategy = + parameters.iterated_local_search_parameters() + .ruin_recreate_parameters() + .recreate_strategy(); + switch (recreate_strategy.heuristic()) { case FirstSolutionStrategy::LOCAL_CHEAPEST_INSERTION: { - const LocalCheapestInsertionParameters& lci_params = - parameters.local_cheapest_insertion_parameters(); return std::make_unique( model, std::move(stop_search), absl::bind_front(&RoutingModel::GetArcCostForVehicle, model), - lci_params.pickup_delivery_strategy(), - GetLocalCheapestInsertionSortingProperties( - lci_params.insertion_sorting_properties()), + GetLocalCheapestInsertionParameters( + recreate_strategy, + parameters.local_cheapest_insertion_parameters()), filter_manager, model->GetBinCapacities()); } case FirstSolutionStrategy::LOCAL_CHEAPEST_COST_INSERTION: { - const LocalCheapestInsertionParameters& lci_params = - parameters.local_cheapest_cost_insertion_parameters(); return std::make_unique( model, std::move(stop_search), - /*evaluator=*/nullptr, lci_params.pickup_delivery_strategy(), - GetLocalCheapestInsertionSortingProperties( - lci_params.insertion_sorting_properties()), + /*evaluator=*/nullptr, + GetLocalCheapestInsertionParameters( + recreate_strategy, + parameters.local_cheapest_cost_insertion_parameters()), filter_manager, model->GetBinCapacities()); } case FirstSolutionStrategy::SEQUENTIAL_CHEAPEST_INSERTION: { @@ -273,13 +266,15 @@ std::unique_ptr MakeRecreateProcedure( filter_manager, gci_parameters); } case FirstSolutionStrategy::SAVINGS: { + // TODO(user): support ILS-specific savings parameters. 
return std::make_unique( - model, std::move(stop_search), MakeSavingsParameters(parameters), + model, std::move(stop_search), parameters.savings_parameters(), filter_manager); } case FirstSolutionStrategy::PARALLEL_SAVINGS: { + // TODO(user): support ILS-specific savings parameters. return std::make_unique( - model, std::move(stop_search), MakeSavingsParameters(parameters), + model, std::move(stop_search), parameters.savings_parameters(), filter_manager); } default: diff --git a/ortools/routing/ils.proto b/ortools/routing/ils.proto index 1f0bab3425..33dc98dcbc 100644 --- a/ortools/routing/ils.proto +++ b/ortools/routing/ils.proto @@ -26,6 +26,7 @@ option java_multiple_files = true; option csharp_namespace = "Google.OrTools.Routing"; import "ortools/routing/enums.proto"; +import "ortools/routing/heuristic_parameters.proto"; package operations_research.routing; @@ -146,6 +147,21 @@ message RuinStrategy { } } +// Parameters to customize a recreate strategy. +message RecreateParameters { + oneof parameters { + LocalCheapestInsertionParameters local_cheapest_insertion = 1; + } +} + +// Strategy defining how a solution is recreated. +message RecreateStrategy { + optional FirstSolutionStrategy.Value heuristic = 1; + // The selected parameters should match the chosen recreate heuristic. + // If not set, the default parameters from the RoutingModel are used. + optional RecreateParameters parameters = 2; +} + // The ruin composition strategies specifies how ruin are selected at every ILS // iteration. message RuinCompositionStrategy { @@ -175,7 +191,7 @@ message RuinRecreateParameters { RuinCompositionStrategy.Value ruin_composition_strategy = 2; // Strategy defining how a reference solution is recreated. - FirstSolutionStrategy.Value recreate_strategy = 3; + RecreateStrategy recreate_strategy = 3; // Ratio in [0, 1] of non start/end nodes to consider as neighbors for the // identification of routes spatially close to a non start/end seed node. 
diff --git a/ortools/routing/index_manager.cc b/ortools/routing/index_manager.cc index 4c90d8c85f..64bf481a7e 100644 --- a/ortools/routing/index_manager.cc +++ b/ortools/routing/index_manager.cc @@ -14,7 +14,6 @@ #include "ortools/routing/index_manager.h" #include -#include #include #include diff --git a/ortools/routing/insertion_lns.cc b/ortools/routing/insertion_lns.cc index 3b89fa64ae..bbafc0dba0 100644 --- a/ortools/routing/insertion_lns.cc +++ b/ortools/routing/insertion_lns.cc @@ -22,7 +22,6 @@ #include #include "absl/log/check.h" -#include "ortools/base/int_type.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/constraint_solver/constraint_solveri.h" #include "ortools/routing/routing.h" diff --git a/ortools/routing/lp_scheduling.cc b/ortools/routing/lp_scheduling.cc index f2055afd29..c6bcdf9481 100644 --- a/ortools/routing/lp_scheduling.cc +++ b/ortools/routing/lp_scheduling.cc @@ -39,7 +39,6 @@ #include "ortools/base/map_util.h" #include "ortools/base/mathutil.h" #include "ortools/base/strong_vector.h" -#include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/glop/parameters.pb.h" #include "ortools/graph/min_cost_flow.h" @@ -57,6 +56,9 @@ namespace operations_research::routing { namespace { +constexpr int64_t kint64min = std::numeric_limits::min(); +constexpr int64_t kint64max = std::numeric_limits::max(); + // The following sets of parameters give the fastest response time without // impacting solutions found negatively. 
glop::GlopParameters GetGlopParametersForLocalLP() { @@ -88,11 +90,11 @@ bool GetCumulBoundsWithOffset(const RoutingDimension& dimension, std::max(dimension.GetFirstPossibleGreaterOrEqualValueForNode( node_index, cumul_offset), cumul_var.Min()); - DCHECK_LT(first_after_offset, std::numeric_limits::max()); + DCHECK_LT(first_after_offset, kint64max); *lower_bound = CapSub(first_after_offset, cumul_offset); DCHECK_GE(*lower_bound, 0); - if (*upper_bound == std::numeric_limits::max()) { + if (*upper_bound == kint64max) { return true; } *upper_bound = CapSub(*upper_bound, cumul_offset); @@ -182,7 +184,8 @@ void StoreVisitedPickupDeliveryPairsOnRoute( LocalDimensionCumulOptimizer::LocalDimensionCumulOptimizer( const RoutingDimension* dimension, - RoutingSearchParameters::SchedulingSolver solver_type) + RoutingSearchParameters::SchedulingSolver solver_type, + RoutingSearchStats* search_stats) : optimizer_core_(dimension, /*use_precedence_propagator=*/false) { // Using one solver per vehicle in the hope that if routes don't change this // will be faster. @@ -194,14 +197,14 @@ LocalDimensionCumulOptimizer::LocalDimensionCumulOptimizer( for (int vehicle = 0; vehicle < vehicles; ++vehicle) { // TODO(user): Instead of passing false, detect if the relaxation // will always violate the MIPL constraints. 
- solver_[vehicle] = - std::make_unique(false, parameters); + solver_[vehicle] = std::make_unique( + false, parameters, search_stats); } break; } case RoutingSearchParameters::SCHEDULING_CP_SAT: { for (int vehicle = 0; vehicle < vehicles; ++vehicle) { - solver_[vehicle] = std::make_unique(); + solver_[vehicle] = std::make_unique(search_stats); } break; } @@ -332,6 +335,23 @@ LocalDimensionCumulOptimizer::ComputePackedRouteCumuls( resource, solver_[vehicle].get(), packed_cumuls, packed_breaks); } +DimensionSchedulingStatus +LocalDimensionCumulOptimizer::ComputeRouteCumulsWithTransitTargets( + int vehicle, double solve_duration_ratio, + const std::function& next_accessor, + absl::Span transit_targets, + DimensionCumulOptimizerCore::TransitTargetCost transit_target_cost, + std::vector* optimal_transits, + std::vector* optimal_cumuls, + std::vector* optimal_breaks) { + DCHECK_GT(solve_duration_ratio, 0); + DCHECK_LE(solve_duration_ratio, 1); + return optimizer_core_.OptimizeSingleRouteWithTransitTargets( + vehicle, solve_duration_ratio, next_accessor, transit_targets, + transit_target_cost, solver_[vehicle].get(), optimal_transits, + optimal_cumuls, optimal_breaks); +} + const int CumulBoundsPropagator::kNoParent = -2; const int CumulBoundsPropagator::kParentToBePropagated = -1; @@ -361,7 +381,7 @@ bool CumulBoundsPropagator::InitializeArcsAndBounds( const std::function& next_accessor, int64_t cumul_offset, const std::vector* dimension_travel_info_per_route) { - propagated_bounds_.assign(num_nodes_, std::numeric_limits::min()); + propagated_bounds_.assign(num_nodes_, kint64min); for (std::vector& arcs : outgoing_arcs_) { arcs.clear(); @@ -383,7 +403,7 @@ bool CumulBoundsPropagator::InitializeArcsAndBounds( return false; } lower_bounds[PositiveNode(node)] = cumul_lb; - if (cumul_ub < std::numeric_limits::max()) { + if (cumul_ub < kint64max) { lower_bounds[NegativeNode(node)] = -cumul_ub; } @@ -407,7 +427,7 @@ bool CumulBoundsPropagator::InitializeArcsAndBounds( // node 
+ transit + slack_var == next // Add arcs for node + transit + slack_min <= next AddArcs(node, next, CapAdd(transit, slack_var.Min())); - if (slack_var.Max() < std::numeric_limits::max()) { + if (slack_var.Max() < kint64max) { // Add arcs for node + transit + slack_max >= next. AddArcs(next, node, CapSub(-slack_var.Max(), transit)); } @@ -417,7 +437,7 @@ bool CumulBoundsPropagator::InitializeArcsAndBounds( // Add vehicle span upper bound: end - span_ub <= start. const int64_t span_ub = dimension_.GetSpanUpperBoundForVehicle(vehicle); - if (span_ub < std::numeric_limits::max()) { + if (span_ub < kint64max) { AddArcs(model->End(vehicle), model->Start(vehicle), -span_ub); } @@ -442,7 +462,7 @@ bool CumulBoundsPropagator::InitializeArcsAndBounds( const int64_t limit = dimension_.GetPickupToDeliveryLimitForPair( pair_index, model->GetPickupPosition(pickup_index)->alternative_index, model->GetDeliveryPosition(delivery_index)->alternative_index); - if (limit < std::numeric_limits::max()) { + if (limit < kint64max) { // delivery_cumul - limit <= pickup_cumul. AddArcs(delivery_index, pickup_index, -limit); } @@ -455,11 +475,9 @@ bool CumulBoundsPropagator::InitializeArcsAndBounds( continue; } const bool first_node_unperformed = - lower_bounds[PositiveNode(first_node)] == - std::numeric_limits::min(); + lower_bounds[PositiveNode(first_node)] == kint64min; const bool second_node_unperformed = - lower_bounds[PositiveNode(second_node)] == - std::numeric_limits::min(); + lower_bounds[PositiveNode(second_node)] == kint64min; switch (RoutingDimension::GetPrecedenceStatus(first_node_unperformed, second_node_unperformed, performed_constraint)) { @@ -552,10 +570,9 @@ bool CumulBoundsPropagator::PropagateCumulBounds( for (const ArcInfo& arc : outgoing_arcs_[node]) { // NOTE: kint64min as a lower bound means no lower bound at all, so we // don't use this value to propagate. - const int64_t induced_lb = - (lower_bound == std::numeric_limits::min()) - ? 
std::numeric_limits::min() - : CapAdd(lower_bound, arc.offset); + const int64_t induced_lb = (lower_bound == kint64min) + ? kint64min + : CapAdd(lower_bound, arc.offset); const int head_node = arc.head; if (induced_lb <= current_lb[head_node]) { @@ -632,7 +649,7 @@ DimensionCumulOptimizerCore::ComputeSingleRouteSolutionCostWithoutFixedTransits( !model->IsEnd(next_accessor(model->Start(vehicle))) || model->IsVehicleUsedWhenEmpty(vehicle); if (!SetRouteCumulConstraints( - vehicle, next_accessor, dimension_->transit_evaluator(vehicle), + vehicle, next_accessor, dimension_->transit_evaluator(vehicle), {}, dimension_travel_info, dimension_->GetLocalOptimizerOffsetForVehicle(vehicle), optimize_vehicle_costs, solver, nullptr, &cost_offset_value)) { @@ -777,9 +794,7 @@ bool GetDomainOffsetBounds(const Domain& domain, int64_t offset, const int64_t lower_bound = std::max(CapSub(domain.Min(), offset), 0); const int64_t upper_bound = - domain.Max() == std::numeric_limits::max() - ? std::numeric_limits::max() - : CapSub(domain.Max(), offset); + domain.Max() == kint64max ? 
kint64max : CapSub(domain.Max(), offset); if (lower_bound > upper_bound) return false; *interval = ClosedInterval(lower_bound, upper_bound); @@ -892,7 +907,7 @@ DimensionCumulOptimizerCore::OptimizeSingleRouteWithResources( const int64_t cumul_offset = dimension_->GetLocalOptimizerOffsetForVehicle(vehicle); int64_t cost_offset = 0; - if (!SetRouteCumulConstraints(vehicle, next_accessor, transit_accessor, + if (!SetRouteCumulConstraints(vehicle, next_accessor, transit_accessor, {}, dimension_travel_info, cumul_offset, optimize_vehicle_costs, solver, transit_cost, &cost_offset)) { @@ -951,12 +966,12 @@ DimensionCumulOptimizerCore::OptimizeSingleRouteWithResources( } if (cumul_values != nullptr) { - SetValuesFromLP(current_route_cumul_variables_, cumul_offset, solver, - &cumul_values->at(i)); + SetValuesFromLP(current_route_cumul_variables_, cumul_offset, kint64min, + solver, &cumul_values->at(i)); } if (break_values != nullptr) { - SetValuesFromLP(current_route_break_variables_, cumul_offset, solver, - &break_values->at(i)); + SetValuesFromLP(current_route_break_variables_, cumul_offset, kint64min, + solver, &break_values->at(i)); } } @@ -1009,7 +1024,7 @@ DimensionSchedulingStatus DimensionCumulOptimizerCore::Optimize( ? nullptr : &dimension_travel_info_per_route[vehicle]; if (!SetRouteCumulConstraints( - vehicle, next_accessor, dimension_->transit_evaluator(vehicle), + vehicle, next_accessor, dimension_->transit_evaluator(vehicle), {}, dimension_travel_info, cumul_offset, optimize_vehicle_costs, solver, &route_transit_cost, &route_cost_offset)) { return DimensionSchedulingStatus::INFEASIBLE; @@ -1046,8 +1061,10 @@ DimensionSchedulingStatus DimensionCumulOptimizerCore::Optimize( // TODO(user): In case the status is RELAXED_OPTIMAL_ONLY, check we can // safely avoid filling variable and cost values. 
- SetValuesFromLP(index_to_cumul_variable_, cumul_offset, solver, cumul_values); - SetValuesFromLP(all_break_variables_, cumul_offset, solver, break_values); + SetValuesFromLP(index_to_cumul_variable_, cumul_offset, kint64min, solver, + cumul_values); + SetValuesFromLP(all_break_variables_, cumul_offset, kint64min, solver, + break_values); SetResourceIndices(solver, resource_indices_per_group); if (cost_without_transits != nullptr) { @@ -1101,9 +1118,10 @@ DimensionSchedulingStatus DimensionCumulOptimizerCore::OptimizeAndPack( // TODO(user): In case the status is RELAXED_OPTIMAL_ONLY, check we can // safely avoid filling variable values. const int64_t global_offset = dimension_->GetGlobalOptimizerOffset(); - SetValuesFromLP(index_to_cumul_variable_, global_offset, solver, + SetValuesFromLP(index_to_cumul_variable_, global_offset, kint64min, solver, cumul_values); - SetValuesFromLP(all_break_variables_, global_offset, solver, break_values); + SetValuesFromLP(all_break_variables_, global_offset, kint64min, solver, + break_values); solver->Clear(); return status; } @@ -1148,10 +1166,10 @@ DimensionCumulOptimizerCore::OptimizeAndPackSingleRoute( } const int64_t local_offset = dimension_->GetLocalOptimizerOffsetForVehicle(vehicle); - SetValuesFromLP(current_route_cumul_variables_, local_offset, solver, - cumul_values); - SetValuesFromLP(current_route_break_variables_, local_offset, solver, - break_values); + SetValuesFromLP(current_route_cumul_variables_, local_offset, kint64min, + solver, cumul_values); + SetValuesFromLP(current_route_break_variables_, local_offset, kint64min, + solver, break_values); solver->Clear(); return status; } @@ -1208,10 +1226,10 @@ DimensionSchedulingStatus DimensionCumulOptimizerCore::PackRoutes( solver->ClearObjective(); for (int vehicle : vehicles) { const int end_cumul_var = index_to_cumul_variable_[model->End(vehicle)]; - // end_cumul_var <= solver.GetValue(end_cumul_var) - solver->SetVariableBounds( - end_cumul_var, 
solver->GetVariableLowerBound(end_cumul_var), - MathUtil::FastInt64Round(solver->GetValue(end_cumul_var))); + // end_cumul_var <= solver.GetVariableValue(end_cumul_var) + solver->SetVariableBounds(end_cumul_var, + solver->GetVariableLowerBound(end_cumul_var), + solver->GetVariableValue(end_cumul_var)); // Maximize the starts of the routes. solver->SetObjectiveCoefficient( @@ -1227,6 +1245,168 @@ DimensionSchedulingStatus DimensionCumulOptimizerCore::PackRoutes( return status; } +DimensionSchedulingStatus +DimensionCumulOptimizerCore::OptimizeSingleRouteWithTransitTargets( + int vehicle, double solve_duration_ratio, + const std::function& next_accessor, + absl::Span transit_targets, + TransitTargetCost transit_target_cost, RoutingLinearSolverWrapper* solver, + std::vector* optimal_transits, + std::vector* optimal_cumuls, + std::vector* optimal_breaks) { + ClearIfNonNull(optimal_transits); + ClearIfNonNull(optimal_cumuls); + ClearIfNonNull(optimal_breaks); + InitOptimizer(solver); + const int64_t cumul_offset = + dimension_->GetLocalOptimizerOffsetForVehicle(vehicle); + const auto& transit_evaluator = dimension_->transit_evaluator(vehicle); + // Setup the regular route cumul constraints. + if (!SetRouteCumulConstraints( + vehicle, next_accessor, transit_evaluator, transit_targets, {}, + cumul_offset, /*optimize_costs=*/false, solver, nullptr, nullptr)) { + return DimensionSchedulingStatus::INFEASIBLE; + } + DCHECK_GE(current_route_cumul_variables_.size(), 2); + DCHECK_EQ(transit_targets.size(), current_route_cumul_variables_.size() - 1); + + const auto [threshold_ratio, cost_coefficient_below_threshold, + cost_coefficient_above_threshold] = transit_target_cost; + DCHECK_GT(threshold_ratio, 0); + DCHECK_LT(threshold_ratio, 1); + DCHECK_GT(cost_coefficient_above_threshold, 0); + DCHECK_GT(cost_coefficient_below_threshold, cost_coefficient_above_threshold); + + // Add transit target costs, to try and be as close as possible to the transit + // targets. 
+ const std::vector& variable_transits = + current_route_variable_transit_variables_; + DCHECK_EQ(variable_transits.size(), transit_targets.size()); + for (int pos = 0; pos < variable_transits.size(); ++pos) { + int variable_transit = variable_transits[pos]; + if (variable_transit < 0) { + DCHECK_LE(transit_targets[pos], + transit_evaluator(current_route_nodes_[pos], + current_route_nodes_[pos + 1])); + continue; + } + + // NOTE: In the following, constants are identified in upper-case and + // variables are in lower-case. + // We want the variable_transit to be as close as possible to its upper + // bound, UB = TRANSIT_TARGET - FIXED_TRANSIT. + // We therefore try to maximize each variable_transit, but by adding convex + // costs so that potential violations from the transit targets are spread + // along the path. + // + // We use a more "aggressive" cost (hence a more aggressive cost slope) + // when the variable_transit is below a given threshold, determined as + // THRESHOLD = + // (TRANSIT_TARGET_THRESHOLD_RATIO * TRANSIT_TARGET) - FIXED_TRANSIT. + // + // We use violation_above_threshold and violation_below_threshold variables + // to represent how much the variable_transit differs from its upper bound, + // by representing the overall violation as a sum of the violation above the + // THRESHOLD and below it. We have: + // variable_transit + violation_above_threshold + violation_below_threshold + // == UB, with + // violation_above_threshold ∈ [0, UB - THRESHOLD] and + // violation_below_threshold ∈ [0, THRESHOLD]. + // The goal is then to minimize the overall violation, by + // adding to the objective function: + // Cost += violation_above_threshold * COST_COEFFICIENT_ABOVE_THRESHOLD + + // violation_below_threshold * COST_COEFFICIENT_BELOW_THRESHOLD. 
+ // + // Since the cost coefficients are such that the cost function is convex wrt + // the variable_transit + // (COST_COEFFICIENT_BELOW_THRESHOLD > COST_COEFFICIENT_ABOVE_THRESHOLD), + // The solver will use up all the violation_above_threshold before starting + // to use violation_below_threshold in order to minimize the overall cost, + // with a preference for using up as little of violation_below as possible + // along the route. + const int64_t variable_transit_ub = + solver->GetVariableUpperBound(variable_transit); + + const int64_t transit_target = transit_targets[pos]; + const int64_t fixed_transit = CapSub(transit_target, variable_transit_ub); + DCHECK_GT(transit_target, fixed_transit); + DCHECK_GE(fixed_transit, 0); + const int64_t threshold = + std::max(CapSub(threshold_ratio * transit_target, fixed_transit), 0L); + + DCHECK_GT(variable_transit_ub, threshold); + const int violation_above_threshold = + solver->AddVariable(0, CapSub(variable_transit_ub, threshold)); + const int violation_below_threshold = solver->AddVariable(0, threshold); + solver->AddLinearConstraint(variable_transit_ub, variable_transit_ub, + {{variable_transit, 1}, + {violation_above_threshold, 1}, + {violation_below_threshold, 1}}); + solver->SetObjectiveCoefficient(violation_above_threshold, + cost_coefficient_above_threshold); + solver->SetObjectiveCoefficient(violation_below_threshold, + cost_coefficient_below_threshold); + } + + // TODO(user): Adapt the solve duration ratio here and below. Divide by 2? + const RoutingModel& model = *dimension_->model(); + DimensionSchedulingStatus status = + solver->Solve(model.RemainingTime() * solve_duration_ratio); + if (status == DimensionSchedulingStatus::INFEASIBLE) { + solver->Clear(); + return status; + } + + // Now force the values of the variable transits to their optimal ones wrt. + // the previous model, then setup the cumul costs in the objective and solve + // again. 
+ // NOTE(user): Given our constraint matrix, our problem *should* always + // have an integer optimal solution, in which case we can round to the nearest + // integer for the variable_transits. + // If this DCHECK ever fails, it can be removed but the code below should be + // adapted to have a 2-phase approach, solving once with the rounded values as + // bound and if this fails, solve again using std::floor. + DCHECK(solver->SolutionIsInteger()); + for (int pos = 0; pos < variable_transits.size(); ++pos) { + const int variable_transit = variable_transits[pos]; + if (variable_transit < 0) { + continue; + } + const int64_t variable_transit_value = + solver->GetVariableValue(variable_transit); + DCHECK_GE(variable_transit_value, 0); + solver->SetVariableBounds(variable_transit, variable_transit_value, + variable_transit_value); + } + solver->ClearObjective(); + SetRouteCumulCosts(vehicle, cumul_offset, /*total_fixed_transit=*/0, solver, + nullptr, nullptr); + status = solver->Solve(model.RemainingTime() * solve_duration_ratio); + if (status == DimensionSchedulingStatus::INFEASIBLE) { + solver->Clear(); + return status; + } + SetValuesFromLP(current_route_cumul_variables_, cumul_offset, kint64min, + solver, optimal_cumuls); + SetValuesFromLP(current_route_break_variables_, cumul_offset, kint64min, + solver, optimal_breaks); + SetValuesFromLP(current_route_variable_transit_variables_, 0, 0, solver, + optimal_transits); + if (optimal_transits != nullptr) { + DCHECK_EQ(optimal_transits->size(), current_route_nodes_.size() - 1); + // Add the fixed transit on each arc to optimal_transits. 
+ for (int pos = 0; pos < optimal_transits->size(); ++pos) { + const int64_t fixed_transit = + std::min(transit_targets[pos], + transit_evaluator(current_route_nodes_[pos], + current_route_nodes_[pos + 1])); + CapAddTo(fixed_transit, &(*optimal_transits)[pos]); + } + } + solver->Clear(); + return status; +} + #define SET_DEBUG_VARIABLE_NAME(solver, var, name) \ do { \ if (DEBUG_MODE) { \ @@ -1295,8 +1475,7 @@ bool DimensionCumulOptimizerCore::TightenRouteCumulBounds( for (int pos = route_size - 2; pos >= 0; --pos) { // If cumul_max[pos+1] is kint64max, it will be translated to // double +infinity, so it must not constrain cumul_max[pos]. - if (current_route_max_cumuls_[pos + 1] < - std::numeric_limits::max()) { + if (current_route_max_cumuls_[pos + 1] < kint64max) { const int64_t slack_min = dimension_->SlackVar(route[pos])->Min(); current_route_max_cumuls_[pos] = std::min( current_route_max_cumuls_[pos], @@ -1365,23 +1544,36 @@ double FindBestScaling(absl::Span coefficients, bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( const RouteDimensionTravelInfo* dimension_travel_info, - absl::Span lp_slacks, absl::Span fixed_transit, + absl::Span lp_slacks, absl::Span fixed_transits, + absl::Span transit_targets, RoutingLinearSolverWrapper* solver) { const std::vector& lp_cumuls = current_route_cumul_variables_; const int path_size = lp_cumuls.size(); + std::vector& variable_transits = + current_route_variable_transit_variables_; if (dimension_travel_info == nullptr || dimension_travel_info->transition_info.empty()) { - // Travel is not travel-start dependent. + variable_transits.assign(path_size - 1, -1); // Add all path constraints to LP: - // cumul[i] + fixed_transit[i] + slack[i] == cumul[i+1] - // <=> fixed_transit[i] == cumul[i+1] - cumul[i] - slack[i]. + // cumul[i+1] == + // cumul[i] + fixed_transit[i] + variable_transit[i] + slack[i] + // <=> fixed_transit[i] == + // cumul[i+1] - cumul[i] - slack[i] - variable_transit[i]. 
for (int pos = 0; pos < path_size - 1; ++pos) { - const int ct = - solver->CreateNewConstraint(fixed_transit[pos], fixed_transit[pos]); + const int64_t fixed_transit = fixed_transits[pos]; + const int ct = solver->CreateNewConstraint(fixed_transit, fixed_transit); solver->SetCoefficient(ct, lp_cumuls[pos + 1], 1); solver->SetCoefficient(ct, lp_cumuls[pos], -1); solver->SetCoefficient(ct, lp_slacks[pos], -1); + if (!transit_targets.empty()) { + const int64_t max_variable_transit = + CapSub(transit_targets[pos], fixed_transit); + if (max_variable_transit > 0) { + variable_transits[pos] = solver->AddVariable(0, max_variable_transit); + solver->SetCoefficient(ct, variable_transits[pos], -1); + } + } } return true; } @@ -1543,10 +1735,9 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( if (factor <= 0) return false; const int linearization_ct = solver->AddLinearConstraint( - MathUtil::FastInt64Round(factor * (y_intercept - 0.5)), - std::numeric_limits::max(), - {{travel_value, MathUtil::FastInt64Round(factor)}, - {travel_start, MathUtil::FastInt64Round(-factor * slope)}}); + MathUtil::Round(factor * (y_intercept - 0.5)), kint64max, + {{travel_value, MathUtil::Round(factor)}, + {travel_start, MathUtil::Round(-factor * slope)}}); if (need_bins) { solver->SetEnforcementLiteral(linearization_ct, belongs_to_this_segment_var); @@ -1561,8 +1752,8 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( // const int64_t Tm = (transit_function.y_anchors[seg] + // transit_function.y_anchors[seg + 1]) / 2; The constraint is // implemented as: cost_scaled * Tm >= cost const int cost_ct = - // solver->AddLinearConstraint(0, std::numeric_limits::max(), - // {{cost_scaled, Tm}, {cost, -1}}); + // solver->AddLinearConstraint(0, kint64max, + // {{cost_scaled, Tm}, {cost, -1}}); // solver->SetEnforcementLiteral(cost_ct, belongs_to_this_segment_var); } @@ -1641,11 +1832,10 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( if (factor <= 0) return false; 
solver->AddLinearConstraint( - MathUtil::FastInt64Round(factor * y_intercept), - std::numeric_limits::max(), + MathUtil::Round(factor * y_intercept), kint64max, {{compression_cost, std::round(factor)}, {travel_compression_absolute, - MathUtil::FastInt64Round(-factor * slope)}}); + MathUtil::Round(-factor * slope)}}); } // ====== UNCOMMENT TO USE PRODUCT TO COMPUTE THE EXACT ERROR ===== // // Normally cost_scaled = C₂×(Tᵣ - T)²/Tᵣ @@ -1654,8 +1844,7 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( // const int prod = solver->CreateNewPositiveVariable(); // solver->AddProductConstraint(prod, {cost_scaled, travel_value}); // The constraint is implemented as: cost_scaled * Tᵣ >= cost - // solver->AddLinearConstraint(0, std::numeric_limits::max(), - // {{prod, 1}, {cost, -1}}); + // solver->AddLinearConstraint(0, kint64max, {{prod, 1}, {cost, -1}}); // ====== UNCOMMENT TO USE AVERAGE ERROR APPROXIMATION ===== // // Normally cost_scaled = C₂×(Tᵣ - T)²/Tᵣ @@ -1665,8 +1854,7 @@ bool DimensionCumulOptimizerCore::SetRouteTravelConstraints( // cost_scaled = cost. 
So the cost_function must be defined as cost = // C₂×(Tᵣ - T)²/Tₐ The constraint is implemented as: cost_scaled >= cost solver->AddLinearConstraint( - 0, std::numeric_limits::max(), - {{relative_compression_cost, 1}, {compression_cost, -1}}); + 0, kint64max, {{relative_compression_cost, 1}, {compression_cost, -1}}); solver->SetObjectiveCoefficient(relative_compression_cost, 1.0); } @@ -1691,12 +1879,14 @@ bool RouteIsValid(const RoutingModel& model, int vehicle, bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( int vehicle, const std::function& next_accessor, const std::function& transit_accessor, + absl::Span transit_targets, const RouteDimensionTravelInfo* dimension_travel_info, int64_t cumul_offset, bool optimize_costs, RoutingLinearSolverWrapper* solver, int64_t* route_transit_cost, int64_t* route_cost_offset) { RoutingModel* const model = dimension_->model(); // Extract the vehicle's path from next_accessor. - std::vector path; + std::vector& path = current_route_nodes_; + path.clear(); { DCHECK(RouteIsValid(*model, vehicle, next_accessor)); int node = model->Start(vehicle); @@ -1715,6 +1905,9 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( for (int pos = 1; pos < path_size; ++pos) { int64_t& transit = fixed_transit[pos - 1]; transit = transit_accessor(path[pos - 1], path[pos]); + if (!transit_targets.empty()) { + transit = std::min(transit, transit_targets[pos - 1]); + } total_fixed_transit = CapAdd(total_fixed_transit, transit); } } @@ -1785,63 +1978,11 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( } if (!SetRouteTravelConstraints(dimension_travel_info, lp_slacks, - fixed_transit, solver)) { + fixed_transit, transit_targets, solver)) { return false; } if (route_cost_offset != nullptr) *route_cost_offset = 0; - if (optimize_costs) { - // Add soft upper bounds. 
- for (int pos = 0; pos < path_size; ++pos) { - if (!dimension_->HasCumulVarSoftUpperBound(path[pos])) continue; - const int64_t coef = - dimension_->GetCumulVarSoftUpperBoundCoefficient(path[pos]); - if (coef == 0) continue; - int64_t bound = dimension_->GetCumulVarSoftUpperBound(path[pos]); - if (bound < cumul_offset && route_cost_offset != nullptr) { - // Add coef * (cumul_offset - bound) to the cost offset. - *route_cost_offset = CapAdd(*route_cost_offset, - CapProd(CapSub(cumul_offset, bound), coef)); - } - bound = std::max(0, CapSub(bound, cumul_offset)); - if (current_route_max_cumuls_[pos] <= bound) { - // constraint is never violated. - continue; - } - const int soft_ub_diff = solver->CreateNewPositiveVariable(); - SET_DEBUG_VARIABLE_NAME(solver, soft_ub_diff, - absl::StrFormat("soft_ub_diff(%ld)", pos)); - solver->SetObjectiveCoefficient(soft_ub_diff, coef); - // cumul - soft_ub_diff <= bound. - const int ct = solver->CreateNewConstraint( - std::numeric_limits::min(), bound); - solver->SetCoefficient(ct, lp_cumuls[pos], 1); - solver->SetCoefficient(ct, soft_ub_diff, -1); - } - // Add soft lower bounds. - for (int pos = 0; pos < path_size; ++pos) { - if (!dimension_->HasCumulVarSoftLowerBound(path[pos])) continue; - const int64_t coef = - dimension_->GetCumulVarSoftLowerBoundCoefficient(path[pos]); - if (coef == 0) continue; - const int64_t bound = std::max( - 0, CapSub(dimension_->GetCumulVarSoftLowerBound(path[pos]), - cumul_offset)); - if (current_route_min_cumuls_[pos] >= bound) { - // constraint is never violated. 
- continue; - } - const int soft_lb_diff = solver->CreateNewPositiveVariable(); - SET_DEBUG_VARIABLE_NAME(solver, soft_lb_diff, - absl::StrFormat("soft_lb_diff(%ld)", pos)); - solver->SetObjectiveCoefficient(soft_lb_diff, coef); - // bound - cumul <= soft_lb_diff - const int ct = solver->CreateNewConstraint( - bound, std::numeric_limits::max()); - solver->SetCoefficient(ct, lp_cumuls[pos], 1); - solver->SetCoefficient(ct, soft_lb_diff, 1); - } - } // Add pickup and delivery limits. std::vector visited_pairs; StoreVisitedPickupDeliveryPairsOnRoute( @@ -1863,10 +2004,9 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( const int64_t limit = dimension_->GetPickupToDeliveryLimitForPair( pair_index, model->GetPickupPosition(pickup_index)->alternative_index, model->GetDeliveryPosition(delivery_index)->alternative_index); - if (limit < std::numeric_limits::max()) { + if (limit < kint64max) { // delivery_cumul - pickup_cumul <= limit. - const int ct = solver->CreateNewConstraint( - std::numeric_limits::min(), limit); + const int ct = solver->CreateNewConstraint(kint64min, limit); solver->SetCoefficient(ct, index_to_cumul_variable_[delivery_index], 1); solver->SetCoefficient(ct, index_to_cumul_variable_[pickup_index], -1); } @@ -1874,107 +2014,26 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( // Add span bound constraint. const int64_t span_bound = dimension_->GetSpanUpperBoundForVehicle(vehicle); - if (span_bound < std::numeric_limits::max()) { + if (span_bound < kint64max) { // end_cumul - start_cumul <= bound - const int ct = solver->CreateNewConstraint( - std::numeric_limits::min(), span_bound); + const int ct = solver->CreateNewConstraint(kint64min, span_bound); solver->SetCoefficient(ct, lp_cumuls.back(), 1); solver->SetCoefficient(ct, lp_cumuls.front(), -1); } - // Add span and slack costs. - // NOTE: The fixed transit is removed from the span cost since it doesn't - // affect the optimization of the scheduling of the route. 
- const int64_t span_cost_coef = - dimension_->GetSpanCostCoefficientForVehicle(vehicle); - const int64_t slack_cost_coef = CapAdd( - span_cost_coef, dimension_->GetSlackCostCoefficientForVehicle(vehicle)); - if (optimize_costs && slack_cost_coef > 0) { - // span_without_fixed_transit_var = - // end_cumul - start_cumul - total_fixed_transit - const int span_without_fixed_transit_var = - solver->CreateNewPositiveVariable(); - SET_DEBUG_VARIABLE_NAME(solver, span_without_fixed_transit_var, - "span_without_fixed_transit_var"); - solver->AddLinearConstraint(total_fixed_transit, total_fixed_transit, - {{lp_cumuls.back(), 1}, - {lp_cumuls.front(), -1}, - {span_without_fixed_transit_var, -1}}); - solver->SetObjectiveCoefficient(span_without_fixed_transit_var, - slack_cost_coef); - } - // Add soft span cost. - if (optimize_costs && dimension_->HasSoftSpanUpperBounds()) { - const BoundCost bound_cost = - dimension_->GetSoftSpanUpperBoundForVehicle(vehicle); - if (bound_cost.bound < std::numeric_limits::max() && - bound_cost.cost > 0) { - const int span_violation = solver->CreateNewPositiveVariable(); - SET_DEBUG_VARIABLE_NAME(solver, span_violation, "span_violation"); - // end - start <= bound + span_violation - const int violation = solver->CreateNewConstraint( - std::numeric_limits::min(), bound_cost.bound); - solver->SetCoefficient(violation, lp_cumuls.back(), 1.0); - solver->SetCoefficient(violation, lp_cumuls.front(), -1.0); - solver->SetCoefficient(violation, span_violation, -1.0); - // Add span_violation * cost to objective. - solver->SetObjectiveCoefficient(span_violation, bound_cost.cost); - } - } - if (optimize_costs && solver->IsCPSATSolver() && - dimension_->HasQuadraticCostSoftSpanUpperBounds()) { - // NOTE: the quadratic soft bound might be different from the one above. 
- const BoundCost bound_cost = - dimension_->GetQuadraticCostSoftSpanUpperBoundForVehicle(vehicle); - if (bound_cost.bound < std::numeric_limits::max() && - bound_cost.cost > 0) { - const int span_violation = solver->CreateNewPositiveVariable(); - SET_DEBUG_VARIABLE_NAME( - solver, span_violation, - absl::StrFormat("quadratic_span_violation(%ld)", vehicle)); - // end - start <= bound + span_violation - const int violation = solver->CreateNewConstraint( - std::numeric_limits::min(), bound_cost.bound); - solver->SetCoefficient(violation, lp_cumuls.back(), 1.0); - solver->SetCoefficient(violation, lp_cumuls.front(), -1.0); - solver->SetCoefficient(violation, span_violation, -1.0); - // Add variable squared_span_violation, equal to span_violation². - const int squared_span_violation = solver->CreateNewPositiveVariable(); - SET_DEBUG_VARIABLE_NAME( - solver, squared_span_violation, - absl::StrFormat("squared_span_violation(%ld)", vehicle)); - solver->AddProductConstraint(squared_span_violation, - {span_violation, span_violation}); - // Add squared_span_violation * cost to objective. - solver->SetObjectiveCoefficient(squared_span_violation, bound_cost.cost); - } - } - // Add global span constraint. - if (optimize_costs && dimension_->global_span_cost_coefficient() > 0) { - // min_start_cumul_ <= cumuls[start] - int ct = - solver->CreateNewConstraint(std::numeric_limits::min(), 0); - solver->SetCoefficient(ct, min_start_cumul_, 1); - solver->SetCoefficient(ct, lp_cumuls.front(), -1); - // max_end_cumul_ >= cumuls[end] - ct = solver->CreateNewConstraint(0, std::numeric_limits::max()); - solver->SetCoefficient(ct, max_end_cumul_, 1); - solver->SetCoefficient(ct, lp_cumuls.back(), -1); - } - // Fill transit cost if specified. 
- if (route_transit_cost != nullptr) { - if (optimize_costs && span_cost_coef > 0) { - *route_transit_cost = CapProd(total_fixed_transit, span_cost_coef); - } else { - *route_transit_cost = 0; - } + + if (optimize_costs) { + SetRouteCumulCosts(vehicle, cumul_offset, total_fixed_transit, solver, + route_transit_cost, route_cost_offset); } + + current_route_break_variables_.clear(); + if (!dimension_->HasBreakConstraints()) return true; + // For every break that must be inside the route, the duration of that break // must be flowed in the slacks of arcs that can intersect the break. // This LP modelization is correct but not complete: // can miss some cases where the breaks cannot fit. // TODO(user): remove the need for returns in the code below. - current_route_break_variables_.clear(); - if (!dimension_->HasBreakConstraints()) return true; const std::vector& breaks = dimension_->GetBreakIntervalsOfVehicle(vehicle); const int num_breaks = breaks.size(); @@ -1982,15 +2041,14 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( // and it reduces to a span maximum. // TODO(user): Also add the case where no breaks can intersect the route. if (num_breaks == 0) { - int64_t maximum_route_span = std::numeric_limits::max(); + int64_t maximum_route_span = kint64max; for (const auto& distance_duration : dimension_->GetBreakDistanceDurationOfVehicle(vehicle)) { maximum_route_span = std::min(maximum_route_span, distance_duration.first); } - if (maximum_route_span < std::numeric_limits::max()) { - const int ct = solver->CreateNewConstraint( - std::numeric_limits::min(), maximum_route_span); + if (maximum_route_span < kint64max) { + const int ct = solver->CreateNewConstraint(kint64min, maximum_route_span); solver->SetCoefficient(ct, lp_cumuls.back(), 1); solver->SetCoefficient(ct, lp_cumuls.front(), -1); } @@ -2098,8 +2156,7 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( // Break can be before route. 
if (break_end_min <= vehicle_start_max) { const int ct = solver->AddLinearConstraint( - 0, std::numeric_limits::max(), - {{lp_cumuls.front(), 1}, {lp_breaks[br].end, -1}}); + 0, kint64max, {{lp_cumuls.front(), 1}, {lp_breaks[br].end, -1}}); const int break_is_before_route = solver->AddVariable(0, 1); SET_DEBUG_VARIABLE_NAME( solver, break_is_before_route, @@ -2110,8 +2167,7 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( // Break can be after route. if (vehicle_end_min <= break_start_max) { const int ct = solver->AddLinearConstraint( - 0, std::numeric_limits::max(), - {{lp_breaks[br].start, 1}, {lp_cumuls.back(), -1}}); + 0, kint64max, {{lp_breaks[br].start, 1}, {lp_cumuls.back(), -1}}); const int break_is_after_route = solver->AddVariable(0, 1); SET_DEBUG_VARIABLE_NAME( solver, break_is_after_route, @@ -2148,8 +2204,8 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( solver, break_in_slack, absl::StrFormat("break_in_slack(%ld, %ld)", br, pos)); if (slack_linear_lower_bound_ct[pos] == -1) { - slack_linear_lower_bound_ct[pos] = solver->AddLinearConstraint( - std::numeric_limits::min(), 0, {{lp_slacks[pos], -1}}); + slack_linear_lower_bound_ct[pos] = + solver->AddLinearConstraint(kint64min, 0, {{lp_slacks[pos], -1}}); } // To keep the model clean // (cf. 
glop::LinearProgram::NotifyThatColumnsAreClean), constraints on @@ -2177,21 +2233,21 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( solver->AddProductConstraint(break_duration_in_slack, {break_in_slack, lp_breaks[br].duration}); if (slack_exact_lower_bound_ct[pos] == -1) { - slack_exact_lower_bound_ct[pos] = solver->AddLinearConstraint( - std::numeric_limits::min(), 0, {{lp_slacks[pos], -1}}); + slack_exact_lower_bound_ct[pos] = + solver->AddLinearConstraint(kint64min, 0, {{lp_slacks[pos], -1}}); } solver->SetCoefficient(slack_exact_lower_bound_ct[pos], break_duration_in_slack, 1); // If break_in_slack_i == 1, then // 1) break_start >= cumul[pos] + pre_travel[pos] const int break_start_after_current_ct = solver->AddLinearConstraint( - pre_travel[pos], std::numeric_limits::max(), + pre_travel[pos], kint64max, {{lp_breaks[br].start, 1}, {lp_cumuls[pos], -1}}); solver->SetEnforcementLiteral(break_start_after_current_ct, break_in_slack); // 2) break_end <= cumul[pos+1] - post_travel[pos] const int break_ends_before_next_ct = solver->AddLinearConstraint( - post_travel[pos], std::numeric_limits::max(), + post_travel[pos], kint64max, {{lp_cumuls[pos + 1], 1}, {lp_breaks[br].end, -1}}); solver->SetEnforcementLiteral(break_ends_before_next_ct, break_in_slack); @@ -2341,7 +2397,7 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( for (int br = 1; br < num_breaks; ++br) { if (lp_breaks[br].start == -1 || lp_breaks[br - 1].start == -1) continue; solver->AddLinearConstraint( - 0, std::numeric_limits::max(), + 0, kint64max, {{lp_breaks[br - 1].end, -1}, {lp_breaks[br].start, 1}}); } } @@ -2399,11 +2455,11 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( solver->AddLinearConstraint( 1, 1, {{break_is_eligible, 1}, {break_is_not_eligible, 1}}); const int positive_ct = solver->AddLinearConstraint( - min_break_duration, std::numeric_limits::max(), + min_break_duration, kint64max, {{lp_break.end, 1}, {lp_break.start, -1}}); 
solver->SetEnforcementLiteral(positive_ct, break_is_eligible); const int negative_ct = solver->AddLinearConstraint( - std::numeric_limits::min(), min_break_duration - 1, + kint64min, min_break_duration - 1, {{lp_break.end, 1}, {lp_break.start, -1}}); solver->SetEnforcementLiteral(negative_ct, break_is_not_eligible); } @@ -2426,30 +2482,168 @@ bool DimensionCumulOptimizerCore::SetRouteCumulConstraints( solver->SetEnforcementLiteral(empty_cover_ct, break_is_not_eligible); const int cover = - solver->AddVariable(CapAdd(vehicle_start_min, limit), - std::numeric_limits::max()); + solver->AddVariable(CapAdd(vehicle_start_min, limit), kint64max); SET_DEBUG_VARIABLE_NAME(solver, cover, absl::StrFormat("cover(%ld)", br)); solver->AddMaximumConstraint(cover, {previous_cover, break_cover}); // Cover chaining. If route end is not covered, break start must be: // cover_{i-1} < route_end => s_i <= cover_{i-1} const int route_end_is_not_covered = solver->AddReifiedLinearConstraint( - 1, std::numeric_limits::max(), - {{lp_cumuls.back(), 1}, {previous_cover, -1}}); + 1, kint64max, {{lp_cumuls.back(), 1}, {previous_cover, -1}}); const int break_start_cover_ct = solver->AddLinearConstraint( - 0, std::numeric_limits::max(), - {{previous_cover, 1}, {lp_break.start, -1}}); + 0, kint64max, {{previous_cover, 1}, {lp_break.start, -1}}); solver->SetEnforcementLiteral(break_start_cover_ct, route_end_is_not_covered); previous_cover = cover; } - solver->AddLinearConstraint(0, std::numeric_limits::max(), + solver->AddLinearConstraint(0, kint64max, {{previous_cover, 1}, {lp_cumuls.back(), -1}}); } return true; } // NOLINT(readability/fn_size) +void DimensionCumulOptimizerCore::SetRouteCumulCosts( + int vehicle, int64_t cumul_offset, int64_t total_fixed_transit, + RoutingLinearSolverWrapper* solver, int64_t* route_transit_cost, + int64_t* route_cost_offset) { + const std::vector& lp_cumuls = current_route_cumul_variables_; + const std::vector& path = current_route_nodes_; + const int path_size = 
path.size(); + // Add soft upper bounds. + for (int pos = 0; pos < path_size; ++pos) { + const int64_t node = path[pos]; + if (!dimension_->HasCumulVarSoftUpperBound(node)) continue; + const int64_t coef = dimension_->GetCumulVarSoftUpperBoundCoefficient(node); + if (coef == 0) continue; + int64_t bound = dimension_->GetCumulVarSoftUpperBound(node); + if (bound < cumul_offset && route_cost_offset != nullptr) { + // Add coef * (cumul_offset - bound) to the cost offset. + *route_cost_offset = CapAdd(*route_cost_offset, + CapProd(CapSub(cumul_offset, bound), coef)); + } + bound = std::max(0, CapSub(bound, cumul_offset)); + if (current_route_max_cumuls_[pos] <= bound) { + // constraint is never violated. + continue; + } + const int soft_ub_diff = solver->CreateNewPositiveVariable(); + SET_DEBUG_VARIABLE_NAME(solver, soft_ub_diff, + absl::StrFormat("soft_ub_diff(%ld)", pos)); + solver->SetObjectiveCoefficient(soft_ub_diff, coef); + // cumul - soft_ub_diff <= bound. + const int ct = solver->CreateNewConstraint(kint64min, bound); + solver->SetCoefficient(ct, lp_cumuls[pos], 1); + solver->SetCoefficient(ct, soft_ub_diff, -1); + } + // Add soft lower bounds. + for (int pos = 0; pos < path_size; ++pos) { + const int64_t node = path[pos]; + if (!dimension_->HasCumulVarSoftLowerBound(node)) continue; + const int64_t coef = dimension_->GetCumulVarSoftLowerBoundCoefficient(node); + if (coef == 0) continue; + const int64_t bound = std::max( + 0, CapSub(dimension_->GetCumulVarSoftLowerBound(node), cumul_offset)); + if (current_route_min_cumuls_[pos] >= bound) { + // constraint is never violated. 
+ continue; + } + const int soft_lb_diff = solver->CreateNewPositiveVariable(); + SET_DEBUG_VARIABLE_NAME(solver, soft_lb_diff, + absl::StrFormat("soft_lb_diff(%ld)", pos)); + solver->SetObjectiveCoefficient(soft_lb_diff, coef); + // bound - cumul <= soft_lb_diff + const int ct = solver->CreateNewConstraint(bound, kint64max); + solver->SetCoefficient(ct, lp_cumuls[pos], 1); + solver->SetCoefficient(ct, soft_lb_diff, 1); + } + + // Add span and slack costs. + // NOTE: The fixed transit is removed from the span cost since it doesn't + // affect the optimization of the scheduling of the route. + const int64_t span_cost_coef = + dimension_->GetSpanCostCoefficientForVehicle(vehicle); + const int64_t slack_cost_coef = CapAdd( + span_cost_coef, dimension_->GetSlackCostCoefficientForVehicle(vehicle)); + if (slack_cost_coef > 0) { + // span_without_fixed_transit_var = + // end_cumul - start_cumul - total_fixed_transit + const int span_without_fixed_transit_var = + solver->CreateNewPositiveVariable(); + SET_DEBUG_VARIABLE_NAME(solver, span_without_fixed_transit_var, + "span_without_fixed_transit_var"); + solver->AddLinearConstraint(total_fixed_transit, total_fixed_transit, + {{lp_cumuls.back(), 1}, + {lp_cumuls.front(), -1}, + {span_without_fixed_transit_var, -1}}); + solver->SetObjectiveCoefficient(span_without_fixed_transit_var, + slack_cost_coef); + } + // Add soft span cost. 
+ if (dimension_->HasSoftSpanUpperBounds()) { + const BoundCost bound_cost = + dimension_->GetSoftSpanUpperBoundForVehicle(vehicle); + if (bound_cost.bound < kint64max && bound_cost.cost > 0) { + const int span_violation = solver->CreateNewPositiveVariable(); + SET_DEBUG_VARIABLE_NAME(solver, span_violation, "span_violation"); + // end - start <= bound + span_violation + const int violation = + solver->CreateNewConstraint(kint64min, bound_cost.bound); + solver->SetCoefficient(violation, lp_cumuls.back(), 1.0); + solver->SetCoefficient(violation, lp_cumuls.front(), -1.0); + solver->SetCoefficient(violation, span_violation, -1.0); + // Add span_violation * cost to objective. + solver->SetObjectiveCoefficient(span_violation, bound_cost.cost); + } + } + if (solver->IsCPSATSolver() && + dimension_->HasQuadraticCostSoftSpanUpperBounds()) { + // NOTE: the quadratic soft bound might be different from the one above. + const BoundCost bound_cost = + dimension_->GetQuadraticCostSoftSpanUpperBoundForVehicle(vehicle); + if (bound_cost.bound < kint64max && bound_cost.cost > 0) { + const int span_violation = solver->CreateNewPositiveVariable(); + SET_DEBUG_VARIABLE_NAME( + solver, span_violation, + absl::StrFormat("quadratic_span_violation(%ld)", vehicle)); + // end - start <= bound + span_violation + const int violation = + solver->CreateNewConstraint(kint64min, bound_cost.bound); + solver->SetCoefficient(violation, lp_cumuls.back(), 1.0); + solver->SetCoefficient(violation, lp_cumuls.front(), -1.0); + solver->SetCoefficient(violation, span_violation, -1.0); + // Add variable squared_span_violation, equal to span_violation². + const int squared_span_violation = solver->CreateNewPositiveVariable(); + SET_DEBUG_VARIABLE_NAME( + solver, squared_span_violation, + absl::StrFormat("squared_span_violation(%ld)", vehicle)); + solver->AddProductConstraint(squared_span_violation, + {span_violation, span_violation}); + // Add squared_span_violation * cost to objective. 
+ solver->SetObjectiveCoefficient(squared_span_violation, bound_cost.cost); + } + } + // Add global span constraint. + if (dimension_->global_span_cost_coefficient() > 0) { + // min_start_cumul_ <= cumuls[start] + int ct = solver->CreateNewConstraint(kint64min, 0); + solver->SetCoefficient(ct, min_start_cumul_, 1); + solver->SetCoefficient(ct, lp_cumuls.front(), -1); + // max_end_cumul_ >= cumuls[end] + ct = solver->CreateNewConstraint(0, kint64max); + solver->SetCoefficient(ct, max_end_cumul_, 1); + solver->SetCoefficient(ct, lp_cumuls.back(), -1); + } + // Fill transit cost if specified. + if (route_transit_cost != nullptr) { + if (span_cost_coef > 0) { + *route_transit_cost = CapProd(total_fixed_transit, span_cost_coef); + } else { + *route_transit_cost = 0; + } + } +} + namespace { bool AllValuesContainedExcept(const IntVar& var, absl::Span values, const absl::flat_hash_set& ignored_values) { @@ -2503,8 +2697,7 @@ bool DimensionCumulOptimizerCore::SetGlobalConstraints( << " has a self-precedence on node " << first_node << "."; // cumul[second_node] - cumul[first_node] >= offset. 
- const int ct = solver->CreateNewConstraint( - offset, std::numeric_limits::max()); + const int ct = solver->CreateNewConstraint(offset, kint64max); solver->SetCoefficient(ct, second_cumul_var, 1); solver->SetCoefficient(ct, first_cumul_var, -1); } @@ -2704,19 +2897,14 @@ bool DimensionCumulOptimizerCore::SetGlobalConstraintsForResourceAssignment( #undef SET_DEBUG_VARIABLE_NAME void DimensionCumulOptimizerCore::SetValuesFromLP( - absl::Span lp_variables, int64_t offset, + absl::Span lp_variables, int64_t offset, int64_t default_value, RoutingLinearSolverWrapper* solver, std::vector* lp_values) const { if (lp_values == nullptr) return; - lp_values->assign(lp_variables.size(), std::numeric_limits::min()); + lp_values->assign(lp_variables.size(), default_value); for (int i = 0; i < lp_variables.size(); i++) { const int lp_var = lp_variables[i]; - if (lp_var < 0) continue; // Keep default value, kint64min. - const double lp_value_double = solver->GetValue(lp_var); - const int64_t lp_value_int64 = - (lp_value_double >= std::numeric_limits::max()) - ? std::numeric_limits::max() - : MathUtil::FastInt64Round(lp_value_double); - (*lp_values)[i] = CapAdd(lp_value_int64, offset); + if (lp_var < 0) continue; // Keep default value. + (*lp_values)[i] = CapAdd(solver->GetVariableValue(lp_var), offset); } } @@ -2762,7 +2950,8 @@ void DimensionCumulOptimizerCore::SetResourceIndices( for (int rc = 0; rc < num_resource_classes; rc++) { const int assignment_var = resource_class_to_vehicle_assignment_vars[rc * num_vehicles + v]; - if (assignment_var >= 0 && solver->GetValue(assignment_var) == 1) { + if (assignment_var >= 0 && + solver->GetVariableValue(assignment_var) == 1) { // This resource class is assigned to this vehicle. 
const std::vector& class_resource_indices = resource_indices_per_class[RCIndex(rc)]; @@ -2784,7 +2973,8 @@ void DimensionCumulOptimizerCore::SetResourceIndices( GlobalDimensionCumulOptimizer::GlobalDimensionCumulOptimizer( const RoutingDimension* dimension, - RoutingSearchParameters::SchedulingSolver solver_type) + RoutingSearchParameters::SchedulingSolver solver_type, + RoutingSearchStats* search_stats) : optimizer_core_(dimension, /*use_precedence_propagator=*/ !dimension->GetNodePrecedences().empty()) { @@ -2794,11 +2984,11 @@ GlobalDimensionCumulOptimizer::GlobalDimensionCumulOptimizer( /*is_relaxation=*/!dimension->model() ->GetDimensionResourceGroupIndices(dimension) .empty(), - GetGlopParametersForGlobalLP()); + GetGlopParametersForGlobalLP(), search_stats); break; } case RoutingSearchParameters::SCHEDULING_CP_SAT: { - solver_ = std::make_unique(); + solver_ = std::make_unique(search_stats); break; } default: @@ -3213,12 +3403,11 @@ std::string DomainToString( return absl::StrFormat("= %s", Int64ToStr(domain->Get(0))); } else if (domain->Get(0) == 0 && domain->Get(1) == 1) { return "∈ Binary"; - } else if (domain->Get(0) == std::numeric_limits::min() && - domain->Get(1) == std::numeric_limits::max()) { + } else if (domain->Get(0) == kint64min && domain->Get(1) == kint64max) { return "∈ ℝ"; - } else if (domain->Get(0) == std::numeric_limits::min()) { + } else if (domain->Get(0) == kint64min) { return absl::StrFormat("≤ %s", Int64ToStr(domain->Get(1))); - } else if (domain->Get(1) == std::numeric_limits::max()) { + } else if (domain->Get(1) == kint64max) { return absl::StrFormat("≥ %s", Int64ToStr(domain->Get(0))); } return absl::StrFormat("∈ [%ls, %s]", Int64ToStr(domain->Get(0)), @@ -3239,12 +3428,7 @@ std::string VariableToString( if (response_.IsInitialized() && variable.IsInitialized() && (response_.status() == sat::CpSolverStatus::OPTIMAL || response_.status() == sat::CpSolverStatus::FEASIBLE)) { - const double lp_value_double = 
response_.solution(index); - const int64_t lp_value_int64 = - (lp_value_double >= std::numeric_limits::max()) - ? std::numeric_limits::max() - : MathUtil::FastInt64Round(lp_value_double); - s += Int64ToStr(lp_value_int64) + " "; + s += Int64ToStr(response_.solution(index)) + " "; } else { s += "? "; } diff --git a/ortools/routing/lp_scheduling.h b/ortools/routing/lp_scheduling.h index ec2a884f6e..ca526f41f8 100644 --- a/ortools/routing/lp_scheduling.h +++ b/ortools/routing/lp_scheduling.h @@ -171,6 +171,8 @@ class RoutingLinearSolverWrapper { public: static const int kNoConstraint = -1; + explicit RoutingLinearSolverWrapper(RoutingSearchStats* search_stats) + : search_stats_(search_stats) {} virtual ~RoutingLinearSolverWrapper() = default; virtual void Clear() = 0; virtual int CreateNewPositiveVariable() = 0; @@ -195,7 +197,7 @@ class RoutingLinearSolverWrapper { virtual void SetEnforcementLiteral(int ct, int condition) = 0; virtual DimensionSchedulingStatus Solve(absl::Duration duration_limit) = 0; virtual int64_t GetObjectiveValue() const = 0; - virtual double GetValue(int index) const = 0; + virtual int64_t GetVariableValue(int index) const = 0; virtual bool SolutionIsInteger() const = 0; // This function is meant to override the parameters of the solver. 
@@ -273,12 +275,17 @@ class RoutingLinearSolverWrapper { SetEnforcementLiteral(within_bounds_ct, within_bounds); return within_bounds; } + + protected: + RoutingSearchStats* const search_stats_; }; class RoutingGlopWrapper : public RoutingLinearSolverWrapper { public: - RoutingGlopWrapper(bool is_relaxation, const glop::GlopParameters& parameters) - : is_relaxation_(is_relaxation) { + RoutingGlopWrapper(bool is_relaxation, const glop::GlopParameters& parameters, + RoutingSearchStats* search_stats) + : RoutingLinearSolverWrapper(search_stats), + is_relaxation_(is_relaxation) { lp_solver_.SetParameters(parameters); linear_program_.SetMaximizationProblem(false); } @@ -380,7 +387,8 @@ class RoutingGlopWrapper : public RoutingLinearSolverWrapper { if (coefficient != 0) { const double normalized_coeff = coefficient / max_coefficient; SetCoefficient(ct.value(), variable, normalized_coeff); - normalized_objective_value += normalized_coeff * GetValue(variable); + normalized_objective_value += + normalized_coeff * GetValueDouble(glop::ColIndex(variable)); } } normalized_objective_value = std::max( @@ -406,6 +414,7 @@ class RoutingGlopWrapper : public RoutingLinearSolverWrapper { linear_program_.NotifyThatColumnsAreClean(); VLOG(2) << linear_program_.Dump(); const glop::ProblemStatus status = lp_solver_.Solve(linear_program_); + if (search_stats_) search_stats_->num_glop_calls_in_lp_scheduling++; const bool feasible_only = status == glop::ProblemStatus::PRIMAL_FEASIBLE; if (status != glop::ProblemStatus::OPTIMAL && status != glop::ProblemStatus::IMPRECISE && !feasible_only) { @@ -415,11 +424,7 @@ class RoutingGlopWrapper : public RoutingLinearSolverWrapper { return DimensionSchedulingStatus::RELAXED_OPTIMAL_ONLY; } for (const auto& allowed_interval : allowed_intervals_) { - const double value_double = GetValue(allowed_interval.first); - const int64_t value = - (value_double >= std::numeric_limits::max()) - ? 
std::numeric_limits::max() - : MathUtil::FastInt64Round(value_double); + const int64_t value = GetVariableValue(allowed_interval.first); const SortedDisjointIntervalList* const interval_list = allowed_interval.second.get(); const auto it = interval_list->FirstIntervalGreaterOrEqual(value); @@ -433,10 +438,13 @@ class RoutingGlopWrapper : public RoutingLinearSolverWrapper { return DimensionSchedulingStatus::OPTIMAL; } int64_t GetObjectiveValue() const override { - return MathUtil::FastInt64Round(lp_solver_.GetObjectiveValue()); + return MathUtil::Round(lp_solver_.GetObjectiveValue()); } - double GetValue(int index) const override { - return lp_solver_.variable_values()[glop::ColIndex(index)]; + int64_t GetVariableValue(int index) const override { + const double value_double = GetValueDouble(glop::ColIndex(index)); + return (value_double >= std::numeric_limits::max()) + ? std::numeric_limits::max() + : MathUtil::Round(value_double); } bool SolutionIsInteger() const override { return linear_program_.SolutionIsInteger(lp_solver_.variable_values(), @@ -455,6 +463,10 @@ class RoutingGlopWrapper : public RoutingLinearSolverWrapper { std::string PrintModel() const override { return linear_program_.Dump(); } private: + double GetValueDouble(glop::ColIndex index) const { + return lp_solver_.variable_values()[index]; + } + const bool is_relaxation_; glop::LinearProgram linear_program_; glop::LPSolver lp_solver_; @@ -464,8 +476,9 @@ class RoutingGlopWrapper : public RoutingLinearSolverWrapper { class RoutingCPSatWrapper : public RoutingLinearSolverWrapper { public: - RoutingCPSatWrapper() { - parameters_.set_num_search_workers(1); + explicit RoutingCPSatWrapper(RoutingSearchStats* const search_stats) + : RoutingLinearSolverWrapper(search_stats) { + parameters_.set_num_workers(1); // Keeping presolve but with 1 iteration; as of 10/2023 it is // significantly faster than both full presolve and no presolve. 
parameters_.set_cp_model_presolve(true); @@ -480,6 +493,7 @@ class RoutingCPSatWrapper : public RoutingLinearSolverWrapper { parameters_.set_cut_level(0); parameters_.set_add_lp_constraints_lazily(false); parameters_.set_use_absl_random(false); + parameters_.set_alternative_pool_size(0); } ~RoutingCPSatWrapper() override {} void Clear() override { @@ -646,6 +660,7 @@ class RoutingCPSatWrapper : public RoutingLinearSolverWrapper { sat::Model model; model.Add(sat::NewSatParameters(parameters_)); response_ = sat::SolveCpModel(model_, &model); + if (search_stats_) search_stats_->num_cp_sat_calls_in_lp_scheduling++; VLOG(2) << response_; DCHECK_NE(response_.status(), sat::CpSolverStatus::MODEL_INVALID); if (response_.status() == sat::CpSolverStatus::OPTIMAL || @@ -661,9 +676,9 @@ class RoutingCPSatWrapper : public RoutingLinearSolverWrapper { return DimensionSchedulingStatus::INFEASIBLE; } int64_t GetObjectiveValue() const override { - return MathUtil::FastInt64Round(response_.objective_value()); + return MathUtil::Round(response_.objective_value()); } - double GetValue(int index) const override { + int64_t GetVariableValue(int index) const override { return response_.solution(index); } bool SolutionIsInteger() const override { return true; } @@ -776,6 +791,20 @@ class DimensionCumulOptimizerCore { const Resource* resource, RoutingLinearSolverWrapper* solver, std::vector* cumul_values, std::vector* break_values); + struct TransitTargetCost { + double threshold_ratio; + int64_t cost_coefficient_below_threshold; + int64_t cost_coefficient_above_threshold; + }; + DimensionSchedulingStatus OptimizeSingleRouteWithTransitTargets( + int vehicle, double solve_duration_ratio, + const std::function& next_accessor, + absl::Span transit_targets, + TransitTargetCost transit_target_cost, RoutingLinearSolverWrapper* solver, + std::vector* optimal_transits, + std::vector* optimal_cumuls, + std::vector* optimal_breaks); + const RoutingDimension* dimension() const { return dimension_; } 
private: @@ -802,17 +831,29 @@ class DimensionCumulOptimizerCore { bool SetRouteCumulConstraints( int vehicle, const std::function& next_accessor, const std::function& transit_accessor, + absl::Span transit_targets, const RouteDimensionTravelInfo* dimension_travel_info, int64_t cumul_offset, bool optimize_costs, RoutingLinearSolverWrapper* solver, int64_t* route_transit_cost, int64_t* route_cost_offset); + // Sets the objective coefficients related to the cumuls and transits of the + // route in the solver. Supposes that the current_route_cumul_variables_ and + // current_route_nodes_ have correctly been initialized prior to calling this + // method. + void SetRouteCumulCosts(int vehicle, int64_t cumul_offset, + int64_t total_fixed_transit, + RoutingLinearSolverWrapper* solver, + int64_t* route_transit_cost, + int64_t* route_cost_offset); + // Sets the constraints for all variables related to travel. Handles // static or time-dependent travel values. // Returns false if some infeasibility was detected, true otherwise. bool SetRouteTravelConstraints( const RouteDimensionTravelInfo* dimension_travel_info, - absl::Span lp_slacks, absl::Span fixed_transit, + absl::Span lp_slacks, absl::Span fixed_transits, + absl::Span transit_targets, RoutingLinearSolverWrapper* solver); // Sets the global constraints on the dimension, and adds global objective @@ -832,6 +873,7 @@ class DimensionCumulOptimizerCore { int64_t cumul_offset, RoutingLinearSolverWrapper* solver); void SetValuesFromLP(absl::Span lp_variables, int64_t offset, + int64_t default_value, RoutingLinearSolverWrapper* solver, std::vector* lp_values) const; @@ -853,10 +895,14 @@ class DimensionCumulOptimizerCore { std::unique_ptr propagator_; std::vector current_route_min_cumuls_; std::vector current_route_max_cumuls_; + // Stores the nodes on the current route. + std::vector current_route_nodes_; const RoutingDimension* const dimension_; // Scheduler variables for current route cumuls and for all nodes cumuls. 
std::vector current_route_cumul_variables_; std::vector index_to_cumul_variable_; + // Scheduler variables for current route transits. + std::vector current_route_variable_transit_variables_; // Scheduler variables for current route breaks and all vehicle breaks. // There are two variables for each break: start and end. // current_route_break_variables_ has variables corresponding to @@ -896,7 +942,8 @@ class LocalDimensionCumulOptimizer { public: LocalDimensionCumulOptimizer( const RoutingDimension* dimension, - RoutingSearchParameters::SchedulingSolver solver_type); + RoutingSearchParameters::SchedulingSolver solver_type, + RoutingSearchStats* search_stats); // If feasible, computes the optimal cost of the route performed by a vehicle, // minimizing cumul soft lower and upper bound costs and vehicle span costs, @@ -973,6 +1020,18 @@ class LocalDimensionCumulOptimizer { const RoutingModel::ResourceGroup::Resource* resource, std::vector* packed_cumuls, std::vector* packed_breaks); + // TODO(user): Add a "resource" to the method. + // TODO(user): Also pack the route at the end of the optimization. + // --> Merge with the "packing" method ? 
+ DimensionSchedulingStatus ComputeRouteCumulsWithTransitTargets( + int vehicle, double solve_duration_ratio, + const std::function& next_accessor, + absl::Span transit_targets, + DimensionCumulOptimizerCore::TransitTargetCost transit_target_cost, + std::vector* optimal_transits, + std::vector* optimal_cumuls, + std::vector* optimal_breaks); + const RoutingDimension* dimension() const { return optimizer_core_.dimension(); } @@ -986,7 +1045,8 @@ class GlobalDimensionCumulOptimizer { public: GlobalDimensionCumulOptimizer( const RoutingDimension* dimension, - RoutingSearchParameters::SchedulingSolver solver_type); + RoutingSearchParameters::SchedulingSolver solver_type, + RoutingSearchStats* search_stats); // If feasible, computes the optimal cost of the entire model with regards to // the optimizer_core_'s dimension costs, minimizing cumul soft lower/upper // bound costs and vehicle/global span costs, and stores it in "optimal_cost" diff --git a/ortools/routing/parameters.cc b/ortools/routing/parameters.cc index 12b5f744ec..1143540fcd 100644 --- a/ortools/routing/parameters.cc +++ b/ortools/routing/parameters.cc @@ -19,11 +19,14 @@ #include #include "absl/container/flat_hash_map.h" +#include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" +#include "absl/strings/string_view.h" #include "absl/time/time.h" #include "google/protobuf/descriptor.h" #include "google/protobuf/duration.pb.h" +#include "google/protobuf/extension_set.h" #include "google/protobuf/message.h" #include "ortools/base/logging.h" #include "ortools/base/proto_enum_utils.h" @@ -33,6 +36,7 @@ #include "ortools/constraint_solver/solver_parameters.pb.h" #include "ortools/port/proto_utils.h" #include "ortools/routing/enums.pb.h" +#include "ortools/routing/heuristic_parameters.pb.h" #include "ortools/routing/ils.pb.h" #include "ortools/routing/parameters.pb.h" #include "ortools/sat/sat_parameters.pb.h" @@ -67,7 +71,8 @@ IteratedLocalSearchParameters 
CreateDefaultIteratedLocalSearchParameters() { // ->mutable_spatially_close_routes() // ->set_num_ruined_routes(2); rr->set_ruin_composition_strategy(RuinCompositionStrategy::UNSET); - rr->set_recreate_strategy(FirstSolutionStrategy::LOCAL_CHEAPEST_INSERTION); + rr->mutable_recreate_strategy()->set_heuristic( + FirstSolutionStrategy::LOCAL_CHEAPEST_INSERTION); rr->set_route_selection_neighbors_ratio(1.0); rr->set_route_selection_min_neighbors(10); rr->set_route_selection_max_neighbors(100); @@ -86,10 +91,10 @@ RoutingSearchParameters CreateDefaultRoutingSearchParameters() { RoutingSearchParameters p; p.set_first_solution_strategy(FirstSolutionStrategy::AUTOMATIC); p.set_use_unfiltered_first_solution_strategy(false); - p.set_savings_neighbors_ratio(1); - p.set_savings_max_memory_usage_bytes(6e9); - p.set_savings_add_reverse_arcs(false); - p.set_savings_arc_coefficient(1); + p.mutable_savings_parameters()->set_neighbors_ratio(1); + p.mutable_savings_parameters()->set_max_memory_usage_bytes(6e9); + p.mutable_savings_parameters()->set_add_reverse_arcs(false); + p.mutable_savings_parameters()->set_arc_coefficient(1); p.set_cheapest_insertion_farthest_seeds_ratio(0); p.set_cheapest_insertion_first_solution_neighbors_ratio(1); p.set_cheapest_insertion_first_solution_min_neighbors(1); @@ -162,7 +167,7 @@ RoutingSearchParameters CreateDefaultRoutingSearchParameters() { p.set_use_cp_sat(BOOL_FALSE); p.set_use_generalized_cp_sat(BOOL_FALSE); p.mutable_sat_parameters()->set_linearization_level(2); - p.mutable_sat_parameters()->set_num_search_workers(1); + p.mutable_sat_parameters()->set_num_workers(1); p.set_report_intermediate_cp_sat_solutions(false); p.set_fallback_to_cp_sat_size_threshold(20); p.set_continuous_scheduling_solver(RoutingSearchParameters::SCHEDULING_GLOP); @@ -259,6 +264,72 @@ bool IsValidNonNegativeDuration(const google::protobuf::Duration& d) { status_or_duration.value() >= absl::ZeroDuration(); } +// Searches for errors in LocalCheapestInsertionParameters 
and appends them to +// the given `errors` vector. +void FindErrorsInLocalCheapestInsertionParameters( + absl::string_view prefix, + const LocalCheapestInsertionParameters& parameters, + std::vector& errors) { + using absl::StrCat; + + absl::flat_hash_map< + LocalCheapestInsertionParameters::InsertionSortingProperty, int> + sorting_properties_map; + for (const LocalCheapestInsertionParameters::InsertionSortingProperty + property : + REPEATED_ENUM_ADAPTER(parameters, insertion_sorting_properties)) { + if (property == + LocalCheapestInsertionParameters::SORTING_PROPERTY_UNSPECIFIED) { + errors.emplace_back(StrCat( + prefix, " - Invalid insertion sorting property: ", + LocalCheapestInsertionParameters::InsertionSortingProperty_Name( + LocalCheapestInsertionParameters::SORTING_PROPERTY_UNSPECIFIED))); + } + const int occurrences = sorting_properties_map[property]++; + if (occurrences == 2) { + errors.emplace_back(StrCat( + prefix, " - Duplicate insertion sorting property: ", + LocalCheapestInsertionParameters::InsertionSortingProperty_Name( + property))); + } + if (property == LocalCheapestInsertionParameters::SORTING_PROPERTY_RANDOM && + parameters.insertion_sorting_properties().size() > 1) { + errors.emplace_back( + StrCat(prefix, + " - SORTING_PROPERTY_RANDOM cannot be used in conjunction " + "with other properties.")); + } + } +} + +void FindErrorsInRecreateParameters( + const FirstSolutionStrategy::Value heuristic, + const RecreateParameters& parameters, std::vector& errors) { + switch (parameters.parameters_case()) { + case RecreateParameters::kLocalCheapestInsertion: { + const std::string prefix = + heuristic == FirstSolutionStrategy::LOCAL_CHEAPEST_INSERTION + ? 
"Local cheapest insertion (recreate heuristic)" + : "Local cheapest cost insertion (recreate heuristic)"; + FindErrorsInLocalCheapestInsertionParameters( + prefix, parameters.local_cheapest_insertion(), errors); + break; + } + default: + LOG(DFATAL) << "Unsupported unset recreate parameters."; + break; + } +} + +std::string GetRecreateParametersName(const RecreateParameters& parameters) { + switch (parameters.parameters_case()) { + case RecreateParameters::kLocalCheapestInsertion: + return "local_cheapest_insertion"; + case RecreateParameters::PARAMETERS_NOT_SET: + return "PARAMETERS_NOT_SET"; + } +} + // Searches for errors in ILS parameters and appends them to the given `errors` // vector. void FindErrorsInIteratedLocalSearchParameters( @@ -374,12 +445,52 @@ void FindErrorsInIteratedLocalSearchParameters( "route_selection_max_neighbors")); } - if (rr.recreate_strategy() == FirstSolutionStrategy::UNSET) { + const FirstSolutionStrategy::Value recreate_heuristic = + rr.recreate_strategy().heuristic(); + if (recreate_heuristic == FirstSolutionStrategy::UNSET) { errors.emplace_back( StrCat("Invalid value for " "iterated_local_search_parameters.ruin_recreate_parameters." - "recreate_strategy: ", - rr.recreate_strategy())); + "recreate_strategy.heuristic: ", + FirstSolutionStrategy::Value_Name(recreate_heuristic))); + } + + if (rr.recreate_strategy().has_parameters()) { + const RecreateParameters& recreate_params = + rr.recreate_strategy().parameters(); + if (recreate_params.parameters_case() == + RecreateParameters::PARAMETERS_NOT_SET) { + errors.emplace_back( + StrCat("Invalid value for " + "iterated_local_search_parameters.ruin_recreate_parameters." 
+ "recreate_strategy.parameters: ", + GetRecreateParametersName(recreate_params))); + } else { + const absl::flat_hash_map + strategy_to_parameters_case_map = { + {FirstSolutionStrategy::LOCAL_CHEAPEST_INSERTION, + RecreateParameters::kLocalCheapestInsertion}, + {FirstSolutionStrategy::LOCAL_CHEAPEST_COST_INSERTION, + RecreateParameters::kLocalCheapestInsertion}}; + + const RecreateParameters& recreate_params = + rr.recreate_strategy().parameters(); + + if (const auto params = + strategy_to_parameters_case_map.find(recreate_heuristic); + params == strategy_to_parameters_case_map.end() || + recreate_params.parameters_case() != params->second) { + errors.emplace_back( + StrCat("recreate_strategy.heuristic is set to ", + FirstSolutionStrategy::Value_Name(recreate_heuristic), + " but recreate_strategy.parameters define ", + GetRecreateParametersName(recreate_params))); + } else { + FindErrorsInRecreateParameters(recreate_heuristic, recreate_params, + errors); + } + } } } @@ -485,20 +596,23 @@ std::vector FindErrorsInRoutingSearchParameters( } } #endif // !__ANDROID__ && !__wasm__ - if (const double ratio = search_parameters.savings_neighbors_ratio(); + if (const double ratio = + search_parameters.savings_parameters().neighbors_ratio(); std::isnan(ratio) || ratio <= 0 || ratio > 1) { - errors.emplace_back(StrCat("Invalid savings_neighbors_ratio: ", ratio)); + errors.emplace_back( + StrCat("Invalid savings_parameters.neighbors_ratio: ", ratio)); } if (const double max_memory = - search_parameters.savings_max_memory_usage_bytes(); + search_parameters.savings_parameters().max_memory_usage_bytes(); std::isnan(max_memory) || max_memory <= 0 || max_memory > 1e10) { - errors.emplace_back( - StrCat("Invalid savings_max_memory_usage_bytes: ", max_memory)); + errors.emplace_back(StrCat( + "Invalid savings_parameters.max_memory_usage_bytes: ", max_memory)); } - if (const double coefficient = search_parameters.savings_arc_coefficient(); + if (const double coefficient = + 
search_parameters.savings_parameters().arc_coefficient(); std::isnan(coefficient) || coefficient <= 0 || std::isinf(coefficient)) { errors.emplace_back( - StrCat("Invalid savings_arc_coefficient: ", coefficient)); + StrCat("Invalid savings_parameters.arc_coefficient: ", coefficient)); } if (const double ratio = search_parameters.cheapest_insertion_farthest_seeds_ratio(); @@ -532,40 +646,14 @@ std::vector FindErrorsInRoutingSearchParameters( "Invalid cheapest_insertion_ls_operator_min_neighbors: ", min_neighbors, ". Must be greater or equal to 1.")); } - { - absl::flat_hash_map< - LocalCheapestInsertionParameters::InsertionSortingProperty, int> - sorting_properties_map; - for (const LocalCheapestInsertionParameters::InsertionSortingProperty - property : REPEATED_ENUM_ADAPTER( - search_parameters.local_cheapest_insertion_parameters(), - insertion_sorting_properties)) { - if (property == - LocalCheapestInsertionParameters::SORTING_PROPERTY_UNSPECIFIED) { - errors.emplace_back(StrCat( - "Invalid local cheapest insertion sorting property: ", - LocalCheapestInsertionParameters::InsertionSortingProperty_Name( - LocalCheapestInsertionParameters:: - SORTING_PROPERTY_UNSPECIFIED))); - } - const int occurrences = sorting_properties_map[property]++; - if (occurrences == 2) { - errors.emplace_back(StrCat( - "Duplicate local cheapest insertion sorting property: ", - LocalCheapestInsertionParameters::InsertionSortingProperty_Name( - property))); - } - if (property == - LocalCheapestInsertionParameters::SORTING_PROPERTY_RANDOM && - search_parameters.local_cheapest_insertion_parameters() - .insertion_sorting_properties() - .size() > 1) { - errors.emplace_back( - StrCat("SORTING_PROPERTY_RANDOM cannot be used in conjunction " - "with other properties.")); - } - } - } + + FindErrorsInLocalCheapestInsertionParameters( + "Local cheapest insertion (first solution heuristic)", + search_parameters.local_cheapest_insertion_parameters(), errors); + 
FindErrorsInLocalCheapestInsertionParameters( + "Local cheapest cost insertion (first solution heuristic)", + search_parameters.local_cheapest_cost_insertion_parameters(), errors); + if (const double ratio = search_parameters.ls_operator_neighbors_ratio(); std::isnan(ratio) || ratio <= 0 || ratio > 1) { errors.emplace_back(StrCat("Invalid ls_operator_neighbors_ratio: ", ratio)); @@ -746,7 +834,7 @@ std::vector FindErrorsInRoutingSearchParameters( if (const sat::SatParameters& sat_parameters = search_parameters.sat_parameters(); sat_parameters.enumerate_all_solutions() && - (sat_parameters.num_search_workers() > 1 || + (sat_parameters.num_workers() > 1 || sat_parameters.interleave_search())) { errors.emplace_back( "sat_parameters.enumerate_all_solutions cannot be true in parallel" diff --git a/ortools/routing/parameters.proto b/ortools/routing/parameters.proto index 48f90ec9f8..34b839e092 100644 --- a/ortools/routing/parameters.proto +++ b/ortools/routing/parameters.proto @@ -24,81 +24,22 @@ option csharp_namespace = "Google.OrTools.Routing"; import "google/protobuf/duration.proto"; import "ortools/constraint_solver/solver_parameters.proto"; import "ortools/routing/enums.proto"; +import "ortools/routing/heuristic_parameters.proto"; import "ortools/routing/ils.proto"; import "ortools/sat/sat_parameters.proto"; import "ortools/util/optional_boolean.proto"; package operations_research.routing; -// Parameters used to configure local insertion heuristics. -message LocalCheapestInsertionParameters { - // In insertion-based heuristics, describes what positions must be considered - // when inserting a pickup/delivery pair, and in what order they are - // considered. - enum PairInsertionStrategy { - // Let the solver decide the set of positions and its ordering. - AUTOMATIC = 0; - // Consider all positions, by increasing (cost(pickup), cost(delivery)). - BEST_PICKUP_THEN_BEST_DELIVERY = 1; - // Consider all positions, by increasing by cost(pickup) + cost(delivery). 
- BEST_PICKUP_DELIVERY_PAIR = 2; - // Only consider insertion positions that are compatible with the multitour - // property, meaning a series of pickups may only start when the vehicle - // is not carrying any delivery. This setting is designed to explore much - // less possibilities than the full BEST_PICKUP_DELIVERY_PAIR. - // Order by increasing by cost(pickup) + cost(delivery). - BEST_PICKUP_DELIVERY_PAIR_MULTITOUR = 3; - } - - // Choice of insertion strategy for pickup/delivery pairs, used in local - // cheapest insertion, both first solution heuristic and LNS. - PairInsertionStrategy pickup_delivery_strategy = 1; - - // Properties used to select in which order nodes or node pairs are considered - // in insertion heuristics. - enum InsertionSortingProperty { - // Invalid property. - SORTING_PROPERTY_UNSPECIFIED = 0; - // Selects nodes with the least number of allowed vehicles. - SORTING_PROPERTY_ALLOWED_VEHICLES = 1; - // Selects nodes with the highest penalty. - SORTING_PROPERTY_PENALTY = 2; - // Selects nodes with the highest penalty / number of allowed vehicles - // ratio. - SORTING_PROPERTY_PENALTY_OVER_ALLOWED_VEHICLES_RATIO = 3; - // Selects nodes that are on average the farthest from vehicles. - SORTING_PROPERTY_HIGHEST_AVG_ARC_COST_TO_VEHICLE_START_ENDS = 4; - // Selects nodes that are on average the closest to vehicles. - SORTING_PROPERTY_LOWEST_AVG_ARC_COST_TO_VEHICLE_START_ENDS = 5; - // Select nodes with the smallest distance to the closest vehicle. - SORTING_PROPERTY_LOWEST_MIN_ARC_COST_TO_VEHICLE_START_ENDS = 6; - // Selects nodes that have a higher dimension usage on average, where the - // usage is determined as the ratio of node demand over vehicle capacity. - // Currently, this property only supports unary dimensions. - SORTING_PROPERTY_HIGHEST_DIMENSION_USAGE = 7; - // Selects nodes in random order. - // This property cannot be used in conjunction with other properties. 
- SORTING_PROPERTY_RANDOM = 8; - } - - // The properties used to sort insertion entries in the local cheapest - // insertion heuristic, in *decreasing* order of priority. The properties - // listed here are applied hierarchically, from highest to lowest priority. - // When no properties are provided - // (SORTING_PROPERTY_ALLOWED_VEHICLES, SORTING_PROPERTY_PENALTY) - // is used by default. - repeated InsertionSortingProperty insertion_sorting_properties = 2; -} - // Parameters defining the search used to solve vehicle routing problems. // // If a parameter is unset (or, equivalently, set to its default value), // then the routing library will pick its preferred value for that parameter // automatically: this should be the case for most parameters. // To see those "default" parameters, call GetDefaultRoutingSearchParameters(). -// Next ID: 70 +// Next ID: 71 message RoutingSearchParameters { - reserved 19, 49, 55, 65, 67; + reserved 14, 15, 18, 19, 23, 49, 55, 65, 67; // First solution strategies, used as starting point of local search. FirstSolutionStrategy.Value first_solution_strategy = 1; @@ -108,26 +49,9 @@ message RoutingSearchParameters { // // Use filtered version of first solution strategy if available. bool use_unfiltered_first_solution_strategy = 2; - // Parameters specific to the Savings first solution heuristic. - // Ratio (in ]0, 1]) of neighbors to consider for each node when constructing - // the savings. If unspecified, its value is considered to be 1.0. - double savings_neighbors_ratio = 14; - // The number of neighbors considered for each node in the Savings heuristic - // is chosen so that the space used to store the savings doesn't exceed - // savings_max_memory_usage_bytes, which must be in ]0, 1e10]. - // NOTE: If both savings_neighbors_ratio and savings_max_memory_usage_bytes - // are specified, the number of neighbors considered for each node will be the - // minimum of the two numbers determined by these parameters. 
- double savings_max_memory_usage_bytes = 23; - // Add savings related to reverse arcs when finding the nearest neighbors - // of the nodes. - bool savings_add_reverse_arcs = 15; - // Coefficient of the cost of the arc for which the saving value is being - // computed: - // Saving(a-->b) = Cost(a-->end) + Cost(start-->b) - // - savings_arc_coefficient * Cost(a-->b) - // This parameter must be greater than 0, and its default value is 1. - double savings_arc_coefficient = 18; + + // Parameters for the Savings heuristic. + SavingsParameters savings_parameters = 70; // Ratio (between 0 and 1) of available vehicles in the model on which // farthest nodes of the model are inserted as seeds in the diff --git a/ortools/routing/parameters_utils.h b/ortools/routing/parameters_utils.h index bb01fa5eef..45b0603a39 100644 --- a/ortools/routing/parameters_utils.h +++ b/ortools/routing/parameters_utils.h @@ -17,6 +17,7 @@ #include #include "absl/types/span.h" +#include "ortools/routing/heuristic_parameters.pb.h" #include "ortools/routing/parameters.pb.h" namespace operations_research::routing { diff --git a/ortools/routing/routing.cc b/ortools/routing/routing.cc index 517c315519..a7dc30470a 100644 --- a/ortools/routing/routing.cc +++ b/ortools/routing/routing.cc @@ -39,6 +39,7 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/flags/flag.h" +#include "absl/functional/any_invocable.h" #include "absl/functional/bind_front.h" #include "absl/hash/hash.h" #include "absl/log/check.h" @@ -51,7 +52,6 @@ #include "absl/time/time.h" #include "absl/types/span.h" #include "google/protobuf/util/message_differencer.h" -#include "ortools/base/int_type.h" #include "ortools/base/logging.h" #include "ortools/base/map_util.h" #include "ortools/base/mathutil.h" @@ -77,7 +77,6 @@ #include "ortools/routing/neighborhoods.h" #include "ortools/routing/parameters.h" #include "ortools/routing/parameters.pb.h" -#include "ortools/routing/parameters_utils.h" 
#include "ortools/routing/search.h" #include "ortools/routing/types.h" #include "ortools/routing/utils.h" @@ -1391,7 +1390,7 @@ void RoutingModel::SetAmortizedCostFactorsOfVehicle( } void RoutingModel::AddRouteConstraint( - std::function(const std::vector&)> + absl::AnyInvocable(const std::vector&)> route_evaluator, bool costs_are_homogeneous_across_vehicles) { costs_are_homogeneous_across_vehicles_ &= @@ -1755,6 +1754,52 @@ void RoutingModel::FinalizeVisitTypes() { TopologicallySortVisitTypes(); } +namespace { +template +std::vector> GetTopologicallySortedNodes( + const SparseBitset<>& active_nodes, std::vector node_in_degree, + const std::vector>& children, + const C& comparator) { + std::vector current_nodes_with_zero_indegree; + for (int node : active_nodes.PositionsSetAtLeastOnce()) { + if (node_in_degree[node] == 0) { + current_nodes_with_zero_indegree.push_back(node); + } + } + std::vector> topologically_sorted_nodes; + int num_nodes_added = 0; + while (!current_nodes_with_zero_indegree.empty()) { + // Add all zero-degree nodes to the same topological order group, while + // also marking their dependent nodes that become part of the next group. + topologically_sorted_nodes.push_back({}); + std::vector& topological_group = topologically_sorted_nodes.back(); + std::vector next_nodes_with_zero_indegree; + for (int node : current_nodes_with_zero_indegree) { + topological_group.push_back(node); + num_nodes_added++; + for (int dependent_node : children[node]) { + DCHECK_GT(node_in_degree[dependent_node], 0); + if (--node_in_degree[dependent_node] == 0) { + next_nodes_with_zero_indegree.push_back(dependent_node); + } + } + } + absl::c_sort(topological_group, comparator); + // Swap the current nodes with zero in-degree with the next ones. 
+ current_nodes_with_zero_indegree.swap(next_nodes_with_zero_indegree); + } + + const int num_active_nodes = + active_nodes.NumberOfSetCallsWithDifferentArguments(); + DCHECK_LE(num_nodes_added, num_active_nodes); + if (num_nodes_added < num_active_nodes) { + // Graph is cyclic, no topological order. + topologically_sorted_nodes.clear(); + } + return topologically_sorted_nodes; +} +} // namespace + void RoutingModel::TopologicallySortVisitTypes() { if (!HasSameVehicleTypeRequirements() && !HasTemporalTypeRequirements()) { return; @@ -1795,59 +1840,56 @@ void RoutingModel::TopologicallySortVisitTypes() { } } - // Compute topological order of visit types. - topologically_sorted_visit_types_.clear(); - std::vector current_types_with_zero_indegree; - for (int type : types_in_requirement_graph.PositionsSetAtLeastOnce()) { - DCHECK(type_requirement_tightness[type].first > 0 || - type_requirement_tightness[type].second > 0); - if (in_degree[type] == 0) { - current_types_with_zero_indegree.push_back(type); - } - } + topologically_sorted_visit_types_ = GetTopologicallySortedNodes( + types_in_requirement_graph, std::move(in_degree), type_to_dependent_types, + // Sort the types in the current topological group based on their + // requirement tightness. + // NOTE: For a deterministic order, types with equal tightness are sorted + // by increasing type. + // TODO(user): Put types of the same topological order and same + // requirement tightness in a single group (so that they all get inserted + // simultaneously by the GlobalCheapestInsertion heuristic, for instance). 
+ [&type_requirement_tightness](int type1, int type2) { + const auto& tightness1 = type_requirement_tightness[type1]; + const auto& tightness2 = type_requirement_tightness[type2]; + return tightness1 > tightness2 || + (tightness1 == tightness2 && type1 < type2); + }); +} - int num_types_added = 0; - while (!current_types_with_zero_indegree.empty()) { - // Add all zero-degree nodes to the same topological order group, while - // also marking their dependent types that become part of the next group. - topologically_sorted_visit_types_.push_back({}); - std::vector& topological_group = - topologically_sorted_visit_types_.back(); - std::vector next_types_with_zero_indegree; - for (int type : current_types_with_zero_indegree) { - topological_group.push_back(type); - num_types_added++; - for (int dependent_type : type_to_dependent_types[type]) { - DCHECK_GT(in_degree[dependent_type], 0); - if (--in_degree[dependent_type] == 0) { - next_types_with_zero_indegree.push_back(dependent_type); - } - } +void RoutingModel::FinalizePrecedences() { + for (const RoutingDimension* dimension : dimensions_) { + if (dimension->GetNodePrecedences().empty()) continue; + std::vector in_degree(Size(), 0); + SparseBitset<> nodes_in_precedences(Size()); + std::vector> successors(Size()); + std::vector node_max_offset(Size(), + std::numeric_limits::min()); + // Note: A precedence constraint between first_node and second_node with an + // offset enforces cumuls(second_node) >= cumuls(first_node) + offset. 
+ for (const auto [first_node, second_node, offset, unused] : + dimension->GetNodePrecedences()) { + in_degree[second_node]++; + nodes_in_precedences.Set(first_node); + nodes_in_precedences.Set(second_node); + successors[first_node].insert(second_node); + node_max_offset[first_node] = + std::max(node_max_offset[first_node], offset); + node_max_offset[second_node] = + std::max(node_max_offset[second_node], offset); } - // Sort the types in the current topological group based on their - // requirement tightness. - // NOTE: For a deterministic order, types with equal tightness are sorted by - // increasing type. - // TODO(user): Put types of the same topological order and same - // requirement tightness in a single group (so that they all get inserted - // simultaneously by the GlobalCheapestInsertion heuristic, for instance). - std::sort(topological_group.begin(), topological_group.end(), - [&type_requirement_tightness](int type1, int type2) { - const auto& tightness1 = type_requirement_tightness[type1]; - const auto& tightness2 = type_requirement_tightness[type2]; - return tightness1 > tightness2 || - (tightness1 == tightness2 && type1 < type2); - }); - // Swap the current types with zero in-degree with the next ones. - current_types_with_zero_indegree.swap(next_types_with_zero_indegree); - } - - const int num_types_in_requirement_graph = - types_in_requirement_graph.NumberOfSetCallsWithDifferentArguments(); - DCHECK_LE(num_types_added, num_types_in_requirement_graph); - if (num_types_added < num_types_in_requirement_graph) { - // Requirement graph is cyclic, no topological order. - topologically_sorted_visit_types_.clear(); + topologically_sorted_node_precedences_.push_back( + GetTopologicallySortedNodes( + nodes_in_precedences, std::move(in_degree), successors, + // Sort the nodes in the current topological group based on their + // precedence offset. + // NOTE: For a deterministic order, nodes with equal offset are + // sorted by increasing node. 
+ [&node_max_offset](int node1, int node2) { + const int64_t offset1 = node_max_offset[node1]; + const int64_t offset2 = node_max_offset[node2]; + return offset1 > offset2 || (offset1 == offset2 && node1 < node2); + })); } } @@ -1966,7 +2008,7 @@ void RoutingModel::AddSoftSameVehicleConstraint(std::vector indices, } } -void RoutingModel::SetAllowedVehiclesForIndex(const std::vector& vehicles, +void RoutingModel::SetAllowedVehiclesForIndex(absl::Span vehicles, int64_t index) { DCHECK(!closed_); auto& allowed_vehicles = allowed_vehicles_[index]; @@ -2513,6 +2555,7 @@ void RoutingModel::CloseModelWithParameters( ComputeVehicleTypes(); ComputeResourceClasses(); FinalizeVisitTypes(); + FinalizePrecedences(); vehicle_start_class_callback_ = [this](int64_t start) { return GetVehicleStartClass(start); }; @@ -3558,6 +3601,16 @@ void RoutingModel::SetAssignmentFromOtherModelAssignment( target_assignment->AddObjective(cost_); } +SubSolverStatistics RoutingModel::GetSubSolverStatistics() const { + SubSolverStatistics stats; + stats.set_num_glop_calls_in_lp_scheduling( + search_stats_.num_glop_calls_in_lp_scheduling); + stats.set_num_cp_sat_calls_in_lp_scheduling( + search_stats_.num_cp_sat_calls_in_lp_scheduling); + stats.set_num_min_cost_flow_calls(search_stats_.num_min_cost_flow_calls); + return stats; +} + // Computing a lower bound to the cost of a vehicle routing problem solving a // a linear assignment problem (minimum-cost perfect bipartite matching). 
// A bipartite graph is created with left nodes representing the nodes of the @@ -4926,10 +4979,7 @@ void RoutingModel::CreateNeighborhoodOperators( parameters.local_cheapest_insertion_parameters(); return std::make_unique( this, [this]() { return CheckLimit(time_buffer_); }, - GetLocalSearchArcCostCallback(parameters), - lci_params.pickup_delivery_strategy(), - GetLocalCheapestInsertionSortingProperties( - lci_params.insertion_sorting_properties()), + GetLocalSearchArcCostCallback(parameters), lci_params, GetOrCreateLocalSearchFilterManager( parameters, {/*filter_objective=*/false, /*filter_with_cp_solver=*/false}), @@ -5253,6 +5303,12 @@ RoutingModel::CreateLocalSearchFilters( kAccept, priority}); } } + if (!same_vehicle_costs_.empty()) { + if (options.filter_objective) { + filter_events.push_back( + {MakeSameVehicleCostFilter(*this), kAccept, priority}); + } + } // If vehicle costs are not homogeneous, vehicle variables will be added to // local search deltas and their domain will be checked by @@ -5378,15 +5434,6 @@ RoutingModel::CreateLocalSearchFilters( {MakeRouteConstraintFilter(*this), kAccept, priority}); } - { - ++priority; - for (const RoutingDimension* dimension : dimensions_) { - if (!dimension->HasBreakConstraints()) continue; - filter_events.push_back( - {MakeVehicleBreaksFilter(*this, *dimension), kAccept, priority}); - } - } - if (!extra_filters_.empty()) { ++priority; for (const auto& event : extra_filters_) { @@ -5503,9 +5550,11 @@ void RoutingModel::StoreDimensionCumulOptimizers( global_optimizer_index_[dim] = global_dimension_optimizers_.size(); global_dimension_optimizers_.push_back( {std::make_unique( - dimension, parameters.continuous_scheduling_solver()), + dimension, parameters.continuous_scheduling_solver(), + &search_stats_), std::make_unique( - dimension, parameters.mixed_integer_scheduling_solver())}); + dimension, parameters.mixed_integer_scheduling_solver(), + &search_stats_)}); if (!AllTransitsPositive(*dimension)) { 
dimension->SetOffsetForGlobalOptimizer(0); } else { @@ -5579,9 +5628,11 @@ void RoutingModel::StoreDimensionCumulOptimizers( local_optimizer_index_[dim] = local_dimension_optimizers_.size(); local_dimension_optimizers_.push_back( {std::make_unique( - dimension, parameters.continuous_scheduling_solver()), + dimension, parameters.continuous_scheduling_solver(), + &search_stats_), std::make_unique( - dimension, parameters.mixed_integer_scheduling_solver())}); + dimension, parameters.mixed_integer_scheduling_solver(), + &search_stats_)}); } if (needs_optimizer) { optimized_dimensions_collector_assignment->Add(dimension->cumuls()); @@ -5906,9 +5957,7 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( [this](int64_t i, int64_t j, int64_t vehicle) { return GetArcCostForVehicle(i, j, vehicle); }, - lci_params.pickup_delivery_strategy(), - GetLocalCheapestInsertionSortingProperties( - lci_params.insertion_sorting_properties()), + lci_params, GetOrCreateLocalSearchFilterManager( search_parameters, {/*filter_objective=*/false, /*filter_with_cp_solver=*/false}), @@ -5920,9 +5969,7 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( [this](int64_t i, int64_t j, int64_t vehicle) { return GetArcCostForVehicle(i, j, vehicle); }, - lci_params.pickup_delivery_strategy(), - GetLocalCheapestInsertionSortingProperties( - lci_params.insertion_sorting_properties()), + lci_params, GetOrCreateLocalSearchFilterManager(search_parameters, {/*filter_objective=*/false, /*filter_with_cp_solver=*/true}), @@ -5943,9 +5990,7 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( [FirstSolutionStrategy::LOCAL_CHEAPEST_COST_INSERTION] = CreateIntVarFilteredDecisionBuilder< LocalCheapestInsertionFilteredHeuristic>( - /*evaluator=*/nullptr, lcci_params.pickup_delivery_strategy(), - GetLocalCheapestInsertionSortingProperties( - lcci_params.insertion_sorting_properties()), + /*evaluator=*/nullptr, lcci_params, GetOrCreateLocalSearchFilterManager( search_parameters, 
{/*filter_objective=*/true, /*filter_with_cp_solver=*/false}), @@ -5954,9 +5999,7 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( IntVarFilteredDecisionBuilder* const strong_lcci = CreateIntVarFilteredDecisionBuilder< LocalCheapestInsertionFilteredHeuristic>( - /*evaluator=*/nullptr, lcci_params.pickup_delivery_strategy(), - GetLocalCheapestInsertionSortingProperties( - lcci_params.insertion_sorting_properties()), + /*evaluator=*/nullptr, lcci_params, GetOrCreateLocalSearchFilterManager(search_parameters, {/*filter_objective=*/true, /*filter_with_cp_solver=*/true}), @@ -5971,15 +6014,6 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( [FirstSolutionStrategy::BEST_INSERTION])); // Savings - SavingsFilteredHeuristic::SavingsParameters savings_parameters; - savings_parameters.neighbors_ratio = - search_parameters.savings_neighbors_ratio(); - savings_parameters.max_memory_usage_bytes = - search_parameters.savings_max_memory_usage_bytes(); - savings_parameters.add_reverse_arcs = - search_parameters.savings_add_reverse_arcs(); - savings_parameters.arc_coefficient = - search_parameters.savings_arc_coefficient(); LocalSearchFilterManager* filter_manager = nullptr; if (!search_parameters.use_unfiltered_first_solution_strategy()) { filter_manager = GetOrCreateLocalSearchFilterManager( @@ -5989,7 +6023,7 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( IntVarFilteredDecisionBuilder* parallel_savings_db = CreateIntVarFilteredDecisionBuilder( - savings_parameters, filter_manager); + search_parameters.savings_parameters(), filter_manager); if (!search_parameters.use_unfiltered_first_solution_strategy()) { first_solution_filtered_decision_builders_ [FirstSolutionStrategy::PARALLEL_SAVINGS] = parallel_savings_db; @@ -5999,14 +6033,14 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( solver_->Try( parallel_savings_db, CreateIntVarFilteredDecisionBuilder( - savings_parameters, + search_parameters.savings_parameters(), 
GetOrCreateLocalSearchFilterManager( search_parameters, {/*filter_objective=*/false, /*filter_with_cp_solver=*/true}))); IntVarFilteredDecisionBuilder* sequential_savings_db = CreateIntVarFilteredDecisionBuilder( - savings_parameters, filter_manager); + search_parameters.savings_parameters(), filter_manager); if (!search_parameters.use_unfiltered_first_solution_strategy()) { first_solution_filtered_decision_builders_[FirstSolutionStrategy::SAVINGS] = sequential_savings_db; @@ -6017,7 +6051,7 @@ void RoutingModel::CreateFirstSolutionDecisionBuilders( sequential_savings_db, CreateIntVarFilteredDecisionBuilder< SequentialSavingsFilteredHeuristic>( - savings_parameters, + search_parameters.savings_parameters(), GetOrCreateLocalSearchFilterManager( search_parameters, {/*filter_objective=*/false, /*filter_with_cp_solver=*/true}))); @@ -6471,12 +6505,12 @@ void RoutingModel::AddWeightedVariableTargetToFinalizer(IntVar* var, void RoutingModel::AddWeightedVariableMinimizedByFinalizer(IntVar* var, int64_t cost) { - finalizer_variables_->AddWeightedVariableToMinimize(var, cost); + finalizer_variables_->AddWeightedVariableTarget(var, kint64min, cost); } void RoutingModel::AddWeightedVariableMaximizedByFinalizer(IntVar* var, int64_t cost) { - finalizer_variables_->AddWeightedVariableToMaximize(var, cost); + finalizer_variables_->AddWeightedVariableTarget(var, kint64max, cost); } void RoutingModel::AddVariableTargetToFinalizer(IntVar* var, int64_t target) { @@ -6484,11 +6518,11 @@ void RoutingModel::AddVariableTargetToFinalizer(IntVar* var, int64_t target) { } void RoutingModel::AddVariableMaximizedByFinalizer(IntVar* var) { - finalizer_variables_->AddVariableToMaximize(var); + finalizer_variables_->AddVariableTarget(var, kint64max); } void RoutingModel::AddVariableMinimizedByFinalizer(IntVar* var) { - finalizer_variables_->AddVariableToMinimize(var); + finalizer_variables_->AddVariableTarget(var, kint64min); } void RoutingModel::SetupSearch( @@ -6562,9 +6596,9 @@ 
RoutingDimension::~RoutingDimension() { } void RoutingDimension::Initialize( - const std::vector& transit_evaluators, - const std::vector& cumul_dependent_transit_evaluators, - const std::vector& state_dependent_transit_evaluators, + absl::Span transit_evaluators, + absl::Span cumul_dependent_transit_evaluators, + absl::Span state_dependent_transit_evaluators, int64_t slack_max) { InitializeCumuls(); InitializeTransits(transit_evaluators, cumul_dependent_transit_evaluators, @@ -6895,7 +6929,7 @@ bool TypeRequirementChecker::HasRegulationsToCheck() const { } bool TypeRequirementChecker::CheckRequiredTypesCurrentlyOnRoute( - const std::vector>& required_type_alternatives, + absl::Span> required_type_alternatives, int pos) { for (const absl::flat_hash_set& requirement_alternatives : required_type_alternatives) { @@ -7084,9 +7118,26 @@ void RoutingDimension::CloseModel(bool use_light_propagation) { } } if (HasBreakConstraints()) { - GlobalVehicleBreaksConstraint* constraint = - model()->solver()->RevAlloc(new GlobalVehicleBreaksConstraint(this)); - solver->AddConstraint(constraint); + solver->AddConstraint( + MakeGlobalVehicleBreaksConstraint(model_->solver(), this)); + // If a vehicle has a duration-distance (max interbreak) constraint, + // its breaks must be ordered. + for (int v = 0; v < model_->vehicles(); ++v) { + const std::vector& breaks = GetBreakIntervalsOfVehicle(v); + const int num_breaks = breaks.size(); + if (num_breaks <= 1 || GetBreakDistanceDurationOfVehicle(v).empty()) { + continue; + } + for (int b = 1; b < num_breaks; ++b) { + Constraint* precedence = solver->MakeIntervalVarRelation( + breaks[b], Solver::STARTS_AFTER_END, breaks[b - 1]); + solver->AddConstraint(precedence); + } + } + // Add all cumuls to the finalizer. 
+ for (IntVar* cumul : cumuls_) { + model_->AddVariableMinimizedByFinalizer(cumul); + } } } diff --git a/ortools/routing/routing.h b/ortools/routing/routing.h index d2d90b3886..c52403b301 100644 --- a/ortools/routing/routing.h +++ b/ortools/routing/routing.h @@ -172,12 +172,12 @@ #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" +#include "absl/functional/any_invocable.h" #include "absl/hash/hash.h" #include "absl/log/check.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "absl/types/span.h" -#include "ortools/base/int_type.h" #include "ortools/base/logging.h" #include "ortools/base/strong_vector.h" #include "ortools/base/types.h" @@ -185,6 +185,7 @@ #include "ortools/constraint_solver/constraint_solveri.h" #include "ortools/graph/graph.h" #include "ortools/routing/enums.pb.h" +#include "ortools/routing/heuristic_parameters.pb.h" #include "ortools/routing/index_manager.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/types.h" @@ -192,7 +193,6 @@ #include "ortools/util/piecewise_linear_function.h" #include "ortools/util/range_query_function.h" #include "ortools/util/saturated_arithmetic.h" -#include "ortools/util/scheduling.h" #include "ortools/util/sorted_interval_list.h" namespace operations_research::routing { @@ -251,6 +251,12 @@ class PathsMetadata { std::vector path_of_node_; }; +struct RoutingSearchStats { + int64_t num_cp_sat_calls_in_lp_scheduling = 0; + int64_t num_glop_calls_in_lp_scheduling = 0; + int64_t num_min_cost_flow_calls = 0; +}; + class OR_DLL RoutingModel { public: /// Types of precedence policy applied to pickup and delivery pairs. @@ -969,13 +975,28 @@ class OR_DLL RoutingModel { /// Adds a soft constraint to force a set of variable indices to be on the /// same vehicle. If all nodes are not on the same vehicle, each extra vehicle /// used adds 'cost' to the cost function. 
+ /// TODO(user): Extend this to allow nodes/indices to be on the same given + /// set of vehicles. void AddSoftSameVehicleConstraint(std::vector indices, int64_t cost); + /// Returns the number of soft same vehicle constraints in the model. + int GetNumberOfSoftSameVehicleConstraints() const { + return same_vehicle_costs_.size(); + } + /// Returns the indices of the nodes in the soft same vehicle constraint of + /// index 'index'. + const std::vector& GetSoftSameVehicleIndices(int index) const { + return same_vehicle_costs_[index].indices; + } + /// Returns the cost of the soft same vehicle constraint of index 'index'. + int64_t GetSoftSameVehicleCost(int index) const { + return same_vehicle_costs_[index].value; + } /// Sets the vehicles which can visit a given node. If the node is in a /// disjunction, this will not prevent it from being unperformed. /// Specifying an empty vector of vehicles has no effect (all vehicles /// will be allowed to visit the node). - void SetAllowedVehiclesForIndex(const std::vector& vehicles, + void SetAllowedVehiclesForIndex(absl::Span vehicles, int64_t index); /// Returns true if a vehicle is allowed to visit a given node. @@ -1108,6 +1129,11 @@ class OR_DLL RoutingModel { DCHECK(closed_); return topologically_sorted_visit_types_; } + const std::vector>>& + GetTopologicallySortedNodePrecedences() const { + DCHECK(closed_); + return topologically_sorted_node_precedences_; + } #endif // SWIG /// Incompatibilities: /// Two nodes with "hard" incompatible types cannot share the same route at @@ -1285,13 +1311,15 @@ class OR_DLL RoutingModel { // callback must not return a value if the route vector is invalid, and // returns the value of the route otherwise. // The callback must always return the same value for a given route. 
+#ifndef SWIG void AddRouteConstraint( - std::function(const std::vector&)> + absl::AnyInvocable(const std::vector&)> route_evaluator, bool costs_are_homogeneous_across_vehicles = false); +#endif std::optional GetRouteCost(const std::vector& route) const { int64_t route_cost = 0; - for (const auto& evaluator : route_evaluators_) { + for (auto& evaluator : route_evaluators_) { std::optional cost = evaluator(route); if (!cost.has_value()) return std::nullopt; CapAddTo(cost.value(), &route_cost); @@ -1427,6 +1455,8 @@ class OR_DLL RoutingModel { void SetAssignmentFromOtherModelAssignment( Assignment* target_assignment, const RoutingModel* source_model, const Assignment* source_assignment); + /// Returns detailed search statistics. + operations_research::SubSolverStatistics GetSubSolverStatistics() const; /// Computes a lower bound to the routing problem solving a linear assignment /// problem. The routing model must be closed before calling this method. /// Note that problems with node disjunction constraints (including optional @@ -2258,6 +2288,10 @@ class OR_DLL RoutingModel { void FinalizeVisitTypes(); // Called by FinalizeVisitTypes() to setup topologically_sorted_visit_types_. void TopologicallySortVisitTypes(); + // This method updates topologically_sorted_node_precedences_ which contains + // nodes in topological order based on precedence constraints for + // dimensions of the model. 
+ void FinalizePrecedences(); int64_t GetArcCostForClassInternal(int64_t from_index, int64_t to_index, CostClassIndex cost_class_index) const; int64_t GetArcCostWithGuidedLocalSearchPenalties(int64_t from_index, @@ -2511,8 +2545,8 @@ class OR_DLL RoutingModel { std::vector linear_cost_factor_of_vehicle_; std::vector quadratic_cost_factor_of_vehicle_; bool vehicle_amortized_cost_factors_set_; - std::vector< - std::function(const std::vector&)>> + mutable std::vector< + absl::AnyInvocable(const std::vector&)>> route_evaluators_; /// vehicle_used_when_empty_[vehicle] determines if "vehicle" should be /// taken into account for costs (arc costs, span costs, etc.) and constraints @@ -2618,6 +2652,8 @@ class OR_DLL RoutingModel { std::vector > topologically_sorted_visit_types_; // clang-format on int num_visit_types_; + std::vector>> + topologically_sorted_node_precedences_; // Two indices are equivalent if they correspond to the same node (as given // to the constructors taking a RoutingIndexManager). std::vector index_to_equivalence_class_; @@ -2692,6 +2728,8 @@ class OR_DLL RoutingModel { RegularLimit* first_solution_lns_limit_ = nullptr; absl::Duration time_buffer_; + RoutingSearchStats search_stats_; + std::atomic interrupt_cp_sat_; std::atomic interrupt_cp_; @@ -2737,213 +2775,11 @@ class OR_DLL RoutingModelVisitor : public BaseObject { }; #if !defined(SWIG) -/// This class acts like a CP propagator: it takes a set of tasks given by -/// their start/duration/end features, and reduces the range of possible values. -class DisjunctivePropagator { - public: - /// A structure to hold tasks described by their features. - /// The first num_chain_tasks are considered linked by a chain of precedences, - /// i.e. if i < j < num_chain_tasks, then end(i) <= start(j). - /// This occurs frequently in routing, and can be leveraged by - /// some variants of classic propagators. 
- struct Tasks { - int num_chain_tasks = 0; - std::vector start_min; - std::vector start_max; - std::vector duration_min; - std::vector duration_max; - std::vector end_min; - std::vector end_max; - std::vector is_preemptible; - std::vector forbidden_intervals; - std::vector> distance_duration; - int64_t span_min = 0; - int64_t span_max = kint64max; - - void Clear() { - start_min.clear(); - start_max.clear(); - duration_min.clear(); - duration_max.clear(); - end_min.clear(); - end_max.clear(); - is_preemptible.clear(); - forbidden_intervals.clear(); - distance_duration.clear(); - span_min = 0; - span_max = kint64max; - num_chain_tasks = 0; - } - }; - - /// Computes new bounds for all tasks, returns false if infeasible. - /// This does not compute a fixed point, so recalling it may filter more. - bool Propagate(Tasks* tasks); - - /// Propagates the deductions from the chain of precedences, if there is one. - bool Precedences(Tasks* tasks); - /// Transforms the problem with a time symmetry centered in 0. Returns true - /// for convenience. - bool MirrorTasks(Tasks* tasks); - /// Does edge-finding deductions on all tasks. - bool EdgeFinding(Tasks* tasks); - /// Does detectable precedences deductions on tasks in the chain precedence, - /// taking the time windows of nonchain tasks into account. - bool DetectablePrecedencesWithChain(Tasks* tasks); - /// Tasks might have holes in their domain, this enforces such holes. - bool ForbiddenIntervals(Tasks* tasks); - /// Propagates distance_duration constraints, if any. - bool DistanceDuration(Tasks* tasks); - /// Propagates a lower bound of the chain span, - /// end[num_chain_tasks] - start[0], to span_min. - bool ChainSpanMin(Tasks* tasks); - /// Computes a lower bound of the span of the chain, taking into account only - /// the first nonchain task. - /// For more accurate results, this should be called after Precedences(), - /// otherwise the lower bound might be lower than feasible. 
- bool ChainSpanMinDynamic(Tasks* tasks); - - private: - /// The main algorithm uses Vilim's theta tree data structure. - /// See Petr Vilim's PhD thesis "Global Constraints in Scheduling". - ThetaLambdaTree theta_lambda_tree_; - /// Mappings between events and tasks. - std::vector tasks_by_start_min_; - std::vector tasks_by_end_max_; - std::vector event_of_task_; - std::vector nonchain_tasks_by_start_max_; - /// Maps chain elements to the sum of chain task durations before them. - std::vector total_duration_before_; -}; - -struct TravelBounds { - std::vector min_travels; - std::vector max_travels; - std::vector pre_travels; - std::vector post_travels; -}; - -void AppendTasksFromPath(absl::Span path, - const TravelBounds& travel_bounds, - const RoutingDimension& dimension, - DisjunctivePropagator::Tasks* tasks); -void AppendTasksFromIntervals(const std::vector& intervals, - DisjunctivePropagator::Tasks* tasks); void FillPathEvaluation(absl::Span path, const RoutingModel::TransitCallback2& evaluator, std::vector* values); -void FillTravelBoundsOfVehicle(int vehicle, absl::Span path, - const RoutingDimension& dimension, - TravelBounds* travel_bounds); #endif // !defined(SWIG) -/// GlobalVehicleBreaksConstraint ensures breaks constraints are enforced on -/// all vehicles in the dimension passed to its constructor. -/// It is intended to be used for dimensions representing time. -/// A break constraint ensures break intervals fit on the route of a vehicle. -/// For a given vehicle, it forces break intervals to be disjoint from visit -/// intervals, where visit intervals start at CumulVar(node) and last for -/// node_visit_transit[node]. Moreover, it ensures that there is enough time -/// between two consecutive nodes of a route to do transit and vehicle breaks, -/// i.e. if Next(nodeA) = nodeB, CumulVar(nodeA) = tA and CumulVar(nodeB) = tB, -/// then SlackVar(nodeA) >= sum_{breaks \subseteq [tA, tB)} duration(break). 
-class GlobalVehicleBreaksConstraint : public Constraint { - public: - explicit GlobalVehicleBreaksConstraint(const RoutingDimension* dimension); - std::string DebugString() const override { - return "GlobalVehicleBreaksConstraint"; - } - - void Post() override; - void InitialPropagate() override; - - private: - void PropagateNode(int node); - void PropagateVehicle(int vehicle); - - const RoutingModel* model_; - const RoutingDimension* const dimension_; - std::vector vehicle_demons_; - std::vector path_; - - /// Sets path_ to be the longest sequence such that - /// _ path_[0] is the start of the vehicle - /// _ Next(path_[i-1]) is Bound() and has value path_[i], - /// followed by the end of the vehicle if the last node was not an end. - void FillPartialPathOfVehicle(int vehicle); - void FillPathTravels(absl::Span path); - - /// This translates pruning information to solver variables. - /// If constructed with an IntervalVar*, it follows the usual semantics of - /// IntervalVars. If constructed with an IntVar*, before_start and - /// after_start, operations are translated to simulate an interval that starts - /// at start - before_start and ends and start + after_start. If constructed - /// with nothing, the TaskTranslator will do nothing. This class should have - /// been an interface + subclasses, but that would force pointers in the - /// user's task vector, which means dynamic allocation. With this union-like - /// structure, a vector's reserved size will adjust to usage and eventually no - /// more dynamic allocation will be made. 
- class TaskTranslator { - public: - TaskTranslator(IntVar* start, int64_t before_start, int64_t after_start) - : start_(start), - before_start_(before_start), - after_start_(after_start) {} - explicit TaskTranslator(IntervalVar* interval) : interval_(interval) {} - TaskTranslator() = default; - - void SetStartMin(int64_t value) { - if (start_ != nullptr) { - start_->SetMin(CapAdd(before_start_, value)); - } else if (interval_ != nullptr) { - interval_->SetStartMin(value); - } - } - void SetStartMax(int64_t value) { - if (start_ != nullptr) { - start_->SetMax(CapAdd(before_start_, value)); - } else if (interval_ != nullptr) { - interval_->SetStartMax(value); - } - } - void SetDurationMin(int64_t value) { - if (interval_ != nullptr) { - interval_->SetDurationMin(value); - } - } - void SetEndMin(int64_t value) { - if (start_ != nullptr) { - start_->SetMin(CapSub(value, after_start_)); - } else if (interval_ != nullptr) { - interval_->SetEndMin(value); - } - } - void SetEndMax(int64_t value) { - if (start_ != nullptr) { - start_->SetMax(CapSub(value, after_start_)); - } else if (interval_ != nullptr) { - interval_->SetEndMax(value); - } - } - - private: - IntVar* start_ = nullptr; - int64_t before_start_; - int64_t after_start_; - IntervalVar* interval_ = nullptr; - }; - - /// Route and interval variables are normalized to the following values. - std::vector task_translators_; - - /// This is used to restrict bounds of tasks. - DisjunctivePropagator disjunctive_propagator_; - DisjunctivePropagator::Tasks tasks_; - - /// Used to help filling tasks_ at each propagation. - TravelBounds travel_bounds_; -}; - class TypeRegulationsChecker { public: explicit TypeRegulationsChecker(const RoutingModel& model); @@ -3035,7 +2871,7 @@ class TypeRequirementChecker : public TypeRegulationsChecker { /// Verifies that for each set in required_type_alternatives, at least one of /// the required types is on the route at position 'pos'. 
bool CheckRequiredTypesCurrentlyOnRoute( - const std::vector >& required_type_alternatives, + absl::Span> required_type_alternatives, int pos); // clang-format on bool CheckTypeRegulations(int type, VisitTypePolicy policy, int pos) override; @@ -3692,9 +3528,9 @@ class RoutingDimension { const RoutingDimension* base_dimension); RoutingDimension(RoutingModel* model, std::vector vehicle_capacities, const std::string& name, SelfBased); - void Initialize(const std::vector& transit_evaluators, - const std::vector& cumul_dependent_transit_evaluators, - const std::vector& state_dependent_transit_evaluators, + void Initialize(absl::Span transit_evaluators, + absl::Span cumul_dependent_transit_evaluators, + absl::Span state_dependent_transit_evaluators, int64_t slack_max); void InitializeCumuls(); void InitializeTransits( @@ -3813,10 +3649,5 @@ bool SolveModelWithSat(RoutingModel* model, const Assignment* initial_solution, Assignment* solution); -#if !defined(SWIG) -IntVarLocalSearchFilter* MakeVehicleBreaksFilter( - const RoutingModel& routing_model, const RoutingDimension& dimension); -#endif - } // namespace operations_research::routing #endif // OR_TOOLS_ROUTING_ROUTING_H_ diff --git a/ortools/routing/samples/cvrptw_break.py b/ortools/routing/samples/cvrptw_break.py index b243d7b9b7..c9a8e05ad5 100755 --- a/ortools/routing/samples/cvrptw_break.py +++ b/ortools/routing/samples/cvrptw_break.py @@ -27,6 +27,7 @@ Distances are in meters and time in minutes. 
import functools from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/simple_routing_program.py b/ortools/routing/samples/simple_routing_program.py index c01276e41a..2c1e6f44b6 100644 --- a/ortools/routing/samples/simple_routing_program.py +++ b/ortools/routing/samples/simple_routing_program.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/tsp_circuit_board.py b/ortools/routing/samples/tsp_circuit_board.py index a0cfcc9037..84e6c61c01 100644 --- a/ortools/routing/samples/tsp_circuit_board.py +++ b/ortools/routing/samples/tsp_circuit_board.py @@ -19,6 +19,7 @@ import math from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/tsp_cities.py b/ortools/routing/samples/tsp_cities.py index ce65be83bd..36d4d729a7 100644 --- a/ortools/routing/samples/tsp_cities.py +++ b/ortools/routing/samples/tsp_cities.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/tsp_distance_matrix.py b/ortools/routing/samples/tsp_distance_matrix.py index bb9f531bc8..b4d8394990 100644 --- a/ortools/routing/samples/tsp_distance_matrix.py +++ b/ortools/routing/samples/tsp_distance_matrix.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp.py b/ortools/routing/samples/vrp.py index 0e91e9d38b..f0a55575d8 100644 --- a/ortools/routing/samples/vrp.py +++ b/ortools/routing/samples/vrp.py @@ -95,6 +95,7 @@ def print_solution(manager, routing, solution): total_distance += route_distance print(f"Total Distance of all routes: {total_distance}m") + # [END solution_printer] diff --git 
a/ortools/routing/samples/vrp_breaks.py b/ortools/routing/samples/vrp_breaks.py index 6e3bb2f64e..f0e1c4c31f 100755 --- a/ortools/routing/samples/vrp_breaks.py +++ b/ortools/routing/samples/vrp_breaks.py @@ -26,6 +26,7 @@ Durations are in minutes. # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_capacity.py b/ortools/routing/samples/vrp_capacity.py index d380ad3a8f..e9650a0045 100644 --- a/ortools/routing/samples/vrp_capacity.py +++ b/ortools/routing/samples/vrp_capacity.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_drop_nodes.py b/ortools/routing/samples/vrp_drop_nodes.py index b74d827cd2..7b32bf5864 100644 --- a/ortools/routing/samples/vrp_drop_nodes.py +++ b/ortools/routing/samples/vrp_drop_nodes.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_global_span.py b/ortools/routing/samples/vrp_global_span.py index 7d474eff0b..d5f35b96cd 100644 --- a/ortools/routing/samples/vrp_global_span.py +++ b/ortools/routing/samples/vrp_global_span.py @@ -26,6 +26,7 @@ Distances are in meters. 
# [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] @@ -84,6 +85,7 @@ def print_solution(data, manager, routing, solution): max_route_distance = max(route_distance, max_route_distance) print(f"Maximum of the route distances: {max_route_distance}m") + # [END solution_printer] diff --git a/ortools/routing/samples/vrp_initial_routes.py b/ortools/routing/samples/vrp_initial_routes.py index 97957fcbfe..e97258e8f3 100644 --- a/ortools/routing/samples/vrp_initial_routes.py +++ b/ortools/routing/samples/vrp_initial_routes.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] @@ -86,6 +87,7 @@ def print_solution(data, manager, routing, solution): max_route_distance = max(route_distance, max_route_distance) print(f"Maximum of the route distances: {max_route_distance}m") + # [END solution_printer] diff --git a/ortools/routing/samples/vrp_node_max.py b/ortools/routing/samples/vrp_node_max.py index 00b84fc805..75b1527d1c 100755 --- a/ortools/routing/samples/vrp_node_max.py +++ b/ortools/routing/samples/vrp_node_max.py @@ -22,6 +22,7 @@ road multiply by a constant factor (4200) # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] @@ -74,6 +75,7 @@ def create_data_model(): data["depot"] = 0 return data + # [END data_model] @@ -119,6 +121,7 @@ def print_solution(data, manager, routing, solution): max_route_distance = max(route_distance, max_route_distance) print(f"Maximum of the route distances: {max_route_distance}m") + # [END solution_printer] diff --git a/ortools/routing/samples/vrp_pickup_delivery.py b/ortools/routing/samples/vrp_pickup_delivery.py index 049b40f101..91bff0edfd 100755 --- a/ortools/routing/samples/vrp_pickup_delivery.py +++ b/ortools/routing/samples/vrp_pickup_delivery.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from 
ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_pickup_delivery_fifo.py b/ortools/routing/samples/vrp_pickup_delivery_fifo.py index c35af1f7b0..a4997cd85a 100755 --- a/ortools/routing/samples/vrp_pickup_delivery_fifo.py +++ b/ortools/routing/samples/vrp_pickup_delivery_fifo.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_pickup_delivery_lifo.py b/ortools/routing/samples/vrp_pickup_delivery_lifo.py index 044bfd8b4e..8b3f1b294b 100755 --- a/ortools/routing/samples/vrp_pickup_delivery_lifo.py +++ b/ortools/routing/samples/vrp_pickup_delivery_lifo.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_resources.py b/ortools/routing/samples/vrp_resources.py index 941c173581..49dda8407b 100644 --- a/ortools/routing/samples/vrp_resources.py +++ b/ortools/routing/samples/vrp_resources.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_solution_callback.py b/ortools/routing/samples/vrp_solution_callback.py index b1ce1589f9..2970fd56c1 100755 --- a/ortools/routing/samples/vrp_solution_callback.py +++ b/ortools/routing/samples/vrp_solution_callback.py @@ -28,6 +28,7 @@ import weakref from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] @@ -90,6 +91,7 @@ def print_solution( total_distance += route_distance print(f"Total Distance of all routes: {total_distance}m") + # [END solution_callback_printer] @@ -123,6 +125,7 @@ class SolutionCallback: if self._counter > self._counter_limit: self._routing_model_ref().solver().FinishCurrentSearch() # pytype: disable=attribute-error + # [END solution_callback] diff --git 
a/ortools/routing/samples/vrp_starts_ends.py b/ortools/routing/samples/vrp_starts_ends.py index cd1cdded2d..354ed7ab12 100644 --- a/ortools/routing/samples/vrp_starts_ends.py +++ b/ortools/routing/samples/vrp_starts_ends.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_time_windows.py b/ortools/routing/samples/vrp_time_windows.py index eee9aca4a1..fd48a59de5 100644 --- a/ortools/routing/samples/vrp_time_windows.py +++ b/ortools/routing/samples/vrp_time_windows.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_tokens.py b/ortools/routing/samples/vrp_tokens.py index 4fcc44c271..4811ddc103 100755 --- a/ortools/routing/samples/vrp_tokens.py +++ b/ortools/routing/samples/vrp_tokens.py @@ -17,6 +17,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrp_with_time_limit.py b/ortools/routing/samples/vrp_with_time_limit.py index 831e5da316..8828190af0 100644 --- a/ortools/routing/samples/vrp_with_time_limit.py +++ b/ortools/routing/samples/vrp_with_time_limit.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] diff --git a/ortools/routing/samples/vrptw_store_solution_data.py b/ortools/routing/samples/vrptw_store_solution_data.py index 08b6c404bb..877be0544b 100644 --- a/ortools/routing/samples/vrptw_store_solution_data.py +++ b/ortools/routing/samples/vrptw_store_solution_data.py @@ -18,6 +18,7 @@ # [START import] from ortools.routing import enums_pb2 from ortools.routing import pywraprouting + # [END import] @@ -68,6 +69,7 @@ def create_data_model(): data["depot"] = 0 return data + # [END data_model] @@ -108,6 +110,7 @@ def 
print_solution(routes, cumul_data): route_str += f"Total time: {total_time}min" print(route_str) + # [END solution_printer] @@ -126,6 +129,7 @@ def get_routes(solution, routing, manager): routes.append(route) return routes + # [END get_routes] @@ -150,6 +154,7 @@ def get_cumul_data(solution, routing, dimension): cumul_data.append(route_data) return cumul_data + # [END get_cumulative_data] diff --git a/ortools/routing/sat.cc b/ortools/routing/sat.cc index 06ecbc589e..9395d86c36 100644 --- a/ortools/routing/sat.cc +++ b/ortools/routing/sat.cc @@ -33,7 +33,7 @@ #include "ortools/routing/types.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_solver.h" -#include "ortools/sat/integer.h" +#include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/util/bitset.h" diff --git a/ortools/routing/search.cc b/ortools/routing/search.cc index 3c282e9b45..c39bff40b8 100644 --- a/ortools/routing/search.cc +++ b/ortools/routing/search.cc @@ -59,7 +59,9 @@ #include "ortools/constraint_solver/constraint_solveri.h" #include "ortools/graph/christofides.h" #include "ortools/routing/enums.pb.h" +#include "ortools/routing/heuristic_parameters.pb.h" #include "ortools/routing/parameters.pb.h" +#include "ortools/routing/parameters_utils.h" #include "ortools/routing/routing.h" #include "ortools/routing/types.h" #include "ortools/routing/utils.h" @@ -1055,6 +1057,9 @@ bool GlobalCheapestInsertionFilteredHeuristic::BuildSolutionInternal() { if (!InsertPairsAndNodesByRequirementTopologicalOrder()) { return unperform_unassigned_and_check(); } + if (!InsertPairsAndNodesByPrecedenceTopologicalOrder()) { + return unperform_unassigned_and_check(); + } // TODO(user): Adapt the pair insertions to also support seed and // sequential insertion. 
@@ -1103,6 +1108,46 @@ bool GlobalCheapestInsertionFilteredHeuristic:: return true; } +bool GlobalCheapestInsertionFilteredHeuristic:: + InsertPairsAndNodesByPrecedenceTopologicalOrder() { + const std::vector& pickup_delivery_pairs = + model()->GetPickupAndDeliveryPairs(); + for (const std::vector>& ordered_nodes : + model()->GetTopologicallySortedNodePrecedences()) { + for (const std::vector& nodes : ordered_nodes) { + std::map> pairs_to_insert_by_bucket; + for (int node : nodes) { + if (Contains(node)) continue; + if (!model()->IsPickup(node) && !model()->IsDelivery(node)) continue; + const std::optional + pickup_position = model()->GetPickupPosition(node); + if (pickup_position.has_value()) { + const int index = pickup_position->pd_pair_index; + pairs_to_insert_by_bucket[GetBucketOfPair( + pickup_delivery_pairs[index])] + .push_back(index); + } + const std::optional + delivery_position = model()->GetDeliveryPosition(node); + if (delivery_position.has_value()) { + const int index = delivery_position->pd_pair_index; + pairs_to_insert_by_bucket[GetBucketOfPair( + pickup_delivery_pairs[index])] + .push_back(index); + } + } + if (!InsertPairs(pairs_to_insert_by_bucket)) return false; + std::map> nodes_by_bucket; + for (int node : nodes) { + if (Contains(node)) continue; + nodes_by_bucket[GetBucketOfNode(node)].push_back(node); + } + if (!InsertNodesOnRoutes(nodes_by_bucket, {})) return false; + } + } + return true; +} + bool GlobalCheapestInsertionFilteredHeuristic::InsertPairs( const std::map>& pair_indices_by_bucket) { AdjustablePriorityQueue priority_queue; @@ -2330,7 +2375,7 @@ bool GlobalCheapestInsertionFilteredHeuristic::AddNodeEntriesAfter( const auto add_node_entries_for_neighbors = [this, &nodes, &queue, insert_after, vehicle, all_vehicles]( - const std::vector& neighbors, + absl::Span neighbors, const std::function& is_neighbor) { if (neighbors.size() < nodes.NumberOfSetCallsWithDifferentArguments()) { // Iterate on the neighbors of 'node'. 
@@ -2524,10 +2569,7 @@ LocalCheapestInsertionFilteredHeuristic:: LocalCheapestInsertionFilteredHeuristic( RoutingModel* model, std::function stop_search, std::function evaluator, - LocalCheapestInsertionParameters::PairInsertionStrategy - pair_insertion_strategy, - std::vector - insertion_sorting_properties, + LocalCheapestInsertionParameters lci_params, LocalSearchFilterManager* filter_manager, bool use_first_solution_hint, BinCapacities* bin_capacities, std::function&, @@ -2536,8 +2578,9 @@ LocalCheapestInsertionFilteredHeuristic:: : CheapestInsertionFilteredHeuristic(model, std::move(stop_search), std::move(evaluator), nullptr, filter_manager), - pair_insertion_strategy_(pair_insertion_strategy), - insertion_sorting_properties_(std::move(insertion_sorting_properties)), + pair_insertion_strategy_(lci_params.pickup_delivery_strategy()), + insertion_sorting_properties_(GetLocalCheapestInsertionSortingProperties( + lci_params.insertion_sorting_properties())), use_first_solution_hint_(use_first_solution_hint), bin_capacities_(bin_capacities), optimize_on_insertion_(std::move(optimize_on_insertion)), @@ -4176,11 +4219,11 @@ SavingsFilteredHeuristic::SavingsFilteredHeuristic( SavingsParameters parameters, LocalSearchFilterManager* filter_manager) : RoutingFilteredHeuristic(model, std::move(stop_search), filter_manager), vehicle_type_curator_(nullptr), - savings_params_(parameters) { - DCHECK_GT(savings_params_.neighbors_ratio, 0); - DCHECK_LE(savings_params_.neighbors_ratio, 1); - DCHECK_GT(savings_params_.max_memory_usage_bytes, 0); - DCHECK_GT(savings_params_.arc_coefficient, 0); + savings_params_(std::move(parameters)) { + DCHECK_GT(savings_params_.neighbors_ratio(), 0); + DCHECK_LE(savings_params_.neighbors_ratio(), 1); + DCHECK_GT(savings_params_.max_memory_usage_bytes(), 0); + DCHECK_GT(savings_params_.arc_coefficient(), 0); } SavingsFilteredHeuristic::~SavingsFilteredHeuristic() = default; @@ -4315,7 +4358,7 @@ bool SavingsFilteredHeuristic::ComputeSavings() { 
return cost_and_node.second; }); } - if (savings_params_.add_reverse_arcs) { + if (savings_params_.add_reverse_arcs()) { AddSymmetricArcsToAdjacencyLists(&adjacency_lists); } if (StopSearch()) return false; @@ -4342,7 +4385,7 @@ bool SavingsFilteredHeuristic::ComputeSavings() { model()->GetArcCostForClass(after_node, end, cost_class); const double weighted_arc_cost_fp = - savings_params_.arc_coefficient * arc_cost; + savings_params_.arc_coefficient() * arc_cost; const int64_t weighted_arc_cost = weighted_arc_cost_fp < std::numeric_limits::max() ? static_cast(weighted_arc_cost_fp) @@ -4370,14 +4413,14 @@ int64_t SavingsFilteredHeuristic::MaxNumNeighborsPerNode( const int64_t size = model()->Size(); const int64_t num_neighbors_with_ratio = - std::max(1.0, size * savings_params_.neighbors_ratio); + std::max(1.0, size * savings_params_.neighbors_ratio()); // A single Saving takes 2*8 bytes of memory. // max_memory_usage_in_savings_unit = num_savings * multiplicative_factor, // Where multiplicative_factor is the memory taken (in Savings unit) for each // computed Saving. 
const double max_memory_usage_in_savings_unit = - savings_params_.max_memory_usage_bytes / 16; + savings_params_.max_memory_usage_bytes() / 16; // In the SavingsContainer, for each Saving, the Savings are stored: // - Once in "sorted_savings_per_vehicle_type", and (at most) once in @@ -5189,7 +5232,7 @@ class RouteConstructor { return true; } - bool FeasibleMerge(const std::vector& route1, + bool FeasibleMerge(absl::Span route1, const std::vector& route2, int node1, int node2, int route_index1, int route_index2, int vehicle_class, int64_t start_depot, int64_t end_depot) { diff --git a/ortools/routing/search.h b/ortools/routing/search.h index aee5e5150e..95da515b49 100644 --- a/ortools/routing/search.h +++ b/ortools/routing/search.h @@ -41,6 +41,7 @@ #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/constraint_solver/constraint_solveri.h" #include "ortools/routing/enums.pb.h" +#include "ortools/routing/heuristic_parameters.pb.h" #include "ortools/routing/parameters.pb.h" #include "ortools/routing/routing.h" #include "ortools/routing/types.h" @@ -640,6 +641,13 @@ class GlobalCheapestInsertionFilteredHeuristic /// case nodes are inserted based on the topological order of their type, /// given by the routing model's GetTopologicallySortedVisitTypes() method. bool InsertPairsAndNodesByRequirementTopologicalOrder(); + /// Inserts non-inserted single nodes or pickup/delivery pairs which are in + /// precedence constraints. + /// These nodes are inserted iff the precedence graph is acyclic, in which + /// case nodes are inserted based on the topological order of the precedence + /// graph, given by the routing model's + /// GetTopologicallySortedNodePrecedences() method. + bool InsertPairsAndNodesByPrecedenceTopologicalOrder(); /// Inserts non-inserted pickup and delivery pairs. 
Maintains a priority /// queue of possible pair insertions, which is incrementally updated when a @@ -1130,10 +1138,7 @@ class LocalCheapestInsertionFilteredHeuristic LocalCheapestInsertionFilteredHeuristic( RoutingModel* model, std::function stop_search, std::function evaluator, - LocalCheapestInsertionParameters::PairInsertionStrategy - pair_insertion_strategy, - std::vector - insertion_sorting_properties, + LocalCheapestInsertionParameters lci_params, LocalSearchFilterManager* filter_manager, bool use_first_solution_hint, BinCapacities* bin_capacities = nullptr, std::function&, @@ -1335,21 +1340,6 @@ class ComparatorCheapestAdditionFilteredHeuristic /// and cost classes are taken into account. class SavingsFilteredHeuristic : public RoutingFilteredHeuristic { public: - struct SavingsParameters { - /// If neighbors_ratio < 1 then for each node only this ratio of its - /// neighbors leading to the smallest arc costs are considered. - double neighbors_ratio = 1.0; - /// The number of neighbors considered for each node is also adapted so that - /// the stored Savings don't use up more than max_memory_usage_bytes bytes. - double max_memory_usage_bytes = 6e9; - /// If add_reverse_arcs is true, the neighborhood relationships are - /// considered symmetrically. - bool add_reverse_arcs = false; - /// arc_coefficient is a strictly positive parameter indicating the - /// coefficient of the arc being considered in the Saving formula. 
- double arc_coefficient = 1.0; - }; - SavingsFilteredHeuristic(RoutingModel* model, std::function stop_search, SavingsParameters parameters, diff --git a/ortools/routing/types.h b/ortools/routing/types.h index 24422b7168..fc7b9197f4 100644 --- a/ortools/routing/types.h +++ b/ortools/routing/types.h @@ -18,8 +18,8 @@ #include #include -#include "ortools/base/int_type.h" #include "ortools/util/piecewise_linear_function.h" +#include "ortools/util/strong_integers.h" namespace operations_research::routing { @@ -32,12 +32,12 @@ namespace operations_research::routing { /// /// Users that depend on routing.{h,cc} should just use the /// RoutingModel:: equivalent, eg. RoutingModel::NodeIndex. -DEFINE_INT_TYPE(RoutingNodeIndex, int); -DEFINE_INT_TYPE(RoutingCostClassIndex, int); -DEFINE_INT_TYPE(RoutingDimensionIndex, int); -DEFINE_INT_TYPE(RoutingDisjunctionIndex, int); -DEFINE_INT_TYPE(RoutingVehicleClassIndex, int); -DEFINE_INT_TYPE(RoutingResourceClassIndex, int); +DEFINE_STRONG_INDEX_TYPE(RoutingNodeIndex); +DEFINE_STRONG_INDEX_TYPE(RoutingCostClassIndex); +DEFINE_STRONG_INDEX_TYPE(RoutingDimensionIndex); +DEFINE_STRONG_INDEX_TYPE(RoutingDisjunctionIndex); +DEFINE_STRONG_INDEX_TYPE(RoutingVehicleClassIndex); +DEFINE_STRONG_INDEX_TYPE(RoutingResourceClassIndex); /// Pickup and delivery pair representation, including alternatives for pickups /// and deliveries respectively. 
diff --git a/ortools/routing/utils.cc b/ortools/routing/utils.cc index ca34345170..0c42ba9af9 100644 --- a/ortools/routing/utils.cc +++ b/ortools/routing/utils.cc @@ -14,6 +14,8 @@ #include "ortools/routing/utils.h" #include +#include +#include #include #include #include From 3434ae4839589146ac75b983446bcb39816bb182 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Jun 2025 16:54:12 +0200 Subject: [PATCH 108/509] tools/release: fixup --- tools/release/amd64.Dockerfile | 6 +- tools/release/arm64.Dockerfile | 6 +- tools/release/build_delivery_macos.sh | 64 +++++---- tools/release/publish_delivery_linux.sh | 4 +- .../publish_delivery_manylinux_amd64.sh | 5 +- .../publish_delivery_manylinux_arm64.sh | 5 +- tools/release/publish_delivery_meta.sh | 126 ++++++++++++++++++ tools/release/publish_delivery_win.cmd | 4 +- 8 files changed, 182 insertions(+), 38 deletions(-) create mode 100755 tools/release/publish_delivery_meta.sh diff --git a/tools/release/amd64.Dockerfile b/tools/release/amd64.Dockerfile index b95ad2405e..1622d7368f 100644 --- a/tools/release/amd64.Dockerfile +++ b/tools/release/amd64.Dockerfile @@ -36,10 +36,10 @@ RUN dnf -y update \ ENV JAVA_HOME=/usr/lib/jvm/java # Update maven -ADD https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz /usr/local +ADD https://dlcdn.apache.org/maven/maven-3/3.9.10/binaries/apache-maven-3.9.10-bin.tar.gz /usr/local RUN mkdir -p /usr/local/maven \ - && tar xzvf /usr/local/apache-maven-3.9.9-bin.tar.gz --strip-components=1 -C /usr/local/maven \ - && rm /usr/local/apache-maven-3.9.9-bin.tar.gz + && tar xzvf /usr/local/apache-maven-3.9.10-bin.tar.gz --strip-components=1 -C /usr/local/maven \ + && rm /usr/local/apache-maven-3.9.10-bin.tar.gz ENV PATH=/usr/local/maven/bin:$PATH ENV TZ=America/Los_Angeles diff --git a/tools/release/arm64.Dockerfile b/tools/release/arm64.Dockerfile index b19b71c8fc..138c653bb5 100644 --- a/tools/release/arm64.Dockerfile +++ b/tools/release/arm64.Dockerfile 
@@ -41,10 +41,10 @@ RUN dnf -y update \ ENV JAVA_HOME=/usr/lib/jvm/java # Update maven -ADD https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz /usr/local +ADD https://dlcdn.apache.org/maven/maven-3/3.9.10/binaries/apache-maven-3.9.10-bin.tar.gz /usr/local RUN mkdir -p /usr/local/maven \ - && tar xzvf /usr/local/apache-maven-3.9.9-bin.tar.gz --strip-components=1 -C /usr/local/maven \ - && rm /usr/local/apache-maven-3.9.9-bin.tar.gz + && tar xzvf /usr/local/apache-maven-3.9.10-bin.tar.gz --strip-components=1 -C /usr/local/maven \ + && rm /usr/local/apache-maven-3.9.10-bin.tar.gz ENV PATH=/usr/local/maven/bin:$PATH ENV TZ=America/Los_Angeles diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 89f401031a..72f82ba5e5 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -63,12 +63,15 @@ function build_dotnet() { fi cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." command -v swig command -v swig | xargs echo "swig: " | tee -a build.log - echo "check dotnet..." + echo "DONE" | tee -a build.log + + echo -n "check dotnet..." command -v dotnet command -v dotnet | xargs echo "dotnet: " | tee -a build.log + echo "DONE" | tee -a build.log # Install .Net SNK echo -n "Install .Net SNK..." | tee -a build.log @@ -76,7 +79,8 @@ function build_dotnet() { if [[ -x $(command -v openssl11) ]]; then OPENSSL_PRG=openssl11 fi - echo "check ${OPENSSL_PRG}..." + echo "DONE" | tee -a build.log + echo -n "check ${OPENSSL_PRG}..." command -v ${OPENSSL_PRG} | xargs echo "openssl: " | tee -a build.log $OPENSSL_PRG aes-256-cbc -iter 42 -pass pass:"$ORTOOLS_TOKEN" \ @@ -91,12 +95,12 @@ function build_dotnet() { rm -rf "${ROOT_DIR}/temp_dotnet" echo "DONE" | tee -a build.log - echo -n "Build .Net..." | tee -a build.log + echo "Build .Net..." | tee -a build.log cmake -S. 
-Btemp_dotnet -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_DOTNET=ON cmake --build temp_dotnet -j8 -v - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L temp_dotnet/lib/libortools.dylib | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_dotnet --target test #echo "cmake test: DONE" | tee -a build.log @@ -114,9 +118,11 @@ function build_java() { fi cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." command -v swig command -v swig | xargs echo "swig: " | tee -a build.log + echo "DONE" | tee -a build.log + # maven require JAVA_HOME if [[ -z "${JAVA_HOME}" ]]; then echo "JAVA_HOME: not found !" | tee -a build.log @@ -172,21 +178,19 @@ function build_java() { rm -rf "${ROOT_DIR}/temp_java" echo "DONE" | tee -a build.log - echo -n "Build Java..." | tee -a build.log - + echo "Build Java..." | tee -a build.log if [[ ! -v GPG_ARGS ]]; then GPG_EXTRA="" else GPG_EXTRA="-DGPG_ARGS=${GPG_ARGS}" fi - # shellcheck disable=SC2086 # cmake fail to parse empty string "" cmake -S. -Btemp_java -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF \ -DBUILD_JAVA=ON -DSKIP_GPG=OFF ${GPG_EXTRA} cmake --build temp_java -j8 -v - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L temp_java/lib/libortools.dylib | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_java --target test #echo "cmake test: DONE" | tee -a build.log @@ -214,9 +218,10 @@ function build_python() { PATH_BCKP=${PATH} cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." 
command -v swig command -v swig | xargs echo "swig: " | tee -a build.log + echo "DONE" | tee -a build.log if [[ ${PLATFORM} == "arm64" ]]; then local -r PY=(3.9 3.10 3.11 3.12 3.13) @@ -275,7 +280,7 @@ function build_python() { echo -n "Cleaning Python ${PY_VERSION}..." | tee -a build.log rm -rf "temp_python${PY_VERSION}" echo "DONE" | tee -a build.log - + echo "Build Python ${PY_VERSION}..." | tee -a build.log echo -n " CMake configure..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" @@ -349,26 +354,27 @@ function build_archive() { echo -n "Clean previous archive..." | tee -a build.log make clean_archive + echo "DONE" | tee -a build.log - echo -n "Make cpp archive..." | tee -a build.log + echo "Make cpp archive..." | tee -a build.log make archive_cpp - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log - echo -n "Make dotnet archive..." | tee -a build.log + echo "Make dotnet archive..." | tee -a build.log make archive_dotnet - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log - echo -n "Make java archive..." | tee -a build.log + echo "Make java archive..." | tee -a build.log make archive_java - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." 
| tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log # move archive to export @@ -390,16 +396,26 @@ function build_examples() { echo "Check Sed version..." sed --version 2>&1 | head -n 1 | grep "GNU sed.*\b4" + echo -n "Clean previous example archives..." | tee -a build.log rm -rf temp ./*.tar.gz - echo -n "Build examples archives..." | tee -a build.log + echo "DONE" | tee -a build.log + + echo "Build examples archives..." | tee -a build.log + echo -n " Python examples archive..." | tee -a build.log make python_examples_archive UNIX_PYTHON_VER=3 + echo "DONE" | tee -a build.log + echo -n " Java examples archive..." | tee -a build.log make java_examples_archive UNIX_PYTHON_VER=3 + echo "DONE" | tee -a build.log + echo -n " .Net examples archive..." | tee -a build.log make dotnet_examples_archive UNIX_PYTHON_VER=3 echo "DONE" | tee -a build.log + echo "DONE" | tee -a build.log + # move example to export/ mv or-tools_*_examples_*.tar.gz export/ echo "${ORTOOLS_BRANCH} ${ORTOOLS_SHA1}" > "${ROOT_DIR}/export/examples_build" diff --git a/tools/release/publish_delivery_linux.sh b/tools/release/publish_delivery_linux.sh index fcd132729e..a082bb3dc2 100755 --- a/tools/release/publish_delivery_linux.sh +++ b/tools/release/publish_delivery_linux.sh @@ -76,9 +76,9 @@ function publish_java() { if [[ -x "$(command -v openssl11)" ]]; then OPENSSL_PRG=openssl11 fi - command -v $OPENSSL_PRG | xargs echo "openssl: " | tee -a build.log + command -v $OPENSSL_PRG | xargs echo "openssl: " | tee -a publish.log command -v gpg - command -v gpg | xargs echo "gpg: " | tee -a build.log + command -v gpg | xargs echo "gpg: " | tee -a publish.log echo -n "Publish native Java..." 
| tee -a publish.log cmake --build temp_java --target java_native_deploy -v diff --git a/tools/release/publish_delivery_manylinux_amd64.sh b/tools/release/publish_delivery_manylinux_amd64.sh index 9f67999de1..17236e7688 100755 --- a/tools/release/publish_delivery_manylinux_amd64.sh +++ b/tools/release/publish_delivery_manylinux_amd64.sh @@ -109,7 +109,7 @@ function main() { local -r RELEASE_DIR="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" echo "RELEASE_DIR: '${RELEASE_DIR}'" | tee -a publish.log - (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a build.log) + (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a publish.log) local -r ORTOOLS_BRANCH=$(git rev-parse --abbrev-ref HEAD) local -r ORTOOLS_SHA1=$(git rev-parse --verify HEAD) @@ -124,8 +124,9 @@ function main() { "publish_$1" exit ;; all) + #publish_dotnet publish_java - publish_python + #publish_python exit ;; *) >&2 echo "Target '${1}' unknown" diff --git a/tools/release/publish_delivery_manylinux_arm64.sh b/tools/release/publish_delivery_manylinux_arm64.sh index 5c444b2356..7c1e655f32 100755 --- a/tools/release/publish_delivery_manylinux_arm64.sh +++ b/tools/release/publish_delivery_manylinux_arm64.sh @@ -109,7 +109,7 @@ function main() { local -r RELEASE_DIR="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" echo "RELEASE_DIR: '${RELEASE_DIR}'" | tee -a publish.log - (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a build.log) + (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a publish.log) local -r ORTOOLS_BRANCH=$(git rev-parse --abbrev-ref HEAD) local -r ORTOOLS_SHA1=$(git rev-parse --verify HEAD) @@ -124,8 +124,9 @@ function main() { "publish_$1" exit ;; all) + #publish_dotnet publish_java - publish_python + #publish_python exit ;; *) >&2 echo "Target '${1}' unknown" diff --git a/tools/release/publish_delivery_meta.sh b/tools/release/publish_delivery_meta.sh new file mode 100755 index 0000000000..14df6f295f --- /dev/null +++ b/tools/release/publish_delivery_meta.sh @@ -0,0 
+1,126 @@ +#!/usr/bin/env bash +# Copyright 2010-2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +function help() { + local -r NAME=$(basename "$0") + local -r BOLD="\e[1m" + local -r RESET="\e[0m" + local -r help=$(cat << EOF +${BOLD}NAME${RESET} +\t$NAME - Publish delivery using the ${BOLD}local host system${RESET}. +${BOLD}SYNOPSIS${RESET} +\t$NAME [-h|--help] [java] +${BOLD}DESCRIPTION${RESET} +\tPublish Google OR-Tools deliveries. +\tYou ${BOLD}MUST${RESET} define the following variables before running this script: +\t* ORTOOLS_TOKEN: secret use to decrypt key to sign Java package. + +${BOLD}OPTIONS${RESET} +\t-h --help: display this help text +\tjava: publish the Java runtime packages +\tall: publish everything (default) + +${BOLD}EXAMPLES${RESET} +Using export to define the ${BOLD}ORTOOLS_TOKEN${RESET} env and only publishing the Java packages: +export ORTOOLS_TOKEN=SECRET +$0 java + +note: the 'export ORTOOLS_TOKEN=...' should be placed in your bashrc to avoid any leak +of the secret in your bash history +EOF +) + echo -e "$help" +} + +function assert_defined(){ + if [[ -z "${!1}" ]]; then + >&2 echo "Variable '${1}' must be defined" + exit 1 + fi +} + +# Java publish +function publish_java() { + if echo "${ORTOOLS_BRANCH} ${ORTOOLS_SHA1}" | cmp --silent "${ROOT_DIR}/export_meta/meta_java_publish" -; then + echo "publish Java up to date!" 
+ return 0 + fi + + # maven require JAVA_HOME + if [[ -z "${JAVA_HOME}" ]]; then + echo "JAVA_HOME: not found !" | tee publish.log + exit 1 + else + echo "JAVA_HOME: ${JAVA_HOME}" | tee -a publish.log + command -v mvn + command -v mvn | xargs echo "mvn: " | tee -a publish.log + java -version 2>&1 | tee -a publish.log + java -version 2>&1 | head -n 1 | grep -q "1.8" + fi + # Maven central need gpg sign and we store the release key encoded using openssl + local OPENSSL_PRG=openssl + if [[ -x "$(command -v openssl11)" ]]; then + OPENSSL_PRG=openssl11 + fi + command -v $OPENSSL_PRG | xargs echo "openssl: " | tee -a publish.log + command -v gpg + command -v gpg | xargs echo "gpg: " | tee -a publish.log + + echo -n "Publish native Java..." | tee -a publish.log + cmake --build temp_meta_java --config Release --target java_deploy -v + echo "DONE" | tee -a publish.log + + echo "${ORTOOLS_BRANCH} ${ORTOOLS_SHA1}" > "${ROOT_DIR}/export_meta/meta_java_publish" +} + +# Main +function main() { + case ${1} in + -h | --help) + help; exit ;; + esac + + assert_defined ORTOOLS_TOKEN + echo "ORTOOLS_TOKEN: FOUND" | tee publish.log + make print-OR_TOOLS_VERSION | tee -a publish.log + + local -r ROOT_DIR="$(cd -P -- "$(dirname -- "$0")/../.." 
&& pwd -P)" + echo "ROOT_DIR: '${ROOT_DIR}'" + + local -r RELEASE_DIR="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" + echo "RELEASE_DIR: '${RELEASE_DIR}'" + + local -r ORTOOLS_BRANCH=$(git rev-parse --abbrev-ref HEAD) + local -r ORTOOLS_SHA1=$(git rev-parse --verify HEAD) + local -r PLATFORM=$(uname -m) + + mkdir -p export + case ${1} in + java) + "publish_$1" + exit ;; + all) + publish_java + exit ;; + *) + >&2 echo "Target '${1}' unknown" + exit 1 + esac + exit 0 +} + +main "${1:-all}" + diff --git a/tools/release/publish_delivery_win.cmd b/tools/release/publish_delivery_win.cmd index 4af40864b2..644871aba2 100644 --- a/tools/release/publish_delivery_win.cmd +++ b/tools/release/publish_delivery_win.cmd @@ -83,9 +83,9 @@ which.exe mvn || exit 1 which.exe mvn | tee.exe -a publish.log which.exe gpg || exit 1 -which.exe gpg | tee.exe -a build.log +which.exe gpg | tee.exe -a publish.log which.exe openssl || exit 1 -which.exe openssl | tee.exe -a build.log +which.exe openssl | tee.exe -a publish.log echo Publish native Java... 
| tee.exe -a publish.log cmake --build temp_java --config Release --target java_native_deploy -v From 9888ce3da1a03a0d4d6a2bc705f622f2edfef660 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 19 Jun 2025 14:08:49 +0200 Subject: [PATCH 109/509] polish third_party_solvers; speed up cp-sat on no_ovrlap_2d --- ortools/math_opt/solvers/gurobi_solver.cc | 2 +- ortools/math_opt/solvers/gurobi_solver.h | 1 - ortools/sat/2d_distances_propagator.cc | 94 ++++++++++--------- ortools/sat/2d_distances_propagator.h | 15 ++- ortools/sat/BUILD.bazel | 1 - ortools/sat/precedences.cc | 7 +- ortools/sat/precedences.h | 73 +++++++++++++- ortools/third_party_solvers/BUILD.bazel | 1 + ortools/third_party_solvers/CMakeLists.txt | 4 +- .../third_party_solvers/gurobi_environment.cc | 62 +++++++----- .../third_party_solvers/gurobi_environment.h | 1 - 11 files changed, 177 insertions(+), 84 deletions(-) diff --git a/ortools/math_opt/solvers/gurobi_solver.cc b/ortools/math_opt/solvers/gurobi_solver.cc index 964864db0e..9c809f5e3f 100644 --- a/ortools/math_opt/solvers/gurobi_solver.cc +++ b/ortools/math_opt/solvers/gurobi_solver.cc @@ -31,7 +31,6 @@ #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/memory/memory.h" -#include "absl/meta/type_traits.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/escaping.h" @@ -2756,6 +2755,7 @@ absl::StatusOr GurobiSolver::Update( absl::StatusOr> GurobiSolver::New( const ModelProto& input_model, const SolverInterface::InitArgs& init_args) { + // TODO(user): Correctly load the gurobi library in open source. 
RETURN_IF_ERROR( ModelIsSupported(input_model, kGurobiSupportedStructures, "Gurobi")); if (!input_model.auxiliary_objectives().empty() && diff --git a/ortools/math_opt/solvers/gurobi_solver.h b/ortools/math_opt/solvers/gurobi_solver.h index 03478bcd76..81a8b24558 100644 --- a/ortools/math_opt/solvers/gurobi_solver.h +++ b/ortools/math_opt/solvers/gurobi_solver.h @@ -46,7 +46,6 @@ #include "ortools/third_party_solvers/gurobi_environment.h" #include "ortools/util/solve_interrupter.h" - namespace operations_research { namespace math_opt { diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc index 65e949c206..2053e29581 100644 --- a/ortools/sat/2d_distances_propagator.cc +++ b/ortools/sat/2d_distances_propagator.cc @@ -32,7 +32,6 @@ #include "ortools/sat/precedences.h" #include "ortools/sat/scheduling_helpers.h" #include "ortools/sat/synchronization.h" -#include "ortools/util/bitset.h" namespace operations_research { namespace sat { @@ -42,22 +41,14 @@ Precedences2DPropagator::Precedences2DPropagator( : helper_(*helper), linear2_bounds_(model->GetOrCreate()), linear2_watcher_(model->GetOrCreate()), - shared_stats_(model->GetOrCreate()) { + shared_stats_(model->GetOrCreate()), + non_trivial_bounds_( + model->GetOrCreate()) { model->GetOrCreate()->SetPushAffineUbForBinaryRelation(); } -void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { - helper_.SynchronizeAndSetDirection(); - non_trivial_pairs_.clear(); - - struct VarUsage { - // boxes[0=x, 1=y][0=start, 1=end] - std::vector boxes[2][2]; - }; - absl::flat_hash_map var_to_box_and_coeffs; - SparseBitset& var_set = - *linear2_bounds_->GetTemporyClearedAndResizedBitset(); - +void Precedences2DPropagator::UpdateVarLookups() { + var_to_box_and_coeffs_.clear(); for (int dim = 0; dim < 2; ++dim) { const SchedulingConstraintHelper& dim_helper = dim == 0 ? 
helper_.x_helper() : helper_.y_helper(); @@ -67,41 +58,52 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { for (int i = 0; i < helper_.NumBoxes(); ++i) { const IntegerVariable var = interval_points[i].var; if (var != kNoIntegerVariable) { - var_set.Set(PositiveVariable(var)); - var_to_box_and_coeffs[PositiveVariable(var)].boxes[dim][j].push_back( + var_to_box_and_coeffs_[PositiveVariable(var)].boxes[dim][j].push_back( i); } } } } +} +void Precedences2DPropagator::CollectNewPairsOfBoxesWithNonTrivialDistance() { const absl::Span exprs = - linear2_bounds_->GetAllExpressionsWithPotentialNonTrivialBounds( - var_set.BitsetConstView()); - VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " - << exprs.size(); - for (const LinearExpression2& expr : exprs) { - auto it1 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[0])); - auto it2 = var_to_box_and_coeffs.find(PositiveVariable(expr.vars[1])); - DCHECK(it1 != var_to_box_and_coeffs.end()); - DCHECK(it2 != var_to_box_and_coeffs.end()); + non_trivial_bounds_->GetLinear2WithPotentialNonTrivalBounds(); + if (exprs.size() != num_known_linear2_) { + VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " + << exprs.size(); + } + for (; num_known_linear2_ < exprs.size(); ++num_known_linear2_) { + const LinearExpression2& positive_expr = exprs[num_known_linear2_]; + LinearExpression2 negated_expr = positive_expr; + negated_expr.Negate(); + for (const LinearExpression2& expr : {positive_expr, negated_expr}) { + auto it1 = var_to_box_and_coeffs_.find(PositiveVariable(expr.vars[0])); + auto it2 = var_to_box_and_coeffs_.find(PositiveVariable(expr.vars[1])); + if (it1 == var_to_box_and_coeffs_.end()) { + continue; + } + if (it2 == var_to_box_and_coeffs_.end()) { + continue; + } - const VarUsage& usage1 = it1->second; - const VarUsage& usage2 = it2->second; - for (int dim = 0; dim < 2; ++dim) { - const SchedulingConstraintHelper& dim_helper = - dim == 0 ? 
helper_.x_helper() : helper_.y_helper(); - for (const int box1 : usage1.boxes[dim][0 /* start */]) { - for (const int box2 : usage2.boxes[dim][1 /* end */]) { - if (box1 == box2) continue; - const auto [expr2, unused] = EncodeDifferenceLowerThan( - dim_helper.Starts()[box1], dim_helper.Ends()[box2], - /*ub=unused*/ 0); - if (expr == expr2) { - if (box1 < box2) { - non_trivial_pairs_.push_back({box1, box2}); - } else { - non_trivial_pairs_.push_back({box2, box1}); + const VarUsage& usage1 = it1->second; + const VarUsage& usage2 = it2->second; + for (int dim = 0; dim < 2; ++dim) { + const SchedulingConstraintHelper& dim_helper = + dim == 0 ? helper_.x_helper() : helper_.y_helper(); + for (const int box1 : usage1.boxes[dim][0 /* start */]) { + for (const int box2 : usage2.boxes[dim][1 /* end */]) { + if (box1 == box2) continue; + const auto [expr2, unused] = EncodeDifferenceLowerThan( + dim_helper.Starts()[box1], dim_helper.Ends()[box2], + /*ub=unused*/ 0); + if (expr == expr2) { + if (box1 < box2) { + non_trivial_pairs_.push_back({box1, box2}); + } else { + non_trivial_pairs_.push_back({box2, box1}); + } } } } @@ -114,13 +116,13 @@ void Precedences2DPropagator::CollectPairsOfBoxesWithNonTrivialDistance() { bool Precedences2DPropagator::Propagate() { if (!helper_.SynchronizeAndSetDirection()) return false; - if (last_helper_inprocessing_count_ != helper_.InProcessingCount() || - helper_.x_helper().CurrentDecisionLevel() == 0 || - last_linear2_timestamp_ != linear2_watcher_->Timestamp()) { + if (last_helper_inprocessing_count_ != helper_.InProcessingCount()) { last_helper_inprocessing_count_ = helper_.InProcessingCount(); - last_linear2_timestamp_ = linear2_watcher_->Timestamp(); - CollectPairsOfBoxesWithNonTrivialDistance(); + UpdateVarLookups(); + num_known_linear2_ = 0; + non_trivial_pairs_.clear(); } + CollectNewPairsOfBoxesWithNonTrivialDistance(); num_calls_++; diff --git a/ortools/sat/2d_distances_propagator.h b/ortools/sat/2d_distances_propagator.h index 
e8ca1066c9..6c47f37f64 100644 --- a/ortools/sat/2d_distances_propagator.h +++ b/ortools/sat/2d_distances_propagator.h @@ -18,7 +18,9 @@ #include #include +#include "absl/container/flat_hash_map.h" #include "ortools/sat/integer.h" +#include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" #include "ortools/sat/no_overlap_2d_helper.h" #include "ortools/sat/precedences.h" @@ -30,7 +32,7 @@ namespace sat { // This class implements a propagator for non_overlap_2d constraints that uses // the Linear2Bounds to detect precedences between pairs of boxes and // detect a conflict if the precedences implies an overlap between the two -// boxes. For doing this efficiently, it keep track of pairs of boxes that have +// boxes. For doing this efficiently, it keeps track of pairs of boxes that have // non-fixed precedences in the Linear2Bounds and only check those in the // propagation. class Precedences2DPropagator : public PropagatorInterface { @@ -43,16 +45,25 @@ class Precedences2DPropagator : public PropagatorInterface { int RegisterWith(GenericLiteralWatcher* watcher); private: - void CollectPairsOfBoxesWithNonTrivialDistance(); + void CollectNewPairsOfBoxesWithNonTrivialDistance(); + void UpdateVarLookups(); std::vector> non_trivial_pairs_; + struct VarUsage { + // boxes[0=x, 1=y][0=start, 1=end] + std::vector boxes[2][2]; + }; + + absl::flat_hash_map var_to_box_and_coeffs_; NoOverlap2DConstraintHelper& helper_; Linear2Bounds* linear2_bounds_; Linear2Watcher* linear2_watcher_; SharedStatistics* shared_stats_; + Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; int last_helper_inprocessing_count_ = -1; + int num_known_linear2_ = 0; int64_t last_linear2_timestamp_ = -1; int64_t num_conflicts_ = 0; diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index a3321cdee7..771c6c010a 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -129,7 +129,6 @@ cc_library( ":scheduling_helpers", ":synchronization", "//ortools/base:stl_util", - 
"//ortools/util:bitset", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 33a3c29318..017cc45550 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -100,6 +100,7 @@ std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, status_ub == AddResult::ADDED || status_ub == AddResult::UPDATED; if (!lb_restricted && !ub_restricted) return {false, false}; + non_trivial_bounds_->AddOrGet(expr); ++num_updates_; linear2_watcher_->NotifyBoundChanged(expr); @@ -348,6 +349,7 @@ void EnforcedLinear2Bounds::PushConditionalRelation( const int new_index = conditional_stack_.size(); const auto [it, inserted] = conditional_relations_.insert({expr, new_index}); if (inserted) { + non_trivial_bounds_->AddOrGet(expr); CreateLevelEntryIfNeeded(); conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, expr, enforcements); @@ -1815,7 +1817,9 @@ Linear2BoundsFromLinear3::Linear2BoundsFromLinear3(Model* model) linear2_watcher_(model->GetOrCreate()), watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), - root_level_bounds_(model->GetOrCreate()) {} + root_level_bounds_(model->GetOrCreate()), + non_trivial_bounds_( + model->GetOrCreate()) {} // Note that for speed we do not compare to the trivial or root level bounds. // @@ -1857,6 +1861,7 @@ bool Linear2BoundsFromLinear3::AddAffineUpperBound(LinearExpression2 expr, it->second = {affine_ub, divisor}; // Overwrite. } else { // Note that this should almost never happen (only once per lin2). 
+ non_trivial_bounds_->AddOrGet(expr); best_affine_ub_[expr] = {affine_ub, divisor}; } diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 57fd147999..392943ce63 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -14,10 +14,10 @@ #ifndef OR_TOOLS_SAT_PRECEDENCES_H_ #define OR_TOOLS_SAT_PRECEDENCES_H_ -#include #include #include #include +#include #include #include #include @@ -45,6 +45,63 @@ namespace operations_research { namespace sat { +DEFINE_STRONG_INDEX_TYPE(LinearExpression2Index); +const LinearExpression2Index kNoLinearExpression2Index(-1); +inline LinearExpression2Index NegationOf(LinearExpression2Index i) { + return LinearExpression2Index(i.value() ^ 1); +} + +inline bool Linear2IsPositive(LinearExpression2Index i) { + return (i.value() & 1) == 0; +} + +inline LinearExpression2Index PositiveLinear2(LinearExpression2Index i) { + return LinearExpression2Index(i.value() & (~1)); +} + +// Class to hold a list of LinearExpression2 that have (potentially) non-trivial +// bounds. This class is overzealous, in the sense that if a linear2 is in the +// list, it does not necessarily mean that it has a non-trivial bound, but the +// converse is true: if a linear2 is not in the list, +// Linear2Bounds::GetUpperBound() will return a trivial bound. +class Linear2WithPotentialNonTrivalBounds { + public: + Linear2WithPotentialNonTrivalBounds() = default; + + // Returns a never-changing index for the given linear expression. + // The expression must already be canonicalized and divided by its GCD. 
+ LinearExpression2Index AddOrGet(LinearExpression2 expr) { + DCHECK(expr.IsCanonicalized()); + DCHECK_EQ(expr.DivideByGcd(), 1); + const bool negated = expr.NegateForCanonicalization(); + auto [it, inserted] = expr_to_index_.insert({expr, exprs_.size()}); + if (inserted) { + CHECK_LT(2 * exprs_.size() + 1, + std::numeric_limits::max()); + exprs_.push_back(expr); + } + const LinearExpression2Index positive_index(2 * it->second); + if (negated) { + return NegationOf(positive_index); + } else { + return positive_index; + } + } + + // Return all positive linear2 expressions that have a potentially non-trivial + // bound. When calling this code it is often a good idea to check both the + // expression on the span and its negation. The order is fixed forever and + // this span can only grow by appending new expressions. + absl::Span GetLinear2WithPotentialNonTrivalBounds() + const { + return exprs_; + } + + private: + util_intops::StrongVector exprs_; + absl::flat_hash_map expr_to_index_; +}; + // Simple "watcher" class that will be notified if a linear2 bound changed. It // can also be queried to see if LinearExpression2 involving a specific variable // changed since last time. @@ -79,7 +136,9 @@ class RootLevelLinear2Bounds { explicit RootLevelLinear2Bounds(Model* model) : integer_trail_(model->GetOrCreate()), linear2_watcher_(model->GetOrCreate()), - shared_stats_(model->GetOrCreate()) {} + shared_stats_(model->GetOrCreate()), + non_trivial_bounds_( + model->GetOrCreate()) {} ~RootLevelLinear2Bounds(); @@ -148,6 +207,7 @@ class RootLevelLinear2Bounds { IntegerTrail* integer_trail_; Linear2Watcher* linear2_watcher_; SharedStatistics* shared_stats_; + Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; // Lookup table to find all the LinearExpression2 with a given variable and // having both coefficient 1. 
@@ -258,7 +318,9 @@ class EnforcedLinear2Bounds : public ReversibleInterface { integer_trail_(model->GetOrCreate()), linear2_watcher_(model->GetOrCreate()), root_level_bounds_(model->GetOrCreate()), - shared_stats_(model->GetOrCreate()) { + shared_stats_(model->GetOrCreate()), + non_trivial_bounds_( + model->GetOrCreate()) { integer_trail_->RegisterReversibleClass(this); } @@ -324,6 +386,7 @@ class EnforcedLinear2Bounds : public ReversibleInterface { Linear2Watcher* linear2_watcher_; RootLevelLinear2Bounds* root_level_bounds_; SharedStatistics* shared_stats_; + Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; int64_t num_conditional_relation_updates_ = 0; @@ -476,6 +539,7 @@ class Linear2BoundsFromLinear3 { GenericLiteralWatcher* watcher_; SharedStatistics* shared_stats_; RootLevelLinear2Bounds* root_level_bounds_; + Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; int64_t num_affine_updates_ = 0; @@ -558,7 +622,8 @@ class Linear2Bounds { GetAllExpressionsWithPotentialNonTrivialBounds( Bitset64::ConstView var_set) const; - // Returns a temporay bitset, cleared, and resized for all existing variables. + // Returns a temporary bitset, cleared, and resized for all existing + // variables. 
// // If we have many class calling // GetAllExpressionsWithPotentialNonTrivialBounds() it is important that not diff --git a/ortools/third_party_solvers/BUILD.bazel b/ortools/third_party_solvers/BUILD.bazel index a931395271..7b2ee8fefb 100644 --- a/ortools/third_party_solvers/BUILD.bazel +++ b/ortools/third_party_solvers/BUILD.bazel @@ -57,6 +57,7 @@ cc_library( "//ortools/base", "//ortools/base:file", "//ortools/base:status_macros", + "@abseil-cpp//absl/base:core_headers", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", "@abseil-cpp//absl/strings", diff --git a/ortools/third_party_solvers/CMakeLists.txt b/ortools/third_party_solvers/CMakeLists.txt index ac5bf08d0d..97232c4000 100644 --- a/ortools/third_party_solvers/CMakeLists.txt +++ b/ortools/third_party_solvers/CMakeLists.txt @@ -32,5 +32,5 @@ target_link_libraries(${NAME} PRIVATE absl::status absl::strings absl::str_format - absl::synchronization - ) + absl::synchronization) + diff --git a/ortools/third_party_solvers/gurobi_environment.cc b/ortools/third_party_solvers/gurobi_environment.cc index ba2d3ae212..007e766527 100644 --- a/ortools/third_party_solvers/gurobi_environment.cc +++ b/ortools/third_party_solvers/gurobi_environment.cc @@ -13,16 +13,16 @@ #include "ortools/third_party_solvers/gurobi_environment.h" +#include #include -#include #include #include +#include "absl/base/no_destructor.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" -#include "absl/synchronization/mutex.h" #include "ortools/base/logging.h" #include "ortools/third_party_solvers/dynamic_library.h" @@ -332,12 +332,14 @@ void LoadGurobiFunctions(DynamicLibrary* gurobi_dynamic_library) { gurobi_dynamic_library->GetFunction(&GRBplatform, "GRBplatform"); } +// clang-format on + std::vector GurobiDynamicLibraryPotentialPaths() { std::vector potential_paths; const std::vector kGurobiVersions = { - "1202", "1201", "1200", "1103", 
"1102", "1101", "1100", "1003", - "1002", "1001", "1000", "952", "951", "950", "911", - "910", "903", "902", "811", "801", "752"}; + "1202", "1201", "1200", "1103", "1102", "1101", "1100", + "1003", "1002", "1001", "1000", "952", "951", "950", + "911", "910", "903", "902", "811", "801", "752"}; potential_paths.reserve(kGurobiVersions.size() * 3); // Look for libraries pointed by GUROBI_HOME first. @@ -396,7 +398,7 @@ std::vector GurobiDynamicLibraryPotentialPaths() { #if defined(__GNUC__) // path in linux64 gurobi/optimizer docker image. for (const absl::string_view version : - {"12.0.2","12.0.1", "12.0.0", "11.0.3", "11.0.2", "11.0.1", "11.0.0", + {"12.0.2", "12.0.1", "12.0.0", "11.0.3", "11.0.2", "11.0.1", "11.0.0", "10.0.3", "10.0.2", "10.0.1", "10.0.0", "9.5.2", "9.5.1", "9.5.0"}) { potential_paths.push_back( absl::StrCat("/opt/gurobi/linux64/lib/libgurobi.so.", version)); @@ -407,37 +409,47 @@ std::vector GurobiDynamicLibraryPotentialPaths() { absl::Status LoadGurobiDynamicLibrary( std::vector potential_paths) { - static std::once_flag gurobi_loading_done; - static absl::Status gurobi_load_status; - static DynamicLibrary gurobi_library; - static absl::Mutex mutex; + struct GurobiLibraryStruct { + absl::Status gurobi_load_status; + DynamicLibrary gurobi_library; + }; - absl::MutexLock lock(&mutex); - - std::call_once(gurobi_loading_done, [&potential_paths]() { - const std::vector canonical_paths = - GurobiDynamicLibraryPotentialPaths(); - potential_paths.insert(potential_paths.end(), canonical_paths.begin(), - canonical_paths.end()); + static absl::NoDestructor loaded([&potential_paths]() { + GurobiLibraryStruct result; + // Try to load the library from the potential paths. 
for (const absl::string_view path : potential_paths) { - if (gurobi_library.TryToLoad(path)) { - LOG(INFO) << "Found the Gurobi library in '" << path << "."; + if (result.gurobi_library.TryToLoad(path)) { + VLOG(1) << "Found the Gurobi library in '" << path << "."; break; } } - if (gurobi_library.LibraryIsLoaded()) { - LoadGurobiFunctions(&gurobi_library); - gurobi_load_status = absl::OkStatus(); + // Fallback to the canonical paths. + if (!result.gurobi_library.LibraryIsLoaded()) { + const std::vector canonical_paths = + GurobiDynamicLibraryPotentialPaths(); + for (const absl::string_view path : canonical_paths) { + if (result.gurobi_library.TryToLoad(path)) { + VLOG(1) << "Found the Gurobi library in '" << path << "."; + break; + } + } + } + + if (result.gurobi_library.LibraryIsLoaded()) { + LoadGurobiFunctions(&result.gurobi_library); + result.gurobi_load_status = absl::OkStatus(); } else { - gurobi_load_status = absl::NotFoundError(absl::StrCat( + result.gurobi_load_status = absl::NotFoundError(absl::StrCat( "Could not find the Gurobi shared library. Looked in: [", absl::StrJoin(potential_paths, "', '"), "]. 
If you know where it" " is, pass the full path to 'LoadGurobiDynamicLibrary()'.")); } - }); - return gurobi_load_status; + return result; + }()); + + return loaded->gurobi_load_status; } } // namespace operations_research diff --git a/ortools/third_party_solvers/gurobi_environment.h b/ortools/third_party_solvers/gurobi_environment.h index 6bb4e10ec4..c1985daf3f 100644 --- a/ortools/third_party_solvers/gurobi_environment.h +++ b/ortools/third_party_solvers/gurobi_environment.h @@ -27,7 +27,6 @@ #endif extern "C" { - typedef struct _GRBmodel GRBmodel; typedef struct _GRBenv GRBenv; typedef struct _GRBsvec { From 6ceb6d401e68d3a85cf6ab274c80b0a6fdbc0d74 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 19 Jun 2025 14:48:50 +0200 Subject: [PATCH 110/509] tools/release: more cleanup --- tools/release/build_delivery_linux.sh | 3 ++- tools/release/build_delivery_macos.sh | 12 +++++++----- tools/release/build_delivery_manylinux_amd64.sh | 1 + tools/release/build_delivery_manylinux_arm64.sh | 1 + tools/release/build_delivery_win.cmd | 4 ++-- 5 files changed, 13 insertions(+), 8 deletions(-) diff --git a/tools/release/build_delivery_linux.sh b/tools/release/build_delivery_linux.sh index 1a9431662a..6c6558f880 100755 --- a/tools/release/build_delivery_linux.sh +++ b/tools/release/build_delivery_linux.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages @@ -208,7 +209,7 @@ function build_python() { command -v python3 | xargs echo "python3: " | tee -a build.log python3 -c "import platform as p; print(p.platform())" | tee -a build.log python3 -m pip install --upgrade --user --break-system-package pip - python3 -m pip install --upgrade --user --break-system-package wheel absl-py mypy mypy-protobuf virtualenv + python3 -m pip install --upgrade --user 
--break-system-package wheel absl-py mypy mypy-protobuf virtualenv "typing-extensions>=4.12" echo "check protoc-gen-mypy..." command -v protoc-gen-mypy | xargs echo "protoc-gen-mypy: " | tee -a build.log protoc-gen-mypy --version | xargs echo "protoc-gen-mypy version: " | tee -a build.log diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 72f82ba5e5..f03ddf3577 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages @@ -291,13 +292,14 @@ function build_python() { echo "DONE" | tee -a build.log if [[ ${PLATFORM} == "x86_64" ]]; then - echo -n " Build all..." | tee -a build.log - # on macos X86_64 stubgen will timeout -> need to build 2 times - cmake --build "temp_python${PY_VERSION}" -j8 -v || true + echo -n " Build all few times..." | tee -a build.log + # on macos X86_64 stubgen will timeout -> need to build few times + cmake --build "temp_python${PY_VERSION}" -j4 -v || true + sleep 10 + cmake --build "temp_python${PY_VERSION}" -v || true echo "DONE" | tee -a build.log - sleep 5 echo -n " ReBuild all..." | tee -a build.log - cmake --build "temp_python${PY_VERSION}" -j8 -v + cmake --build "temp_python${PY_VERSION}" -j4 -v echo "DONE" | tee -a build.log else echo -n " Build all..." 
| tee -a build.log diff --git a/tools/release/build_delivery_manylinux_amd64.sh b/tools/release/build_delivery_manylinux_amd64.sh index ab0a580856..892745755e 100755 --- a/tools/release/build_delivery_manylinux_amd64.sh +++ b/tools/release/build_delivery_manylinux_amd64.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages diff --git a/tools/release/build_delivery_manylinux_arm64.sh b/tools/release/build_delivery_manylinux_arm64.sh index d87d4b7bdf..81961b19d4 100755 --- a/tools/release/build_delivery_manylinux_arm64.sh +++ b/tools/release/build_delivery_manylinux_arm64.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages diff --git a/tools/release/build_delivery_win.cmd b/tools/release/build_delivery_win.cmd index d50a83f831..940e0ff7bd 100644 --- a/tools/release/build_delivery_win.cmd +++ b/tools/release/build_delivery_win.cmd @@ -93,7 +93,7 @@ echo help: show this help text (default) echo dotnet: Build dotnet packages echo java: Build java packages echo python: Build python packages -echo archive: Build archive +echo archive: Build all (C++, .Net, Java) archives echo examples: Build examples archives echo all: build everything echo reset: delete all artifacts and suppress cache file @@ -284,7 +284,7 @@ FOR %%v IN (9 10 11 12 13) DO ( echo Check python3.%%v... 
| tee.exe -a build.log which.exe "C:\python3%%v-64\python.exe" || exit 1 echo "C:\python3%%v-64\python.exe: FOUND" | tee.exe -a build.log - C:\python3%%v-64\python.exe -m pip install --upgrade --user absl-py mypy mypy-protobuf protobuf numpy pandas + C:\python3%%v-64\python.exe -m pip install --upgrade --user absl-py mypy mypy-protobuf protobuf numpy pandas "typing-extensions>=4.12" call :subroutine %%v From 330a0efa28ae7aff2de0de9593786a5dee15883d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 20 Jun 2025 15:11:01 +0200 Subject: [PATCH 111/509] print a solution after a SIGTERM --- ortools/sat/sat_runner.cc | 180 ++++++++++++++++++++++++++++---------- ortools/util/sigint.cc | 36 ++++++-- ortools/util/sigint.h | 20 ++++- 3 files changed, 176 insertions(+), 60 deletions(-) diff --git a/ortools/sat/sat_runner.cc b/ortools/sat/sat_runner.cc index c31a0e2b27..c1dceb038b 100644 --- a/ortools/sat/sat_runner.cc +++ b/ortools/sat/sat_runner.cc @@ -16,9 +16,11 @@ #include #include #include +#include #include #include +#include "absl/base/thread_annotations.h" #include "absl/flags/flag.h" #include "absl/flags/parse.h" #include "absl/flags/usage.h" @@ -30,6 +32,8 @@ #include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" +#include "absl/types/span.h" #include "google/protobuf/arena.h" #include "google/protobuf/text_format.h" #include "ortools/base/helpers.h" @@ -45,6 +49,7 @@ #include "ortools/sat/synchronization.h" #include "ortools/util/file_util.h" #include "ortools/util/logging.h" +#include "ortools/util/sigint.h" #include "ortools/util/sorted_interval_list.h" ABSL_FLAG( @@ -102,8 +107,69 @@ std::string ExtractName(absl::string_view full_filename) { return filename; } -void LogInPbCompetitionFormat(int num_variables, bool has_objective, - Model* model, SatParameters* parameters) { +class LastSolutionPrinter { + public: + // Note that is prints the solution in the PB 
competition format. + void MaybePrintLastSolution() { + absl::MutexLock lock(&mutex_); + if (last_solution_printed_) return; + last_solution_printed_ = true; + + if (last_solution_.empty()) { + std::cout << "s UNKNOWN" << std::endl; + } else { + std::cout << "s SATISFIABLE" << std::endl; + std::string line; + for (int i = 0; i < num_variables_; ++i) { + if (last_solution_[i]) { + absl::StrAppend(&line, "x", i + 1, " "); + } else { + absl::StrAppend(&line, "-x", i + 1, " "); + } + if (line.size() >= 75) { + std::cout << "v " << line << std::endl; + line.clear(); + } + } + if (!line.empty()) { + std::cout << "v " << line << std::endl; + } + } + } + + void set_num_variables(int num_variables) { num_variables_ = num_variables; } + + void set_last_solution(absl::Span solution) { + absl::MutexLock lock(&mutex_); + if (last_solution_printed_) return; + last_solution_.assign(solution.begin(), solution.end()); + } + + // Returns false if the solution has already been printed, else mark it as + // printed by caller code. 
+ bool mark_last_solution_printed() { + const absl::MutexLock lock(&mutex_); + if (last_solution_printed_) { + return false; + } + last_solution_printed_ = true; + return true; + } + + private: + int num_variables_ = 0; + std::vector last_solution_ ABSL_GUARDED_BY(mutex_); + bool last_solution_printed_ ABSL_GUARDED_BY(mutex_) = false; + absl::Mutex mutex_; +}; + +void LogInPbCompetitionFormat( + int num_variables, bool has_objective, Model* model, + SatParameters* parameters, + std::shared_ptr last_solution_printer) { + CHECK(last_solution_printer != nullptr); + last_solution_printer->set_num_variables(num_variables); + const auto log_callback = [](const std::string& multi_line_input) { if (multi_line_input.empty()) { std::cout << "c" << std::endl; @@ -118,55 +184,60 @@ void LogInPbCompetitionFormat(int num_variables, bool has_objective, model->GetOrCreate()->AddInfoLoggingCallback(log_callback); parameters->set_log_to_stdout(false); - const auto response_callback = [](const CpSolverResponse& r) { + const auto response_callback = [last_solution_printer]( + const CpSolverResponse& r) { std::cout << "o " << static_cast(r.objective_value()) << std::endl; + last_solution_printer->set_last_solution(r.solution()); }; model->Add(NewFeasibleSolutionObserver(response_callback)); - const auto final_response_callback = [num_variables, - has_objective](CpSolverResponse* r) { - switch (r->status()) { - case CpSolverStatus::OPTIMAL: - if (has_objective) { - std::cout << "s OPTIMUM FOUND " << std::endl; - } else { - std::cout << "s SATISFIABLE" << std::endl; + const auto final_response_callback = + [num_variables, has_objective, + last_solution_printer](CpSolverResponse* r) { + if (!last_solution_printer->mark_last_solution_printed()) return; + + switch (r->status()) { + case CpSolverStatus::OPTIMAL: + if (has_objective) { + std::cout << "s OPTIMUM FOUND " << std::endl; + } else { + std::cout << "s SATISFIABLE" << std::endl; + } + break; + case CpSolverStatus::FEASIBLE: + 
std::cout << "s SATISFIABLE" << std::endl; + break; + case CpSolverStatus::INFEASIBLE: + std::cout << "s UNSATISFIABLE" << std::endl; + break; + case CpSolverStatus::MODEL_INVALID: + std::cout << "s UNSUPPORTED" << std::endl; + break; + case CpSolverStatus::UNKNOWN: + std::cout << "s UNKNOWN" << std::endl; + break; + default: + break; } - break; - case CpSolverStatus::FEASIBLE: - std::cout << "s SATISFIABLE" << std::endl; - break; - case CpSolverStatus::INFEASIBLE: - std::cout << "s UNSATISFIABLE" << std::endl; - break; - case CpSolverStatus::MODEL_INVALID: - std::cout << "s UNSUPPORTED" << std::endl; - break; - case CpSolverStatus::UNKNOWN: - std::cout << "s UNKNOWN" << std::endl; - break; - default: - break; - } - if (r->status() == CpSolverStatus::OPTIMAL || - r->status() == CpSolverStatus::FEASIBLE) { - std::string line; - for (int i = 0; i < num_variables; ++i) { - if (r->solution(i)) { - absl::StrAppend(&line, "x", i + 1, " "); - } else { - absl::StrAppend(&line, "-x", i + 1, " "); + if (r->status() == CpSolverStatus::OPTIMAL || + r->status() == CpSolverStatus::FEASIBLE) { + std::string line; + for (int i = 0; i < num_variables; ++i) { + if (r->solution(i)) { + absl::StrAppend(&line, "x", i + 1, " "); + } else { + absl::StrAppend(&line, "-x", i + 1, " "); + } + if (line.size() >= 75) { + std::cout << "v " << line << std::endl; + line.clear(); + } + } + if (!line.empty()) { + std::cout << "v " << line << std::endl; + } } - if (line.size() >= 75) { - std::cout << "v " << line << std::endl; - line.clear(); - } - } - if (!line.empty()) { - std::cout << "v " << line << std::endl; - } - } - }; + }; model->GetOrCreate()->AddFinalResponsePostprocessor( final_response_callback); } @@ -186,7 +257,8 @@ void SetInterleavedWorkers(SatParameters* parameters) { bool LoadProblem(const std::string& filename, absl::string_view hint_file, absl::string_view domain_file, CpModelProto* cp_model, - Model* model, SatParameters* parameters) { + Model* model, SatParameters* 
parameters, + std::shared_ptr last_solution_printer) { if (absl::EndsWith(filename, ".opb") || absl::EndsWith(filename, ".opb.bz2") || absl::EndsWith(filename, ".opb.gz") || absl::EndsWith(filename, ".wbo") || @@ -217,7 +289,7 @@ bool LoadProblem(const std::string& filename, absl::string_view hint_file, const int num_variables = reader.model_is_supported() ? reader.num_variables() : 1; LogInPbCompetitionFormat(num_variables, cp_model->has_objective(), model, - parameters); + parameters, last_solution_printer); } if (absl::GetFlag(FLAGS_force_interleave_search)) { SetInterleavedWorkers(parameters); @@ -310,9 +382,13 @@ int Run() { google::protobuf::Arena arena; CpModelProto* cp_model = google::protobuf::Arena::Create(&arena); + std::shared_ptr last_solution_printer; + if (absl::GetFlag(FLAGS_competition_mode)) { + last_solution_printer = std::make_shared(); + } if (!LoadProblem(absl::GetFlag(FLAGS_input), absl::GetFlag(FLAGS_hint_file), absl::GetFlag(FLAGS_domain_file), cp_model, &model, - ¶meters)) { + ¶meters, last_solution_printer)) { if (!absl::GetFlag(FLAGS_competition_mode)) { LOG(FATAL) << "Cannot load file '" << absl::GetFlag(FLAGS_input) << "'."; } @@ -329,6 +405,14 @@ int Run() { FingerprintRepeatedField(r.solution(), kDefaultFingerprintSeed)); })); } + + if (absl::GetFlag(FLAGS_competition_mode)) { + model.GetOrCreate()->Register([last_solution_printer]() { + last_solution_printer->MaybePrintLastSolution(); + exit(EXIT_SUCCESS); + }); + } + const CpSolverResponse response = SolveCpModel(*cp_model, &model); if (!absl::GetFlag(FLAGS_output).empty()) { diff --git a/ortools/util/sigint.cc b/ortools/util/sigint.cc index 601f4983cc..bd4f40cfac 100644 --- a/ortools/util/sigint.cc +++ b/ortools/util/sigint.cc @@ -23,29 +23,47 @@ namespace operations_research { void SigintHandler::Register(const std::function& f) { handler_ = [this, f]() -> void { - const int num_sigint_calls = ++num_sigint_calls_; - if (num_sigint_calls < 3) { + const int num_calls = 
++num_calls_; + if (num_calls < 3) { LOG(INFO) - << "^C pressed " << num_sigint_calls << " times. " + << "^C pressed " << num_calls << " times. " << "Interrupting the solver. Press 3 times to force termination."; - if (num_sigint_calls == 1) f(); - } else if (num_sigint_calls == 3) { + if (num_calls == 1) f(); + } else if (num_calls == 3) { LOG(INFO) << "^C pressed 3 times. Forcing termination."; exit(EXIT_FAILURE); } else { // Another thread is already running exit(), do nothing. } }; - signal(SIGINT, &ControlCHandler); + signal(SIGINT, &SigHandler); } // This method will be called by the system after the SIGINT signal. // The parameter is the signal received. -void SigintHandler::ControlCHandler(int sig) { handler_(); } +void SigintHandler::SigHandler(int) { handler_(); } -// Unregister the SIGINT handler. -SigintHandler::~SigintHandler() { signal(SIGINT, SIG_DFL); } +// Unregister the signal handlers. +SigintHandler::~SigintHandler() { + if (handler_ != nullptr) signal(SIGINT, SIG_DFL); +} thread_local std::function SigintHandler::handler_; +void SigtermHandler::Register(const std::function& f) { + handler_ = [f]() -> void { f(); }; + signal(SIGTERM, &SigHandler); +} + +// This method will be called by the system after the SIGTERM signal. +// The parameter is the signal received. +void SigtermHandler::SigHandler(int) { handler_(); } + +// Unregister the signal handlers. +SigtermHandler::~SigtermHandler() { + if (handler_ != nullptr) signal(SIGTERM, SIG_DFL); +} + +thread_local std::function SigtermHandler::handler_; + } // namespace operations_research diff --git a/ortools/util/sigint.h b/ortools/util/sigint.h index 7b3098033e..1d9fcd1b81 100644 --- a/ortools/util/sigint.h +++ b/ortools/util/sigint.h @@ -21,7 +21,7 @@ namespace operations_research { class SigintHandler { public: - SigintHandler() {} + SigintHandler() = default; ~SigintHandler(); // Catches ^C and call f() the first time this happen. 
If ^C is pressed 3 @@ -29,9 +29,23 @@ class SigintHandler { void Register(const std::function& f); private: - static void ControlCHandler(int s); + std::atomic num_calls_ = 0; - std::atomic num_sigint_calls_ = 0; + static void SigHandler(int s); + thread_local static std::function handler_; +}; + +class SigtermHandler { + public: + SigtermHandler() = default; + ~SigtermHandler(); + + // Catches SIGTERM and call f(). It is recommended that f() calls exit() to + // terminate the program. + void Register(const std::function& f); + + private: + static void SigHandler(int s); thread_local static std::function handler_; }; From c14e54cf82442a6ca327b99a070f3a5d50b8ef9d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 20 Jun 2025 15:11:37 +0200 Subject: [PATCH 112/509] [CP-SAT] print a solution after a SIGTERM; improve precedences --- ortools/sat/2d_distances_propagator.cc | 35 ++- ortools/sat/BUILD.bazel | 6 +- ortools/sat/cp_model_mapping.h | 1 - ortools/sat/cp_model_solver.cc | 45 +-- ortools/sat/cp_model_solver_helpers.cc | 94 +++++- ortools/sat/cp_model_solver_helpers.h | 8 + ortools/sat/integer_base.cc | 37 +-- ortools/sat/integer_base.h | 10 +- ortools/sat/precedences.cc | 402 ++++++++++++------------- ortools/sat/precedences.h | 269 +++++++++++------ ortools/sat/precedences_test.cc | 17 +- ortools/sat/sat_parameters.proto | 9 +- ortools/sat/sat_runner.cc | 180 ++++++++--- ortools/sat/synchronization.cc | 132 ++++++-- ortools/sat/synchronization.h | 99 +++++- ortools/sat/synchronization_test.cc | 20 +- ortools/util/sigint.cc | 36 ++- ortools/util/sigint.h | 20 +- ortools/util/sorted_interval_list.h | 4 +- 19 files changed, 934 insertions(+), 490 deletions(-) diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc index 2053e29581..3d455420a5 100644 --- a/ortools/sat/2d_distances_propagator.cc +++ b/ortools/sat/2d_distances_propagator.cc @@ -13,6 +13,7 @@ #include "ortools/sat/2d_distances_propagator.h" +#include #include 
#include #include @@ -69,10 +70,12 @@ void Precedences2DPropagator::UpdateVarLookups() { void Precedences2DPropagator::CollectNewPairsOfBoxesWithNonTrivialDistance() { const absl::Span exprs = non_trivial_bounds_->GetLinear2WithPotentialNonTrivalBounds(); - if (exprs.size() != num_known_linear2_) { - VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " - << exprs.size(); + if (exprs.size() == num_known_linear2_) { + return; } + VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " + << exprs.size(); + const int previous_num_pairs = non_trivial_pairs_.size(); for (; num_known_linear2_ < exprs.size(); ++num_known_linear2_) { const LinearExpression2& positive_expr = exprs[num_known_linear2_]; LinearExpression2 negated_expr = positive_expr; @@ -111,7 +114,31 @@ void Precedences2DPropagator::CollectNewPairsOfBoxesWithNonTrivialDistance() { } } - gtl::STLSortAndRemoveDuplicates(&non_trivial_pairs_); + // Sort the new pairs. + std::sort(non_trivial_pairs_.begin() + previous_num_pairs, + non_trivial_pairs_.end()); + + // Remove duplicates from new pairs. + non_trivial_pairs_.erase( + std::unique(non_trivial_pairs_.begin() + previous_num_pairs, + non_trivial_pairs_.end()), + non_trivial_pairs_.end()); + + // Merge with the old pairs keeping sorted. + std::inplace_merge(non_trivial_pairs_.begin(), + non_trivial_pairs_.begin() + previous_num_pairs, + non_trivial_pairs_.end()); + + // Remove newly-added duplicates. + non_trivial_pairs_.erase( + std::unique(non_trivial_pairs_.begin(), non_trivial_pairs_.end()), + non_trivial_pairs_.end()); + + // Result should be sorted and without duplicates. 
+ DCHECK(std::is_sorted(non_trivial_pairs_.begin(), non_trivial_pairs_.end())); + DCHECK(std::adjacent_find(non_trivial_pairs_.begin(), + non_trivial_pairs_.end()) == + non_trivial_pairs_.end()); } bool Precedences2DPropagator::Propagate() { diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 771c6c010a..4dc706800d 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -815,7 +815,6 @@ cc_library( deps = [ ":cp_model_cc_proto", ":cp_model_utils", - ":integer", ":integer_base", ":linear_constraint", ":model", @@ -2056,6 +2055,7 @@ cc_library( deps = [ ":clause", ":cp_constraints", + ":cp_model_mapping", ":integer", ":integer_base", ":model", @@ -4023,13 +4023,17 @@ cc_binary( "//ortools/base:path", "//ortools/util:file_util", "//ortools/util:logging", + "//ortools/util:sigint", "//ortools/util:sorted_interval_list", + "@abseil-cpp//absl/base:core_headers", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/log", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/log:flags", "@abseil-cpp//absl/strings", "@abseil-cpp//absl/strings:str_format", + "@abseil-cpp//absl/synchronization", + "@abseil-cpp//absl/types:span", "@protobuf", ], ) diff --git a/ortools/sat/cp_model_mapping.h b/ortools/sat/cp_model_mapping.h index 1a82e4263e..5cf63e3e2f 100644 --- a/ortools/sat/cp_model_mapping.h +++ b/ortools/sat/cp_model_mapping.h @@ -24,7 +24,6 @@ #include "ortools/base/strong_vector.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_utils.h" -#include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/linear_constraint.h" #include "ortools/sat/model.h" diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 647d15efb0..e4c60ab3de 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -793,40 +793,6 @@ void LogSubsolverNames(absl::Span> subsolvers, SOLVER_LOG(logger, ""); } -void LogFinalStatistics(SharedClasses* shared) { - if 
(!shared->logger->LoggingIsEnabled()) return; - - shared->logger->FlushPendingThrottledLogs(/*ignore_rates=*/true); - SOLVER_LOG(shared->logger, ""); - - shared->stat_tables->Display(shared->logger); - shared->response->DisplayImprovementStatistics(); - - std::vector> table; - table.push_back({"Solution repositories", "Added", "Queried", "Synchro"}); - shared->response->SolutionPool().AddTableStats(&table); - table.push_back(shared->ls_hints->TableLineStats()); - if (shared->lp_solutions != nullptr) { - table.push_back(shared->lp_solutions->TableLineStats()); - } - if (shared->incomplete_solutions != nullptr) { - table.push_back(shared->incomplete_solutions->TableLineStats()); - } - SOLVER_LOG(shared->logger, FormatTable(table)); - - if (shared->bounds) { - shared->bounds->LogStatistics(shared->logger); - } - - if (shared->clauses) { - shared->clauses->LogStatistics(shared->logger); - } - - // Extra logging if needed. Note that these are mainly activated on - // --vmodule *some_file*=1 and are here for development. - shared->stats->Log(shared->logger); -} - void LaunchSubsolvers(const SatParameters& params, SharedClasses* shared, std::vector>& subsolvers, absl::Span ignored) { @@ -868,7 +834,7 @@ void LaunchSubsolvers(const SatParameters& params, SharedClasses* shared, for (int i = 0; i < subsolvers.size(); ++i) { subsolvers[i].reset(); } - LogFinalStatistics(shared); + shared->LogFinalStatistics(); } bool VarIsFixed(const CpModelProto& model_proto, int i) { @@ -1124,13 +1090,18 @@ class FullProblemSolver : public SubSolver { shared_->model_proto, shared_->bounds.get(), &local_model_); } + if (shared_->linear2_bounds != nullptr) { + RegisterLinear2BoundsImport(shared_->linear2_bounds.get(), + &local_model_); + } + // Note that this is done after the loading, so we will never export // problem clauses. 
if (shared_->clauses != nullptr) { const int id = shared_->clauses->RegisterNewId( + local_model_.Name(), /*may_terminate_early=*/stop_at_first_solution_ && - local_model_.GetOrCreate()->has_objective()); - shared_->clauses->SetWorkerNameForId(id, local_model_.Name()); + local_model_.GetOrCreate()->has_objective()); RegisterClausesLevelZeroImport(id, shared_->clauses.get(), &local_model_); diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 083d657587..3e22dabbf5 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -847,6 +847,59 @@ void RegisterVariableBoundsLevelZeroImport( import_level_zero_bounds); } +void RegisterLinear2BoundsImport(SharedLinear2Bounds* shared_linear2_bounds, + Model* model) { + CHECK(shared_linear2_bounds != nullptr); + auto* cp_model_mapping = model->GetOrCreate(); + auto* root_linear2 = model->GetOrCreate(); + auto* sat_solver = model->GetOrCreate(); + const int import_id = + shared_linear2_bounds->RegisterNewImportId(model->Name()); + const auto& import_function = [import_id, shared_linear2_bounds, root_linear2, + cp_model_mapping, sat_solver, model]() { + const auto new_bounds = + shared_linear2_bounds->NewlyUpdatedBounds(import_id); + int num_imported = 0; + for (const auto& [proto_expr, bounds] : new_bounds) { + // Lets create the corresponding LinearExpression2. + LinearExpression2 expr; + for (const int i : {0, 1}) { + expr.vars[i] = cp_model_mapping->Integer(proto_expr.vars[i]); + expr.coeffs[i] = proto_expr.coeffs[i]; + } + const auto [lb, ub] = bounds; + const auto [lb_added, ub_added] = root_linear2->Add(expr, lb, ub); + if (!lb_added && !ub_added) continue; + ++num_imported; + + // TODO(user): Is it a good idea to add the linear constraint ? + // We might have many redundant linear2 relations that don't need + // propagation when we have chains of precedences. 
The root_linear2 should + // be up-to-date with transitive closure to avoid adding such relations + // (recompute it at level zero before this?). + // + // TODO(user): use IntegerValure directly in + // AddWeightedSumGreaterOrEqual() or use a lower-level API. + const std::vector coeffs = {expr.coeffs[0].value(), + expr.coeffs[1].value()}; + if (lb_added) { + AddWeightedSumGreaterOrEqual({}, absl::MakeSpan(expr.vars, 2), coeffs, + lb.value(), model); + if (sat_solver->ModelIsUnsat()) return false; + } + if (ub_added) { + AddWeightedSumLowerOrEqual({}, absl::MakeSpan(expr.vars, 2), coeffs, + ub.value(), model); + if (sat_solver->ModelIsUnsat()) return false; + } + } + shared_linear2_bounds->NotifyNumImported(import_id, num_imported); + return true; + }; + model->GetOrCreate()->callbacks.push_back( + import_function); +} + // Registers a callback that will report improving objective best bound. // It will be called each time new objective bound are propagated at level zero. void RegisterObjectiveBestBoundExport( @@ -2086,6 +2139,10 @@ SharedClasses::SharedClasses(const CpModelProto* proto, Model* global_model) bounds->LoadDebugSolution(response->DebugSolution()); } + if (params.share_linear2_bounds()) { + linear2_bounds = std::make_unique(); + } + // Create extra shared classes if needed. Note that while these parameters // are true by default, we disable them if we don't have enough workers for // them in AdaptGlobalParameters(). @@ -2120,7 +2177,7 @@ void SharedClasses::RegisterSharedClassesInLocalModel(Model* local_model) { local_model->Register(stat_tables); // TODO(user): Use parameters and not the presence/absence of these class - // to decide when to use them. + // to decide when to use them? this is not clear. 
if (lp_solutions != nullptr) { local_model->Register(lp_solutions.get()); } @@ -2134,6 +2191,9 @@ void SharedClasses::RegisterSharedClassesInLocalModel(Model* local_model) { if (clauses != nullptr) { local_model->Register(clauses.get()); } + if (linear2_bounds != nullptr) { + local_model->Register(linear2_bounds.get()); + } } bool SharedClasses::SearchIsDone() { @@ -2146,5 +2206,37 @@ bool SharedClasses::SearchIsDone() { return false; } +void SharedClasses::LogFinalStatistics() { + if (!logger->LoggingIsEnabled()) return; + + logger->FlushPendingThrottledLogs(/*ignore_rates=*/true); + SOLVER_LOG(logger, ""); + + stat_tables->Display(logger); + response->DisplayImprovementStatistics(); + + std::vector> table; + table.push_back({"Solution repositories", "Added", "Queried", "Synchro"}); + response->SolutionPool().AddTableStats(&table); + table.push_back(ls_hints->TableLineStats()); + if (lp_solutions != nullptr) { + table.push_back(lp_solutions->TableLineStats()); + } + if (incomplete_solutions != nullptr) { + table.push_back(incomplete_solutions->TableLineStats()); + } + SOLVER_LOG(logger, FormatTable(table)); + + // TODO(user): we can combine the "bounds table" into one for shorter logs. + if (bounds != nullptr) bounds->LogStatistics(logger); + if (linear2_bounds != nullptr) linear2_bounds->LogStatistics(logger); + + if (clauses != nullptr) clauses->LogStatistics(logger); + + // Extra logging if needed. Note that these are mainly activated on + // --vmodule *some_file*=1 and are here for development. 
+ stats->Log(logger); +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_solver_helpers.h b/ortools/sat/cp_model_solver_helpers.h index 1f46f77495..af00cb3213 100644 --- a/ortools/sat/cp_model_solver_helpers.h +++ b/ortools/sat/cp_model_solver_helpers.h @@ -60,12 +60,15 @@ struct SharedClasses { std::unique_ptr lp_solutions; std::unique_ptr incomplete_solutions; std::unique_ptr clauses; + std::unique_ptr linear2_bounds; // call local_model->Register() on most of the class here, this allow to // more easily depends on one of the shared class deep within the solver. void RegisterSharedClassesInLocalModel(Model* local_model); bool SearchIsDone(); + + void LogFinalStatistics(); }; // Loads a CpModelProto inside the given model. @@ -119,6 +122,11 @@ int RegisterClausesLevelZeroImport(int id, SharedClausesManager* shared_clauses_manager, Model* model); +// This will register a level zero callback to imports new linear2 from the +// SharedLinear2Bounds. +void RegisterLinear2BoundsImport(SharedLinear2Bounds* shared_linear2_bounds, + Model* model); + void PostsolveResponseWrapper(const SatParameters& params, int num_variable_in_original_model, const CpModelProto& mapping_proto, diff --git a/ortools/sat/integer_base.cc b/ortools/sat/integer_base.cc index 740c02bd0d..29a7d8d186 100644 --- a/ortools/sat/integer_base.cc +++ b/ortools/sat/integer_base.cc @@ -84,22 +84,18 @@ bool LinearExpression2::NegateForCanonicalization() { } bool LinearExpression2::CanonicalizeAndUpdateBounds(IntegerValue& lb, - IntegerValue& ub, - bool allow_negation) { + IntegerValue& ub) { SimpleCanonicalization(); if (coeffs[0] == 0 || coeffs[1] == 0) return false; // abort. - bool negated = false; - if (allow_negation) { - negated = NegateForCanonicalization(); - if (negated) { - // We need to be able to negate without overflow. 
- CHECK_GE(lb, kMinIntegerValue); - CHECK_LE(ub, kMaxIntegerValue); - std::swap(lb, ub); - lb = -lb; - ub = -ub; - } + const bool negated = NegateForCanonicalization(); + if (negated) { + // We need to be able to negate without overflow. + CHECK_GE(lb, kMinIntegerValue); + CHECK_LE(ub, kMaxIntegerValue); + std::swap(lb, ub); + lb = -lb; + ub = -ub; } // Do gcd division. @@ -144,8 +140,7 @@ std::pair BestBinaryRelationBounds::Add(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) { - const bool negated = - expr.CanonicalizeAndUpdateBounds(lb, ub, /*allow_negation=*/true); + const bool negated = expr.CanonicalizeAndUpdateBounds(lb, ub); // We only store proper linear2. if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { @@ -184,7 +179,7 @@ BestBinaryRelationBounds::Add(LinearExpression2 expr, IntegerValue lb, RelationStatus BestBinaryRelationBounds::GetStatus(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) const { - expr.CanonicalizeAndUpdateBounds(lb, ub, /*allow_negation=*/true); + expr.CanonicalizeAndUpdateBounds(lb, ub); if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { return RelationStatus::IS_UNKNOWN; } @@ -245,14 +240,4 @@ BestBinaryRelationBounds::GetSortedNonTrivialBounds() const { return root_relations_sorted; } -void BestBinaryRelationBounds::AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const { - for (const auto& [expr, unused] : best_bounds_) { - if (!var_set[PositiveVariable(expr.vars[0])]) continue; - if (!var_set[PositiveVariable(expr.vars[1])]) continue; - result->push_back(expr); - } -} - } // namespace operations_research::sat diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h index 9eb30219cc..ba4f04cdff 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -384,8 +384,7 @@ struct LinearExpression2 { // accordingly. This is the same as SimpleCanonicalization(), DivideByGcd() // and the NegateForCanonicalization() with a proper updates of the bounds. 
// Returns whether the expression was negated. - bool CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub, - bool allow_negation = false); + bool CanonicalizeAndUpdateBounds(IntegerValue& lb, IntegerValue& ub); // Divides the expression by the gcd of both coefficients, and returns it. // Note that we always return something >= 1 even if both coefficients are @@ -493,7 +492,7 @@ class BestBinaryRelationBounds { IntegerValue GetUpperBound(LinearExpression2 expr) const; // Same as GetUpperBound() but assume the expression is already canonicalized. - // This is slighlty faster. + // This is slightly faster. IntegerValue UpperBoundWhenCanonicalized(LinearExpression2 expr) const; int64_t num_bounds() const { return best_bounds_.size(); } @@ -504,11 +503,6 @@ class BestBinaryRelationBounds { std::vector> GetSortedNonTrivialBounds() const; - // Note that this is non-deterministic and in O(num_relations). - void AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const; - private: // The best bound on the given "canonicalized" expression. absl::flat_hash_map> diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 017cc45550..5618fb304a 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -54,6 +55,53 @@ namespace operations_research { namespace sat { +LinearExpression2Index Linear2WithPotentialNonTrivalBounds::AddOrGet( + LinearExpression2 original_expr) { + LinearExpression2 expr = original_expr; + DCHECK(expr.IsCanonicalized()); + DCHECK_EQ(expr.DivideByGcd(), 1); + DCHECK_NE(expr.coeffs[0], 0); + DCHECK_NE(expr.coeffs[1], 0); + const bool negated = expr.NegateForCanonicalization(); + auto [it, inserted] = expr_to_index_.insert({expr, exprs_.size()}); + if (inserted) { + CHECK_LT(2 * exprs_.size() + 1, + std::numeric_limits::max()); + exprs_.push_back(expr); + } + const LinearExpression2Index result = + negated ? 
NegationOf(LinearExpression2Index(2 * it->second)) + : LinearExpression2Index(2 * it->second); + + if (!inserted) return result; + + // Update our special coeff=1 lookup table. + if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { + // +2 to handle possible negation. + const int new_size = + std::max(expr.vars[0].value(), expr.vars[1].value()) + 2; + if (new_size > coeff_one_var_lookup_.size()) { + coeff_one_var_lookup_.resize(new_size); + } + LinearExpression2 neg_expr = original_expr; + neg_expr.Negate(); + coeff_one_var_lookup_[original_expr.vars[0]].push_back(result); + coeff_one_var_lookup_[original_expr.vars[1]].push_back(result); + coeff_one_var_lookup_[neg_expr.vars[1]].push_back(NegationOf(result)); + coeff_one_var_lookup_[neg_expr.vars[0]].push_back(NegationOf(result)); + } + + // Update our per-variable and per-pair lookup tables. + IntegerVariable var1 = PositiveVariable(expr.vars[0]); + IntegerVariable var2 = PositiveVariable(expr.vars[1]); + if (var1 > var2) std::swap(var1, var2); + var_pair_to_bounds_[{var1, var2}].push_back(result); + var_to_bounds_[var1].push_back(result); + var_to_bounds_[var2].push_back(result); + + return result; +} + void Linear2Watcher::NotifyBoundChanged(LinearExpression2 expr) { DCHECK(expr.IsCanonicalized()); DCHECK_EQ(expr.DivideByGcd(), 1); @@ -75,115 +123,51 @@ int64_t Linear2Watcher::VarTimestamp(IntegerVariable var) { return var < var_timestamp_.size() ? 
var_timestamp_[var] : 0; } -std::pair RootLevelLinear2Bounds::Add(LinearExpression2 expr, - IntegerValue lb, - IntegerValue ub) { - using AddResult = BestBinaryRelationBounds::AddResult; - const IntegerValue zero_level_lb = integer_trail_->LevelZeroLowerBound(expr); +bool RootLevelLinear2Bounds::AddUpperBound(LinearExpression2Index index, + IntegerValue ub) { + const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); const IntegerValue zero_level_ub = integer_trail_->LevelZeroUpperBound(expr); - if (lb <= zero_level_lb && ub >= zero_level_ub) { - return {false, false}; - } - // Don't store one of the bounds if it is trivial. - if (lb <= zero_level_lb) { - lb = kMinIntegerValue; - } if (ub >= zero_level_ub) { - ub = kMaxIntegerValue; + return false; } - expr.CanonicalizeAndUpdateBounds(lb, ub); - const auto [status_lb, status_ub] = root_level_relations_.Add(expr, lb, ub); + if (best_upper_bounds_.size() <= index) { + best_upper_bounds_.resize(index.value() + 1, kMaxIntegerValue); + } + if (ub >= best_upper_bounds_[index]) { + return false; + } + best_upper_bounds_[index] = ub; - const bool lb_restricted = - status_lb == AddResult::ADDED || status_lb == AddResult::UPDATED; - const bool ub_restricted = - status_ub == AddResult::ADDED || status_ub == AddResult::UPDATED; - if (!lb_restricted && !ub_restricted) return {false, false}; - - non_trivial_bounds_->AddOrGet(expr); ++num_updates_; linear2_watcher_->NotifyBoundChanged(expr); - // Update our special coeff=1 lookup table. - if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { - // +2 to handle possible negation. - const int new_size = - std::max(expr.vars[0].value(), expr.vars[1].value()) + 2; - if (new_size > coeff_one_var_lookup_.size()) { - coeff_one_var_lookup_.resize(new_size); - } - if (status_lb == AddResult::ADDED) { - // First time added to root_level_relations_. 
- coeff_one_var_lookup_[NegationOf(expr.vars[0])].push_back( - NegationOf(expr.vars[1])); - coeff_one_var_lookup_[NegationOf(expr.vars[1])].push_back( - NegationOf(expr.vars[0])); - } - if (status_ub == AddResult::ADDED) { - coeff_one_var_lookup_[expr.vars[0]].push_back(expr.vars[1]); - coeff_one_var_lookup_[expr.vars[1]].push_back(expr.vars[0]); + // Share. + // + // TODO(user): It seems we could change the canonicalization to only use + // positive variable? that would simplify a bit the code here and not make it + // worse elsewhere? + if (shared_linear2_bounds_ != nullptr) { + const IntegerValue lb = -LevelZeroUpperBound(NegationOf(index)); + const int proto_var0 = + cp_model_mapping_->GetProtoVariableFromIntegerVariable( + PositiveVariable(expr.vars[0])); + const int proto_var1 = + cp_model_mapping_->GetProtoVariableFromIntegerVariable( + PositiveVariable(expr.vars[1])); + if (proto_var0 >= 0 && proto_var1 >= 0) { + // This is also a relation between cp_model proto variable. Share it! + // Note that since expr is canonicalized, this one should too. + SharedLinear2Bounds::Key key; + key.vars[0] = proto_var0; + key.coeffs[0] = + VariableIsPositive(expr.vars[0]) ? expr.coeffs[0] : -expr.coeffs[0]; + key.vars[1] = proto_var1; + key.coeffs[1] = + VariableIsPositive(expr.vars[1]) ? expr.coeffs[1] : -expr.coeffs[1]; + shared_linear2_bounds_->Add(shared_linear2_bounds_id_, key, lb, ub); } } - - // Update our per-variable and per-pair lookup tables. 
- IntegerVariable var1 = PositiveVariable(expr.vars[0]); - IntegerVariable var2 = PositiveVariable(expr.vars[1]); - if (var1 > var2) std::swap(var1, var2); - - auto [it_var, inserted] = var_to_bounds_vector_index_.insert({expr, {0, 0}}); - for (const IntegerVariable var : {var1, var2}) { - auto& var_bounds = var_to_bounds_[var]; - if (inserted) { - if (var == var1) { - it_var->second.first = var_bounds.size(); - } else { - it_var->second.second = var_bounds.size(); - } - var_bounds.push_back({expr, lb, ub}); - } else { - const int index = - (var == var1) ? it_var->second.first : it_var->second.second; - DCHECK_LT(index, var_bounds.size()); - std::tuple& var_bound = - var_bounds[index]; - if (status_lb == AddResult::ADDED || status_lb == AddResult::UPDATED) { - std::get<1>(var_bound) = lb; - } - if (status_ub == AddResult::ADDED || status_ub == AddResult::UPDATED) { - std::get<2>(var_bound) = ub; - } - } - } - - auto [it_pair, pair_inserted] = - var_pair_to_bounds_vector_index_.insert({expr, 0}); - DCHECK_EQ(inserted, pair_inserted); - auto& pair_bounds = var_pair_to_bounds_[{var1, var2}]; - if (pair_inserted) { - it_pair->second = pair_bounds.size(); - pair_bounds.push_back({expr, lb, ub}); - } else { - const int index = it_pair->second; - DCHECK_LT(index, pair_bounds.size()); - std::tuple& pair_bound = - pair_bounds[index]; - if (status_lb == AddResult::ADDED || status_lb == AddResult::UPDATED) { - std::get<1>(pair_bound) = lb; - } - if (status_ub == AddResult::ADDED || status_ub == AddResult::UPDATED) { - std::get<2>(pair_bound) = ub; - } - } - - return {lb_restricted, ub_restricted}; -} - -IntegerValue RootLevelLinear2Bounds::LevelZeroUpperBound( - LinearExpression2 expr) const { - // TODO(user): Remove the expression from the root_level_relations_ if the - // zero-level bound got more restrictive. 
- return std::min(integer_trail_->LevelZeroUpperBound(expr), - root_level_relations_.GetUpperBound(expr)); + return true; } RootLevelLinear2Bounds::~RootLevelLinear2Bounds() { @@ -209,38 +193,38 @@ RelationStatus RootLevelLinear2Bounds::GetLevelZeroStatus( } IntegerValue RootLevelLinear2Bounds::GetUpperBoundNoTrail( - LinearExpression2 expr) const { - DCHECK_EQ(expr.DivideByGcd(), 1); - DCHECK(expr.IsCanonicalized()); - return root_level_relations_.UpperBoundWhenCanonicalized(expr); + LinearExpression2Index index) const { + if (best_upper_bounds_.size() <= index) { + return kMaxIntegerValue; + } + return best_upper_bounds_[index]; } std::vector> RootLevelLinear2Bounds::GetSortedNonTrivialUpperBounds() const { - std::vector> result = - root_level_relations_.GetSortedNonTrivialUpperBounds(); - int new_size = 0; - for (int i = 0; i < result.size(); ++i) { - const auto& [expr, ub] = result[i]; + std::vector> result; + for (LinearExpression2Index index = LinearExpression2Index{0}; + index < best_upper_bounds_.size(); ++index) { + const IntegerValue ub = best_upper_bounds_[index]; + if (ub == kMaxIntegerValue) continue; + const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); if (ub < integer_trail_->LevelZeroUpperBound(expr)) { - result[new_size] = {expr, ub}; - ++new_size; + result.push_back({expr, ub}); } } - result.resize(new_size); + std::sort(result.begin(), result.end()); return result; } -// Return a list of (lb <= expr <= ub), with expr.vars[0] = var, where at -// least one of the bounds is non-trivial and the potential other non-trivial -// bound is tight. 
std::vector> RootLevelLinear2Bounds::GetAllBoundsContainingVariable( IntegerVariable var) const { std::vector> result; - auto it = var_to_bounds_.find(PositiveVariable(var)); - if (it == var_to_bounds_.end()) return {}; - for (const auto& [expr, lb, ub] : it->second) { + for (const LinearExpression2Index index : + non_trivial_bounds_->GetAllLinear2ContainingVariable(var)) { + const IntegerValue lb = -GetUpperBoundNoTrail(NegationOf(index)); + const IntegerValue ub = GetUpperBoundNoTrail(index); + const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); const IntegerValue trail_lb = integer_trail_->LevelZeroLowerBound(expr); const IntegerValue trail_ub = integer_trail_->LevelZeroUpperBound(expr); if (lb <= trail_lb && ub >= trail_ub) continue; @@ -271,12 +255,11 @@ std::vector> RootLevelLinear2Bounds::GetAllBoundsContainingVariables( IntegerVariable var1, IntegerVariable var2) const { std::vector> result; - std::pair key = {PositiveVariable(var1), - PositiveVariable(var2)}; - if (key.first > key.second) std::swap(key.first, key.second); - auto it = var_pair_to_bounds_.find(key); - if (it == var_pair_to_bounds_.end()) return {}; - for (const auto& [expr, lb, ub] : it->second) { + for (const LinearExpression2Index index : + non_trivial_bounds_->GetAllLinear2ContainingVariables(var1, var2)) { + const IntegerValue lb = -GetUpperBoundNoTrail(NegationOf(index)); + const IntegerValue ub = GetUpperBoundNoTrail(index); + const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); const IntegerValue trail_lb = integer_trail_->LevelZeroLowerBound(expr); const IntegerValue trail_ub = integer_trail_->LevelZeroUpperBound(expr); if (lb <= trail_lb && ub >= trail_ub) continue; @@ -304,10 +287,25 @@ RootLevelLinear2Bounds::GetAllBoundsContainingVariables( return result; } -void RootLevelLinear2Bounds::AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const { - 
root_level_relations_.AppendAllExpressionContaining(var_set, result); +std::vector +RootLevelLinear2Bounds::GetVariablesInSimpleRelation( + IntegerVariable var) const { + std::vector result; + for (const LinearExpression2Index index : + non_trivial_bounds_->GetAllLinear2ContainingVariableWithCoeffOne(var)) { + const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + const IntegerVariable other = + (expr.vars[0] == var ? expr.vars[1] : expr.vars[0]); + DCHECK_EQ(expr.coeffs[0], 1); + DCHECK_EQ(expr.coeffs[1], 1); + DCHECK((expr.vars[0] == var && expr.vars[1] == other) || + (expr.vars[0] == other && expr.vars[1] == var)); + if (GetUpperBoundNoTrail(index) < + integer_trail_->LevelZeroUpperBound(expr)) { + result.push_back(other); + } + } + return result; } EnforcedLinear2Bounds::~EnforcedLinear2Bounds() { @@ -319,13 +317,8 @@ EnforcedLinear2Bounds::~EnforcedLinear2Bounds() { } void EnforcedLinear2Bounds::PushConditionalRelation( - absl::Span enforcements, LinearExpression2 expr, + absl::Span enforcements, LinearExpression2Index index, IntegerValue rhs) { - expr.SimpleCanonicalization(); - if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { - return; - } - // This must be currently true. 
if (DEBUG_MODE) { for (const Literal l : enforcements) { @@ -334,24 +327,25 @@ void EnforcedLinear2Bounds::PushConditionalRelation( } if (enforcements.empty() || trail_->CurrentDecisionLevel() == 0) { - root_level_bounds_->AddUpperBound(expr, rhs); + root_level_bounds_->AddUpperBound(index, rhs); return; } - const IntegerValue gcd = expr.DivideByGcd(); - rhs = FloorRatio(rhs, gcd); - - if (rhs >= root_level_bounds_->LevelZeroUpperBound(expr)) return; + if (rhs >= root_level_bounds_->LevelZeroUpperBound(index)) return; + const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); linear2_watcher_->NotifyBoundChanged(expr); ++num_conditional_relation_updates_; const int new_index = conditional_stack_.size(); - const auto [it, inserted] = conditional_relations_.insert({expr, new_index}); - if (inserted) { - non_trivial_bounds_->AddOrGet(expr); + if (conditional_relations_.size() <= index) { + conditional_relations_.resize(index.value() + 1, -1); + } + if (conditional_relations_[index] == -1) { + conditional_relations_[index] = new_index; CreateLevelEntryIfNeeded(); - conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, expr, enforcements); + conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, index, + enforcements); if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { const int new_size = @@ -363,13 +357,13 @@ void EnforcedLinear2Bounds::PushConditionalRelation( conditional_var_lookup_[expr.vars[1]].push_back(expr.vars[0]); } } else { - const int prev_entry = it->second; + const int prev_entry = conditional_relations_[index]; if (rhs >= conditional_stack_[prev_entry].rhs) return; // Update. 
- it->second = new_index; + conditional_relations_[index] = new_index; CreateLevelEntryIfNeeded(); - conditional_stack_.emplace_back(prev_entry, rhs, expr, enforcements); + conditional_stack_.emplace_back(prev_entry, rhs, index, enforcements); } } @@ -392,15 +386,15 @@ void EnforcedLinear2Bounds::SetLevel(int level) { if (back.prev_entry != -1) { conditional_relations_[back.key] = back.prev_entry; } else { - conditional_relations_.erase(back.key); + conditional_relations_[back.key] = -1; + const LinearExpression2 expr = + non_trivial_bounds_->GetExpression(back.key); - if (back.key.coeffs[0] == 1 && back.key.coeffs[1] == 1) { - DCHECK_EQ(conditional_var_lookup_[back.key.vars[0]].back(), - back.key.vars[1]); - DCHECK_EQ(conditional_var_lookup_[back.key.vars[1]].back(), - back.key.vars[0]); - conditional_var_lookup_[back.key.vars[0]].pop_back(); - conditional_var_lookup_[back.key.vars[1]].pop_back(); + if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { + DCHECK_EQ(conditional_var_lookup_[expr.vars[0]].back(), expr.vars[1]); + DCHECK_EQ(conditional_var_lookup_[expr.vars[1]].back(), expr.vars[0]); + conditional_var_lookup_[expr.vars[0]].pop_back(); + conditional_var_lookup_[expr.vars[1]].pop_back(); } } conditional_stack_.pop_back(); @@ -410,42 +404,42 @@ void EnforcedLinear2Bounds::SetLevel(int level) { } void EnforcedLinear2Bounds::AddReasonForUpperBoundLowerThan( - LinearExpression2 expr, IntegerValue ub, + LinearExpression2Index index, IntegerValue ub, std::vector* literal_reason, std::vector* /*unused*/) const { - expr.SimpleCanonicalization(); - if (ub >= root_level_bounds_->LevelZeroUpperBound(expr)) return; - const IntegerValue gcd = expr.DivideByGcd(); - const auto it = conditional_relations_.find(expr); - DCHECK(it != conditional_relations_.end()); + if (ub >= root_level_bounds_->LevelZeroUpperBound(index)) return; + DCHECK_LT(index, conditional_relations_.size()); + const int entry_index = conditional_relations_[index]; + DCHECK_NE(entry_index, -1); - const 
ConditionalEntry& entry = conditional_stack_[it->second]; + const ConditionalEntry& entry = conditional_stack_[entry_index]; if (DEBUG_MODE) { for (const Literal l : entry.enforcements) { CHECK(trail_->Assignment().LiteralIsTrue(l)); } } - DCHECK_LE(CapProdI(gcd, entry.rhs), ub); + DCHECK_LE(entry.rhs, ub); for (const Literal l : entry.enforcements) { literal_reason->push_back(l.Negated()); } } IntegerValue EnforcedLinear2Bounds::GetUpperBoundFromEnforced( - LinearExpression2 expr) const { - DCHECK_EQ(expr.DivideByGcd(), 1); - DCHECK(expr.IsCanonicalized()); - const auto it = conditional_relations_.find(expr); - if (it == conditional_relations_.end()) { + LinearExpression2Index index) const { + if (index >= conditional_relations_.size()) { + return kMaxIntegerValue; + } + const int entry_index = conditional_relations_[index]; + if (entry_index == -1) { return kMaxIntegerValue; } else { - const ConditionalEntry& entry = conditional_stack_[it->second]; + const ConditionalEntry& entry = conditional_stack_[entry_index]; if (DEBUG_MODE) { for (const Literal l : entry.enforcements) { CHECK(trail_->Assignment().LiteralIsTrue(l)); } } - DCHECK_LT(entry.rhs, root_level_bounds_->LevelZeroUpperBound(expr)); + DCHECK_LT(entry.rhs, root_level_bounds_->LevelZeroUpperBound(index)); return entry.rhs; } } @@ -569,7 +563,7 @@ void TransitivePrecedencesEvaluator::Build() { } VLOG(2) << "Full precedences. 
Work=" << work - << " Relations=" << root_level_bounds_->num_bounds(); + << " Relations=" << root_relations_sorted.size(); } void TransitivePrecedencesEvaluator::ComputeFullPrecedences( @@ -738,16 +732,6 @@ void EnforcedLinear2Bounds::CollectPrecedences( } } -void EnforcedLinear2Bounds::AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const { - for (const auto& entry : conditional_stack_) { - if (!var_set[PositiveVariable(entry.key.vars[0])]) continue; - if (!var_set[PositiveVariable(entry.key.vars[1])]) continue; - result->push_back(entry.key); - } -} - namespace { void AppendLowerBoundReasonIfValid(IntegerVariable var, @@ -1828,6 +1812,7 @@ Linear2BoundsFromLinear3::Linear2BoundsFromLinear3(Model* model) bool Linear2BoundsFromLinear3::AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub) { expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return false; // At level zero, just add it to root_level_bounds_. 
if (trail_->CurrentDecisionLevel() == 0) { @@ -1900,16 +1885,6 @@ void Linear2BoundsFromLinear3::AddReasonForUpperBoundLowerThan( integer_reason->push_back(affine.LowerOrEqual(CapProdI(ub + 1, divisor) - 1)); } -void Linear2BoundsFromLinear3::AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const { - for (const auto& [expr, unused] : best_affine_ub_) { - if (!var_set[PositiveVariable(expr.vars[0])]) continue; - if (!var_set[PositiveVariable(expr.vars[1])]) continue; - result->push_back(expr); - } -} - IntegerValue Linear2Bounds::UpperBound(LinearExpression2 expr) const { expr.SimpleCanonicalization(); if (expr.coeffs[0] == 0) { @@ -1918,8 +1893,11 @@ IntegerValue Linear2Bounds::UpperBound(LinearExpression2 expr) const { DCHECK_NE(expr.coeffs[1], 0); const IntegerValue gcd = expr.DivideByGcd(); IntegerValue ub = integer_trail_->UpperBound(expr); - ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(expr)); - ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(expr)); + const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); + if (index != kNoLinearExpression2Index) { + ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(index)); + ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(index)); + } ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(expr)); return CapProdI(gcd, ub); } @@ -1932,8 +1910,12 @@ IntegerValue Linear2Bounds::NonTrivialUpperBoundForGcd1( } DCHECK_NE(expr.coeffs[1], 0); DCHECK_EQ(1, expr.DivideByGcd()); - IntegerValue ub = root_level_bounds_->GetUpperBoundNoTrail(expr); - ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(expr)); + IntegerValue ub = kMaxIntegerValue; + const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); + if (index != kNoLinearExpression2Index) { + ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(index)); + ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(index)); + } ub = 
std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(expr)); return ub; } @@ -1942,20 +1924,25 @@ void Linear2Bounds::AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, std::vector* integer_reason) const { - expr.SimpleCanonicalization(); - const IntegerValue gcd = expr.DivideByGcd(); - ub = FloorRatio(ub, gcd); DCHECK_LE(UpperBound(expr), ub); // Explanation are by order of preference, with no reason needed first. - if (root_level_bounds_->LevelZeroUpperBound(expr) <= ub) { + if (integer_trail_->LevelZeroUpperBound(expr) <= ub) { return; } - + expr.SimpleCanonicalization(); + const IntegerValue gcd = expr.DivideByGcd(); + ub = FloorRatio(ub, gcd); + const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); // This one is a single literal. - if (enforced_bounds_->GetUpperBoundFromEnforced(expr) <= ub) { - return enforced_bounds_->AddReasonForUpperBoundLowerThan( - expr, ub, literal_reason, integer_reason); + if (index != kNoLinearExpression2Index) { + if (root_level_bounds_->GetUpperBoundNoTrail(index) <= ub) { + return; + } + if (enforced_bounds_->GetUpperBoundFromEnforced(index) <= ub) { + return enforced_bounds_->AddReasonForUpperBoundLowerThan( + index, ub, literal_reason, integer_reason); + } } // This one is a single var upper bound. 
@@ -1975,16 +1962,5 @@ void Linear2Bounds::AddReasonForUpperBoundLowerThan( integer_reason); } -absl::Span -Linear2Bounds::GetAllExpressionsWithPotentialNonTrivialBounds( - Bitset64::ConstView var_set) const { - tmp_expressions_.clear(); - root_level_bounds_->AppendAllExpressionContaining(var_set, &tmp_expressions_); - enforced_bounds_->AppendAllExpressionContaining(var_set, &tmp_expressions_); - linear3_bounds_->AppendAllExpressionContaining(var_set, &tmp_expressions_); - gtl::STLSortAndRemoveDuplicates(&tmp_expressions_); - return tmp_expressions_; -} - } // namespace sat } // namespace operations_research diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 392943ce63..586b28dd89 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -14,10 +14,10 @@ #ifndef OR_TOOLS_SAT_PRECEDENCES_H_ #define OR_TOOLS_SAT_PRECEDENCES_H_ +#include #include #include #include -#include #include #include #include @@ -31,6 +31,7 @@ #include "absl/types/span.h" #include "ortools/base/strong_vector.h" #include "ortools/graph/graph.h" +#include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include "ortools/sat/model.h" @@ -70,23 +71,14 @@ class Linear2WithPotentialNonTrivalBounds { // Returns a never-changing index for the given linear expression. // The expression must already be canonicalized and divided by its GCD. 
- LinearExpression2Index AddOrGet(LinearExpression2 expr) { - DCHECK(expr.IsCanonicalized()); - DCHECK_EQ(expr.DivideByGcd(), 1); - const bool negated = expr.NegateForCanonicalization(); - auto [it, inserted] = expr_to_index_.insert({expr, exprs_.size()}); - if (inserted) { - CHECK_LT(2 * exprs_.size() + 1, - std::numeric_limits::max()); - exprs_.push_back(expr); - } - const LinearExpression2Index positive_index(2 * it->second); - if (negated) { - return NegationOf(positive_index); - } else { - return positive_index; - } - } + LinearExpression2Index AddOrGet(LinearExpression2 expr); + + // Returns a never-changing index for the given linear expression if it is + // potentially non-trivial, otherwise returns kNoLinearExpression2Index. The + // expression must already be canonicalized and divided by its GCD. + LinearExpression2Index GetIndex(LinearExpression2 expr) const; + + LinearExpression2 GetExpression(LinearExpression2Index index) const; // Return all positive linear2 expressions that have a potentially non-trivial // bound. When calling this code it is often a good idea to check both the @@ -97,9 +89,45 @@ class Linear2WithPotentialNonTrivalBounds { return exprs_; } + // Return a list of all potentially non-trivial LinearExpression2Indexes + // containing a given variable. + absl::Span GetAllLinear2ContainingVariable( + IntegerVariable var) const; + + // Return a list of all potentially non-trivial LinearExpression2Indexes + // containing a given pair of variables. + absl::Span GetAllLinear2ContainingVariables( + IntegerVariable var1, IntegerVariable var2) const; + + // For a given variable `var`, return all linear expressions with both + // coefficients 1 that have a potentially non trivial upper bound. For + // convenience it also returns the other variable to cheaply build the + // linear2. Note that using negation one can also recover x + y >= lb and x - + // y <= ub. 
+ absl::Span + GetAllLinear2ContainingVariableWithCoeffOne(IntegerVariable var) const { + if (var >= coeff_one_var_lookup_.size()) return {}; + return coeff_one_var_lookup_[var]; + } + private: - util_intops::StrongVector exprs_; + std::vector exprs_; absl::flat_hash_map expr_to_index_; + + // Lookup table to find all the LinearExpression2 with a given variable and + // having both coefficient 1. + util_intops::StrongVector> + coeff_one_var_lookup_; + + // Map to implement GetAllBoundsContainingVariable(). + absl::flat_hash_map> + var_to_bounds_; + // Map to implement GetAllBoundsContainingVariables(). + absl::flat_hash_map, + absl::InlinedVector> + var_pair_to_bounds_; }; // Simple "watcher" class that will be notified if a linear2 bound changed. It @@ -138,7 +166,13 @@ class RootLevelLinear2Bounds { linear2_watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), non_trivial_bounds_( - model->GetOrCreate()) {} + model->GetOrCreate()), + cp_model_mapping_(model->GetOrCreate()), + shared_linear2_bounds_(model->Mutable()), + shared_linear2_bounds_id_( + shared_linear2_bounds_ == nullptr + ? 0 + : shared_linear2_bounds_->RegisterNewId(model->Name())) {} ~RootLevelLinear2Bounds(); @@ -147,16 +181,49 @@ class RootLevelLinear2Bounds { // Returns a pair saying whether the lower/upper bounds for this expr became // more restricted than what was currently stored. 
std::pair Add(LinearExpression2 expr, IntegerValue lb, - IntegerValue ub); + IntegerValue ub) { + const bool negated = expr.CanonicalizeAndUpdateBounds(lb, ub); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return {false, false}; + const LinearExpression2Index index = non_trivial_bounds_->AddOrGet(expr); + bool ub_changed = AddUpperBound(index, ub); + bool lb_changed = AddUpperBound(NegationOf(index), -lb); + if (negated) { + std::swap(lb_changed, ub_changed); + } + return {lb_changed, ub_changed}; + } + + bool AddUpperBound(LinearExpression2Index index, IntegerValue ub); // Same as above, but only update the upper bound. bool AddUpperBound(LinearExpression2 expr, IntegerValue ub) { - return Add(expr, kMinIntegerValue, ub).second; + expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return false; + const IntegerValue gcd = expr.DivideByGcd(); + ub = FloorRatio(ub, gcd); + return AddUpperBound(non_trivial_bounds_->AddOrGet(expr), ub); } - IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const; + IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const { + expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { + return integer_trail_->LevelZeroUpperBound(expr); + } + const IntegerValue gcd = expr.DivideByGcd(); + const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); + if (index == kNoLinearExpression2Index) { + return integer_trail_->LevelZeroUpperBound(expr); + } + return CapProdI(gcd, LevelZeroUpperBound(index)); + } - int64_t num_bounds() const { return root_level_relations_.num_bounds(); } + IntegerValue LevelZeroUpperBound(LinearExpression2Index index) const { + const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + // TODO(user): Remove the expression from the root_level_relations_ if + // the zero-level bound got more restrictive. 
+ return std::min(integer_trail_->LevelZeroUpperBound(expr), + GetUpperBoundNoTrail(index)); + } // Return a list of (expr <= ub) sorted by expr. They are guaranteed to be // better than the trivial upper bound. @@ -183,11 +250,8 @@ class RootLevelLinear2Bounds { // For a given variable `var`, return all variables `other` so that // LinearExpression2(var, other, 1, 1) has a non trivial upper bound. // Note that using negation one can also recover x + y >= lb and x - y <= ub. - absl::Span GetVariablesInSimpleRelation( - IntegerVariable var) const { - if (var >= coeff_one_var_lookup_.size()) return {}; - return coeff_one_var_lookup_[var]; - } + std::vector GetVariablesInSimpleRelation( + IntegerVariable var) const; RelationStatus GetLevelZeroStatus(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) const; @@ -197,47 +261,21 @@ class RootLevelLinear2Bounds { // behavior from LevelZeroUpperBound() that would return the implied // zero-level bound from the trail for trivial ones. `expr` must be // canonicalized and gcd-reduced. - IntegerValue GetUpperBoundNoTrail(LinearExpression2 expr) const; - - void AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const; + IntegerValue GetUpperBoundNoTrail(LinearExpression2Index index) const; private: IntegerTrail* integer_trail_; Linear2Watcher* linear2_watcher_; SharedStatistics* shared_stats_; Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; + CpModelMapping* cp_model_mapping_; + SharedLinear2Bounds* shared_linear2_bounds_; // Might be nullptr. - // Lookup table to find all the LinearExpression2 with a given variable and - // having both coefficient 1. - util_intops::StrongVector> - coeff_one_var_lookup_; + const int shared_linear2_bounds_id_; - // TODO(user): use data structures that consume less memory. A single - // std::vector and hash maps having the index as value - // could be enough. 
- absl::flat_hash_map< - IntegerVariable, - absl::InlinedVector< - std::tuple, 2>> - var_to_bounds_; - // Map to implement GetAllBoundsContainingVariables(). - absl::flat_hash_map< - std::pair, - absl::InlinedVector< - std::tuple, 1>> - var_pair_to_bounds_; - // Data structure to quickly update var_to_bounds_. Return the index where - // this linear expression appear in the vector for the first and second - // variable. - absl::flat_hash_map> - var_to_bounds_vector_index_; - absl::flat_hash_map var_pair_to_bounds_vector_index_; + util_intops::StrongVector + best_upper_bounds_; - // TODO(user): Also push them to a global shared repository after - // remapping IntegerVariable to proto indices. - BestBinaryRelationBounds root_level_relations_; int64_t num_updates_ = 0; }; @@ -338,7 +376,17 @@ class EnforcedLinear2Bounds : public ReversibleInterface { // If expr is not a proper linear2 expression (e.g. 0*x + y, y + y, y - y) it // will be ignored. void PushConditionalRelation(absl::Span enforcements, - LinearExpression2 expr, IntegerValue rhs); + LinearExpression2Index index, IntegerValue rhs); + + void PushConditionalRelation(absl::Span enforcements, + LinearExpression2 expr, IntegerValue rhs) { + expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return; + const IntegerValue gcd = expr.DivideByGcd(); + rhs = FloorRatio(rhs, gcd); + return PushConditionalRelation(enforcements, + non_trivial_bounds_->AddOrGet(expr), rhs); + } // Called each time we change decision level. void SetLevel(int level) final; @@ -365,18 +413,13 @@ class EnforcedLinear2Bounds : public ReversibleInterface { // Low-level function that returns the upper bound if there is some enforced // relations only. Otherwise always returns kMaxIntegerValue. // `expr` must be canonicalized and gcd-reduced. 
- IntegerValue GetUpperBoundFromEnforced(LinearExpression2 expr) const; + IntegerValue GetUpperBoundFromEnforced(LinearExpression2Index index) const; void AddReasonForUpperBoundLowerThan( - LinearExpression2 expr, IntegerValue ub, + LinearExpression2Index index, IntegerValue ub, std::vector* literal_reason, std::vector* integer_reason) const; - // Note: might contain duplicate expressions. - void AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const; - private: void CreateLevelEntryIfNeeded(); @@ -395,13 +438,13 @@ class EnforcedLinear2Bounds : public ReversibleInterface { // TODO(user): this kind of reversible hash_map is already implemented in // other part of the code. Consolidate. struct ConditionalEntry { - ConditionalEntry(int p, IntegerValue r, LinearExpression2 k, + ConditionalEntry(int p, IntegerValue r, LinearExpression2Index k, absl::Span e) : prev_entry(p), rhs(r), key(k), enforcements(e.begin(), e.end()) {} int prev_entry; IntegerValue rhs; - LinearExpression2 key; + LinearExpression2Index key; absl::InlinedVector enforcements; }; std::vector conditional_stack_; @@ -409,7 +452,7 @@ class EnforcedLinear2Bounds : public ReversibleInterface { // This is always stored in the form (expr <= rhs). // The conditional relations contains indices in the conditional_stack_. - absl::flat_hash_map conditional_relations_; + util_intops::StrongVector conditional_relations_; // Store for each variable x, the variables y that appears alongside it in // lit => x + y <= ub. Note that conditional_var_lookup_ is updated on @@ -510,11 +553,6 @@ class Linear2BoundsFromLinear3 { // will replace it and returns true, otherwise it returns false. bool AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub); - // Warning, the order will not be deterministic. - void AppendAllExpressionContaining( - Bitset64::ConstView var_set, - std::vector* result) const; - // Most users should just use Linear2Bounds::UpperBound() instead. 
// // Returns the upper bound only if there is some relations coming from a @@ -601,7 +639,9 @@ class Linear2Bounds { : integer_trail_(model->GetOrCreate()), root_level_bounds_(model->GetOrCreate()), enforced_bounds_(model->GetOrCreate()), - linear3_bounds_(model->GetOrCreate()) {} + linear3_bounds_(model->GetOrCreate()), + non_trivial_bounds_( + model->GetOrCreate()) {} // Returns the best known upper-bound of the given LinearExpression2 at the // current decision level. If its explanation is needed, it can be queried @@ -616,31 +656,12 @@ class Linear2Bounds { // don't want the trivial bounds. IntegerValue NonTrivialUpperBoundForGcd1(LinearExpression2 expr) const; - // Returns all known expressions with potentially non-trivial bounds that - // involves two variable whose positive version is marked in 'vars'. - absl::Span - GetAllExpressionsWithPotentialNonTrivialBounds( - Bitset64::ConstView var_set) const; - - // Returns a temporary bitset, cleared, and resized for all existing - // variables. - // - // If we have many class calling - // GetAllExpressionsWithPotentialNonTrivialBounds() it is important that not - // all of them have a O(num_variables) vector when the same one can be used. 
- SparseBitset* GetTemporyClearedAndResizedBitset() { - tmp_bitset_.ClearAndResize(integer_trail_->NumIntegerVariables()); - return &tmp_bitset_; - } - private: IntegerTrail* integer_trail_; RootLevelLinear2Bounds* root_level_bounds_; EnforcedLinear2Bounds* enforced_bounds_; Linear2BoundsFromLinear3* linear3_bounds_; - - mutable std::vector tmp_expressions_; - SparseBitset tmp_bitset_; + Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; }; // Detects if at least one of a subset of linear of size 2 or 1, touching the @@ -1000,6 +1021,58 @@ inline std::function ConditionalLowerOrEqualWithOffset( }; } +inline LinearExpression2Index Linear2WithPotentialNonTrivalBounds::GetIndex( + LinearExpression2 expr) const { + DCHECK(expr.IsCanonicalized()); + DCHECK_EQ(expr.DivideByGcd(), 1); + const bool negated = expr.NegateForCanonicalization(); + auto it = expr_to_index_.find(expr); + if (it == expr_to_index_.end()) return kNoLinearExpression2Index; + + const LinearExpression2Index positive_index(2 * it->second); + if (negated) { + return NegationOf(positive_index); + } else { + return positive_index; + } +} + +inline LinearExpression2 Linear2WithPotentialNonTrivalBounds::GetExpression( + LinearExpression2Index index) const { + DCHECK_NE(index, kNoLinearExpression2Index); + const int lookup_index = index.value() / 2; + DCHECK_LT(lookup_index, exprs_.size()); + if (Linear2IsPositive(index)) { + return exprs_[lookup_index]; + } else { + LinearExpression2 result = exprs_[lookup_index]; + result.Negate(); + return result; + } +} + +inline absl::Span +Linear2WithPotentialNonTrivalBounds::GetAllLinear2ContainingVariable( + IntegerVariable var) const { + const IntegerVariable positive_var = PositiveVariable(var); + auto it = var_to_bounds_.find(positive_var); + if (it == var_to_bounds_.end()) return {}; + return it->second; +} + +inline absl::Span +Linear2WithPotentialNonTrivalBounds::GetAllLinear2ContainingVariables( + IntegerVariable var1, IntegerVariable var2) const { + 
IntegerVariable positive_var1 = PositiveVariable(var1); + IntegerVariable positive_var2 = PositiveVariable(var2); + if (positive_var1 > positive_var2) { + std::swap(positive_var1, positive_var2); + } + auto it = var_pair_to_bounds_.find({positive_var1, positive_var2}); + if (it == var_pair_to_bounds_.end()) return {}; + return it->second; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index 0f911b9144..159469f659 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -190,6 +190,8 @@ TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { auto* lin2_bounds = model.GetOrCreate(); auto* integer_trail = model.GetOrCreate(); auto* precedences = model.GetOrCreate(); + auto* non_trivial_bounds = + model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); const Literal l(model.Add(NewBooleanVariable()), true); @@ -200,26 +202,25 @@ TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { precedences->PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 15); precedences->PushConditionalRelation({l}, LinearExpression2(a, b, 1, 1), 20); + LinearExpression2 expr_a_plus_b = + LinearExpression2::Difference(a, NegationOf(b)); + expr_a_plus_b.SimpleCanonicalization(); // We only keep the best one. - EXPECT_EQ( - lin2_bounds->UpperBound(LinearExpression2::Difference(a, NegationOf(b))), - 15); + EXPECT_EQ(lin2_bounds->UpperBound(expr_a_plus_b), 15); std::vector literal_reason; std::vector integer_reason; precedences->AddReasonForUpperBoundLowerThan( - LinearExpression2::Difference(a, NegationOf(b)), 15, &literal_reason, + non_trivial_bounds->AddOrGet(expr_a_plus_b), 15, &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, ElementsAre(l.Negated())); // Backtrack works. 
EXPECT_TRUE(sat_solver->ResetToLevelZero()); - EXPECT_EQ( - lin2_bounds->UpperBound(LinearExpression2::Difference(a, NegationOf(b))), - 200); + EXPECT_EQ(lin2_bounds->UpperBound(expr_a_plus_b), 200); literal_reason.clear(); integer_reason.clear(); precedences->AddReasonForUpperBoundLowerThan( - LinearExpression2::Difference(a, NegationOf(b)), kMaxIntegerValue, + non_trivial_bounds->AddOrGet(expr_a_plus_b), kMaxIntegerValue, &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, IsEmpty()); } diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index b013e7d314..60901fc1c0 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -24,7 +24,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 326 +// NEXT TAG: 327 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -703,6 +703,13 @@ message SatParameters { // Allows sharing of the bounds of modified variables at level 0. optional bool share_level_zero_bounds = 114 [default = true]; + // Allows sharing of the bounds on linear2 discovered at level 0. This is + // mainly interesting on scheduling type of problems when we branch on + // precedences. + // + // Warning: This currently non-deterministic. + optional bool share_linear2_bounds = 326 [default = false]; + // Allows sharing of new learned binary clause between workers. 
optional bool share_binary_clauses = 203 [default = true]; diff --git a/ortools/sat/sat_runner.cc b/ortools/sat/sat_runner.cc index c31a0e2b27..c1dceb038b 100644 --- a/ortools/sat/sat_runner.cc +++ b/ortools/sat/sat_runner.cc @@ -16,9 +16,11 @@ #include #include #include +#include #include #include +#include "absl/base/thread_annotations.h" #include "absl/flags/flag.h" #include "absl/flags/parse.h" #include "absl/flags/usage.h" @@ -30,6 +32,8 @@ #include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" +#include "absl/types/span.h" #include "google/protobuf/arena.h" #include "google/protobuf/text_format.h" #include "ortools/base/helpers.h" @@ -45,6 +49,7 @@ #include "ortools/sat/synchronization.h" #include "ortools/util/file_util.h" #include "ortools/util/logging.h" +#include "ortools/util/sigint.h" #include "ortools/util/sorted_interval_list.h" ABSL_FLAG( @@ -102,8 +107,69 @@ std::string ExtractName(absl::string_view full_filename) { return filename; } -void LogInPbCompetitionFormat(int num_variables, bool has_objective, - Model* model, SatParameters* parameters) { +class LastSolutionPrinter { + public: + // Note that is prints the solution in the PB competition format. 
+ void MaybePrintLastSolution() { + absl::MutexLock lock(&mutex_); + if (last_solution_printed_) return; + last_solution_printed_ = true; + + if (last_solution_.empty()) { + std::cout << "s UNKNOWN" << std::endl; + } else { + std::cout << "s SATISFIABLE" << std::endl; + std::string line; + for (int i = 0; i < num_variables_; ++i) { + if (last_solution_[i]) { + absl::StrAppend(&line, "x", i + 1, " "); + } else { + absl::StrAppend(&line, "-x", i + 1, " "); + } + if (line.size() >= 75) { + std::cout << "v " << line << std::endl; + line.clear(); + } + } + if (!line.empty()) { + std::cout << "v " << line << std::endl; + } + } + } + + void set_num_variables(int num_variables) { num_variables_ = num_variables; } + + void set_last_solution(absl::Span solution) { + absl::MutexLock lock(&mutex_); + if (last_solution_printed_) return; + last_solution_.assign(solution.begin(), solution.end()); + } + + // Returns false if the solution has already been printed, else mark it as + // printed by caller code. 
+ bool mark_last_solution_printed() { + const absl::MutexLock lock(&mutex_); + if (last_solution_printed_) { + return false; + } + last_solution_printed_ = true; + return true; + } + + private: + int num_variables_ = 0; + std::vector last_solution_ ABSL_GUARDED_BY(mutex_); + bool last_solution_printed_ ABSL_GUARDED_BY(mutex_) = false; + absl::Mutex mutex_; +}; + +void LogInPbCompetitionFormat( + int num_variables, bool has_objective, Model* model, + SatParameters* parameters, + std::shared_ptr last_solution_printer) { + CHECK(last_solution_printer != nullptr); + last_solution_printer->set_num_variables(num_variables); + const auto log_callback = [](const std::string& multi_line_input) { if (multi_line_input.empty()) { std::cout << "c" << std::endl; @@ -118,55 +184,60 @@ void LogInPbCompetitionFormat(int num_variables, bool has_objective, model->GetOrCreate()->AddInfoLoggingCallback(log_callback); parameters->set_log_to_stdout(false); - const auto response_callback = [](const CpSolverResponse& r) { + const auto response_callback = [last_solution_printer]( + const CpSolverResponse& r) { std::cout << "o " << static_cast(r.objective_value()) << std::endl; + last_solution_printer->set_last_solution(r.solution()); }; model->Add(NewFeasibleSolutionObserver(response_callback)); - const auto final_response_callback = [num_variables, - has_objective](CpSolverResponse* r) { - switch (r->status()) { - case CpSolverStatus::OPTIMAL: - if (has_objective) { - std::cout << "s OPTIMUM FOUND " << std::endl; - } else { - std::cout << "s SATISFIABLE" << std::endl; + const auto final_response_callback = + [num_variables, has_objective, + last_solution_printer](CpSolverResponse* r) { + if (!last_solution_printer->mark_last_solution_printed()) return; + + switch (r->status()) { + case CpSolverStatus::OPTIMAL: + if (has_objective) { + std::cout << "s OPTIMUM FOUND " << std::endl; + } else { + std::cout << "s SATISFIABLE" << std::endl; + } + break; + case CpSolverStatus::FEASIBLE: + 
std::cout << "s SATISFIABLE" << std::endl; + break; + case CpSolverStatus::INFEASIBLE: + std::cout << "s UNSATISFIABLE" << std::endl; + break; + case CpSolverStatus::MODEL_INVALID: + std::cout << "s UNSUPPORTED" << std::endl; + break; + case CpSolverStatus::UNKNOWN: + std::cout << "s UNKNOWN" << std::endl; + break; + default: + break; } - break; - case CpSolverStatus::FEASIBLE: - std::cout << "s SATISFIABLE" << std::endl; - break; - case CpSolverStatus::INFEASIBLE: - std::cout << "s UNSATISFIABLE" << std::endl; - break; - case CpSolverStatus::MODEL_INVALID: - std::cout << "s UNSUPPORTED" << std::endl; - break; - case CpSolverStatus::UNKNOWN: - std::cout << "s UNKNOWN" << std::endl; - break; - default: - break; - } - if (r->status() == CpSolverStatus::OPTIMAL || - r->status() == CpSolverStatus::FEASIBLE) { - std::string line; - for (int i = 0; i < num_variables; ++i) { - if (r->solution(i)) { - absl::StrAppend(&line, "x", i + 1, " "); - } else { - absl::StrAppend(&line, "-x", i + 1, " "); + if (r->status() == CpSolverStatus::OPTIMAL || + r->status() == CpSolverStatus::FEASIBLE) { + std::string line; + for (int i = 0; i < num_variables; ++i) { + if (r->solution(i)) { + absl::StrAppend(&line, "x", i + 1, " "); + } else { + absl::StrAppend(&line, "-x", i + 1, " "); + } + if (line.size() >= 75) { + std::cout << "v " << line << std::endl; + line.clear(); + } + } + if (!line.empty()) { + std::cout << "v " << line << std::endl; + } } - if (line.size() >= 75) { - std::cout << "v " << line << std::endl; - line.clear(); - } - } - if (!line.empty()) { - std::cout << "v " << line << std::endl; - } - } - }; + }; model->GetOrCreate()->AddFinalResponsePostprocessor( final_response_callback); } @@ -186,7 +257,8 @@ void SetInterleavedWorkers(SatParameters* parameters) { bool LoadProblem(const std::string& filename, absl::string_view hint_file, absl::string_view domain_file, CpModelProto* cp_model, - Model* model, SatParameters* parameters) { + Model* model, SatParameters* 
parameters, + std::shared_ptr last_solution_printer) { if (absl::EndsWith(filename, ".opb") || absl::EndsWith(filename, ".opb.bz2") || absl::EndsWith(filename, ".opb.gz") || absl::EndsWith(filename, ".wbo") || @@ -217,7 +289,7 @@ bool LoadProblem(const std::string& filename, absl::string_view hint_file, const int num_variables = reader.model_is_supported() ? reader.num_variables() : 1; LogInPbCompetitionFormat(num_variables, cp_model->has_objective(), model, - parameters); + parameters, last_solution_printer); } if (absl::GetFlag(FLAGS_force_interleave_search)) { SetInterleavedWorkers(parameters); @@ -310,9 +382,13 @@ int Run() { google::protobuf::Arena arena; CpModelProto* cp_model = google::protobuf::Arena::Create(&arena); + std::shared_ptr last_solution_printer; + if (absl::GetFlag(FLAGS_competition_mode)) { + last_solution_printer = std::make_shared(); + } if (!LoadProblem(absl::GetFlag(FLAGS_input), absl::GetFlag(FLAGS_hint_file), absl::GetFlag(FLAGS_domain_file), cp_model, &model, - ¶meters)) { + ¶meters, last_solution_printer)) { if (!absl::GetFlag(FLAGS_competition_mode)) { LOG(FATAL) << "Cannot load file '" << absl::GetFlag(FLAGS_input) << "'."; } @@ -329,6 +405,14 @@ int Run() { FingerprintRepeatedField(r.solution(), kDefaultFingerprintSeed)); })); } + + if (absl::GetFlag(FLAGS_competition_mode)) { + model.GetOrCreate()->Register([last_solution_printer]() { + last_solution_printer->MaybePrintLastSolution(); + exit(EXIT_SUCCESS); + }); + } + const CpSolverResponse response = SolveCpModel(*cp_model, &model); if (!absl::GetFlag(FLAGS_output).empty()) { diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index 0c1ed51803..18f37e7cfb 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -1386,14 +1386,27 @@ int UniqueClauseStream::NumLiteralsOfSize(int size) const { SharedClausesManager::SharedClausesManager(bool always_synchronize) : always_synchronize_(always_synchronize) {} -int 
SharedClausesManager::RegisterNewId(bool may_terminate_early) { +int SharedClausesManager::RegisterNewId(absl::string_view worker_name, + bool may_terminate_early) { absl::MutexLock mutex_lock(&mutex_); num_full_workers_ += may_terminate_early ? 0 : 1; const int id = id_to_last_processed_binary_clause_.size(); id_to_last_processed_binary_clause_.resize(id + 1, 0); id_to_last_returned_batch_.resize(id + 1, -1); id_to_last_finished_batch_.resize(id + 1, -1); - id_to_clauses_exported_.resize(id + 1, 0); + id_to_num_exported_.resize(id + 1, 0); + id_to_worker_name_.resize(id + 1); + id_to_worker_name_[id] = worker_name; + return id; +} + +int SharedLinear2Bounds::RegisterNewId(std::string worker_name) { + absl::MutexLock mutex_lock(&mutex_); + const int id = id_to_worker_name_.size(); + + id_to_stats_.resize(id + 1); + id_to_worker_name_.resize(id + 1); + id_to_worker_name_[id] = worker_name; return id; } @@ -1401,12 +1414,6 @@ bool SharedClausesManager::ShouldReadBatch(int reader_id, int writer_id) { return reader_id != writer_id; } -void SharedClausesManager::SetWorkerNameForId(int id, - absl::string_view worker_name) { - absl::MutexLock mutex_lock(&mutex_); - id_to_worker_name_[id] = worker_name; -} - void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) { if (lit2 < lit1) std::swap(lit1, lit2); const auto p = std::make_pair(lit1, lit2); @@ -1416,7 +1423,7 @@ void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) { if (inserted) { added_binary_clauses_.push_back(p); if (always_synchronize_) ++last_visible_binary_clause_; - id_to_clauses_exported_[id]++; + id_to_num_exported_[id]++; // Small optim. If the worker is already up to date with clauses to import, // we can mark this new clause as already seen. 
@@ -1429,7 +1436,7 @@ void SharedClausesManager::AddBinaryClause(int id, int lit1, int lit2) { void SharedClausesManager::AddBatch(int id, CompactVectorVector batch) { absl::MutexLock mutex_lock(&mutex_); - id_to_clauses_exported_[id] += batch.size(); + id_to_num_exported_[id] += batch.size(); pending_batches_.push_back(std::move(batch)); } @@ -1463,16 +1470,44 @@ void SharedClausesManager::GetUnseenBinaryClauses( void SharedClausesManager::LogStatistics(SolverLogger* logger) { absl::MutexLock mutex_lock(&mutex_); - absl::btree_map name_to_clauses; - for (int id = 0; id < id_to_clauses_exported_.size(); ++id) { - if (id_to_clauses_exported_[id] == 0) continue; - name_to_clauses[id_to_worker_name_[id]] = id_to_clauses_exported_[id]; + absl::btree_map name_to_table_line; + for (int id = 0; id < id_to_num_exported_.size(); ++id) { + if (id_to_num_exported_[id] == 0) continue; + name_to_table_line[id_to_worker_name_[id]] = id_to_num_exported_[id]; } - if (!name_to_clauses.empty()) { + if (!name_to_table_line.empty()) { std::vector> table; table.push_back({"Clauses shared", "Num"}); - for (const auto& entry : name_to_clauses) { - table.push_back({FormatName(entry.first), FormatCounter(entry.second)}); + for (const auto& [name, count] : name_to_table_line) { + table.push_back({FormatName(name), FormatCounter(count)}); + } + SOLVER_LOG(logger, FormatTable(table)); + } +} + +// TODO(user): Add some library to simplify this "transposition". Ideally we +// could merge small table with few columns. I am thinking list (row_name, +// col_name, count) + function that create table? 
+void SharedLinear2Bounds::LogStatistics(SolverLogger* logger) { + absl::MutexLock mutex_lock(&mutex_); + absl::btree_map name_to_table_line; + for (int id = 0; id < id_to_stats_.size(); ++id) { + const Stats stats = id_to_stats_[id]; + if (!stats.empty()) { + name_to_table_line[id_to_worker_name_[id]] = stats; + } + } + for (int import_id = 0; import_id < import_id_to_index_.size(); ++import_id) { + name_to_table_line[import_id_to_name_[import_id]].num_imported = + import_id_to_num_imported_[import_id]; + } + if (!name_to_table_line.empty()) { + std::vector> table; + table.push_back({"Linear2 shared", "New", "Updated", "Imported"}); + for (const auto& [name, stats] : name_to_table_line) { + table.push_back({FormatName(name), FormatCounter(stats.num_new), + FormatCounter(stats.num_update), + FormatCounter(stats.num_imported)}); } SOLVER_LOG(logger, FormatTable(table)); } @@ -1522,6 +1557,69 @@ void SharedClausesManager::Synchronize() { } } +void SharedLinear2Bounds::Add(int id, Key expr, IntegerValue lb, + IntegerValue ub) { + DCHECK(expr.IsCanonicalized()); + + absl::MutexLock mutex_lock(&mutex_); + auto [it, inserted] = shared_bounds_.insert({expr, {lb, ub}}); + if (inserted) { + // It is new. + id_to_stats_[id].num_new++; + newly_updated_keys_.push_back(expr); + } else { + // Update the individual bounds if the new ones are better. 
+ auto& bounds = it->second; + const bool update_lb = lb > bounds.first; + if (update_lb) bounds.first = lb; + const bool update_ub = ub < bounds.second; + if (update_ub) bounds.second = ub; + if (update_lb || update_ub) { + id_to_stats_[id].num_update++; + newly_updated_keys_.push_back(expr); + } + } +} + +int SharedLinear2Bounds::RegisterNewImportId(std::string name) { + absl::MutexLock mutex_lock(&mutex_); + const int import_id = import_id_to_index_.size(); + import_id_to_name_.push_back(name); + import_id_to_index_.push_back(0); + import_id_to_num_imported_.push_back(0); + return import_id; +} + +std::vector< + std::pair>> +SharedLinear2Bounds::NewlyUpdatedBounds(int import_id) { + std::vector>> result; + + absl::MutexLock mutex_lock(&mutex_); + MaybeCompressNewlyUpdateKeys(); + const int size = newly_updated_keys_.size(); + for (int i = import_id_to_index_[import_id]; i < size; ++i) { + const auto& key = newly_updated_keys_[i]; + result.push_back({key, shared_bounds_[key]}); + } + import_id_to_index_[import_id] = size; + return result; +} + +void SharedLinear2Bounds::MaybeCompressNewlyUpdateKeys() { + int min_index = 0; + for (const int index : import_id_to_index_) { + min_index = std::min(index, min_index); + } + if (min_index == 0) return; + + newly_updated_keys_.erase(newly_updated_keys_.begin(), + newly_updated_keys_.begin() + min_index); + for (int& index_ref : import_id_to_index_) { + index_ref -= min_index; + } +} + void SharedStatistics::AddStats( absl::Span> stats) { absl::MutexLock mutex_lock(&mutex_); diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index 38722c2264..a9cd377fdb 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -848,8 +848,7 @@ class SharedClausesManager { std::vector>* new_clauses); // Ids are used to identify which worker is exporting/importing clauses. 
- int RegisterNewId(bool may_terminate_early); - void SetWorkerNameForId(int id, absl::string_view worker_name); + int RegisterNewId(absl::string_view worker_name, bool may_terminate_early); // Search statistics. void LogStatistics(SolverLogger* logger); @@ -893,8 +892,100 @@ class SharedClausesManager { const bool always_synchronize_ = true; // Stats: - std::vector id_to_clauses_exported_; - absl::flat_hash_map id_to_worker_name_; + std::vector id_to_num_exported_ ABSL_GUARDED_BY(mutex_); + std::vector id_to_num_updated_ ABSL_GUARDED_BY(mutex_); + std::vector id_to_worker_name_ ABSL_GUARDED_BY(mutex_); +}; + +// A class that allows to exchange root level bounds on linear2. +// +// TODO(user): Add Synchronize() support and only publish new bounds when this +// is called. +class SharedLinear2Bounds { + public: + int RegisterNewId(std::string worker_name); + void LogStatistics(SolverLogger* logger); + + // This should only contain canonicalized expression. + // See the code for IsCanonicalized() for the definition. + struct Key { + int vars[2]; + IntegerValue coeffs[2]; + + bool IsCanonicalized() { + return coeffs[0] > 0 && coeffs[1] != 0 && vars[0] < vars[1] && + std::gcd(coeffs[0].value(), coeffs[1].value()) == 1; + } + + bool operator==(const Key& o) const { + return vars[0] == o.vars[0] && vars[1] == o.vars[1] && + coeffs[0] == o.coeffs[0] && coeffs[1] == o.coeffs[1]; + } + + template + friend H AbslHashValue(H h, const Key& k) { + return H::combine(std::move(h), k.vars[0], k.vars[1], k.coeffs[0], + k.coeffs[1]); + } + }; + + // Exports new bounds on the given expr (should be canonicalized). + void Add(int id, Key expr, IntegerValue lb, IntegerValue ub); + + // This is called less often, and maybe not every-worker that exports want to + // export, so we use a separate id space. Because we rely on hash map to + // check if a bound is new, it is not such a big deal that a worker re-read + // once the bounds it exported. 
+ int RegisterNewImportId(std::string name); + + // Returns the linear2 and their bounds. + // We only return changes since the last call with the same id. + std::vector>> + NewlyUpdatedBounds(int import_id); + + // This is not filled by NewlyUpdatedBounds() because we want to track the + // bounds that were not already known by the worker at the time of the import, + // and we don't have this information here. + void NotifyNumImported(int import_id, int num) { + absl::MutexLock mutex_lock(&mutex_); + import_id_to_num_imported_[import_id] += num; + } + + private: + void MaybeCompressNewlyUpdateKeys() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_); + + absl::Mutex mutex_; + + // The best known bounds for each key. + absl::flat_hash_map> shared_bounds_ + ABSL_GUARDED_BY(mutex_); + + // Ever growing list of updated position in shared_bounds_. + // Note that we do reduce it in MaybeCompressNewlyUpdateKeys(), but that + // requires all registered workers to have at least imported some bounds. + // + // TODO(user): use indirect addressing so that newly_updated_keys_ can just + // deal with indices, and it is a bit tighter memory wise? We also avoid + // hash-lookups on NewlyUpdatedBounds(). But since this is only called at + // level zero on new bounds, I don't think we care. + std::vector newly_updated_keys_; + + // For import. + std::vector import_id_to_name_ ABSL_GUARDED_BY(mutex_); + std::vector import_id_to_index_ ABSL_GUARDED_BY(mutex_); + std::vector import_id_to_num_imported_ ABSL_GUARDED_BY(mutex_); + + // Just for reporting at the end of the solve. + struct Stats { + int64_t num_new = 0; + int64_t num_update = 0; + int64_t num_imported = 0; // Copy of import_id_to_num_imported_. + bool empty() const { + return num_new == 0 && num_update == 0 && num_imported == 0; + } + }; + std::vector id_to_stats_ ABSL_GUARDED_BY(mutex_); + std::vector id_to_worker_name_ ABSL_GUARDED_BY(mutex_); }; // Simple class to add statistics by name and print them at the end. 
diff --git a/ortools/sat/synchronization_test.cc b/ortools/sat/synchronization_test.cc index 00dd4a2550..1ab19d6cbc 100644 --- a/ortools/sat/synchronization_test.cc +++ b/ortools/sat/synchronization_test.cc @@ -834,8 +834,8 @@ TEST(SharedResponseManagerTest, Callback) { TEST(SharedClausesManagerTest, SyncApi) { SharedClausesManager manager(/*always_synchronize=*/true); - EXPECT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); - EXPECT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + EXPECT_EQ(0, manager.RegisterNewId("", /*may_terminate_early=*/false)); + EXPECT_EQ(1, manager.RegisterNewId("", /*may_terminate_early=*/false)); manager.AddBinaryClause(/*id=*/0, 1, 2); std::vector> new_clauses; @@ -922,8 +922,8 @@ TEST(UniqueClauseStreamTest, DropsClauses) { TEST(SharedClausesManagerTest, NonSyncApi) { SharedClausesManager manager(/*always_synchronize=*/false); - EXPECT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); - EXPECT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + EXPECT_EQ(0, manager.RegisterNewId("", /*may_terminate_early=*/false)); + EXPECT_EQ(1, manager.RegisterNewId("", /*may_terminate_early=*/false)); manager.AddBinaryClause(/*id=*/0, 1, 2); std::vector> new_clauses; @@ -971,8 +971,8 @@ TEST(SharedClausesManagerTest, NonSyncApi) { TEST(SharedClausesManagerTest, ShareGlueClauses) { SharedClausesManager manager(/*always_synchronize=*/true); - ASSERT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); - ASSERT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + ASSERT_EQ(0, manager.RegisterNewId("", /*may_terminate_early=*/false)); + ASSERT_EQ(1, manager.RegisterNewId("", /*may_terminate_early=*/false)); UniqueClauseStream stream0; UniqueClauseStream stream1; // Add a bunch of clauses that will be skipped batch. 
@@ -999,8 +999,8 @@ TEST(SharedClausesManagerTest, ShareGlueClauses) { TEST(SharedClausesManagerTest, LbdThresholdIncrease) { SharedClausesManager manager(/*always_synchronize=*/true); - ASSERT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); - ASSERT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + ASSERT_EQ(0, manager.RegisterNewId("", /*may_terminate_early=*/false)); + ASSERT_EQ(1, manager.RegisterNewId("", /*may_terminate_early=*/false)); UniqueClauseStream stream0; UniqueClauseStream stream1; const int kExpectedClauses = UniqueClauseStream::kMaxLiteralsPerBatch / 5; @@ -1027,8 +1027,8 @@ TEST(SharedClausesManagerTest, LbdThresholdIncrease) { TEST(SharedClausesManagerTest, LbdThresholdDecrease) { SharedClausesManager manager(/*always_synchronize=*/true); - ASSERT_EQ(0, manager.RegisterNewId(/*may_terminate_early=*/false)); - ASSERT_EQ(1, manager.RegisterNewId(/*may_terminate_early=*/false)); + ASSERT_EQ(0, manager.RegisterNewId("", /*may_terminate_early=*/false)); + ASSERT_EQ(1, manager.RegisterNewId("", /*may_terminate_early=*/false)); UniqueClauseStream stream0; UniqueClauseStream stream1; diff --git a/ortools/util/sigint.cc b/ortools/util/sigint.cc index 601f4983cc..bd4f40cfac 100644 --- a/ortools/util/sigint.cc +++ b/ortools/util/sigint.cc @@ -23,29 +23,47 @@ namespace operations_research { void SigintHandler::Register(const std::function& f) { handler_ = [this, f]() -> void { - const int num_sigint_calls = ++num_sigint_calls_; - if (num_sigint_calls < 3) { + const int num_calls = ++num_calls_; + if (num_calls < 3) { LOG(INFO) - << "^C pressed " << num_sigint_calls << " times. " + << "^C pressed " << num_calls << " times. " << "Interrupting the solver. Press 3 times to force termination."; - if (num_sigint_calls == 1) f(); - } else if (num_sigint_calls == 3) { + if (num_calls == 1) f(); + } else if (num_calls == 3) { LOG(INFO) << "^C pressed 3 times. 
Forcing termination."; exit(EXIT_FAILURE); } else { // Another thread is already running exit(), do nothing. } }; - signal(SIGINT, &ControlCHandler); + signal(SIGINT, &SigHandler); } // This method will be called by the system after the SIGINT signal. // The parameter is the signal received. -void SigintHandler::ControlCHandler(int sig) { handler_(); } +void SigintHandler::SigHandler(int) { handler_(); } -// Unregister the SIGINT handler. -SigintHandler::~SigintHandler() { signal(SIGINT, SIG_DFL); } +// Unregister the signal handlers. +SigintHandler::~SigintHandler() { + if (handler_ != nullptr) signal(SIGINT, SIG_DFL); +} thread_local std::function SigintHandler::handler_; +void SigtermHandler::Register(const std::function& f) { + handler_ = [f]() -> void { f(); }; + signal(SIGTERM, &SigHandler); +} + +// This method will be called by the system after the SIGTERM signal. +// The parameter is the signal received. +void SigtermHandler::SigHandler(int) { handler_(); } + +// Unregister the signal handlers. +SigtermHandler::~SigtermHandler() { + if (handler_ != nullptr) signal(SIGTERM, SIG_DFL); +} + +thread_local std::function SigtermHandler::handler_; + } // namespace operations_research diff --git a/ortools/util/sigint.h b/ortools/util/sigint.h index 7b3098033e..1d9fcd1b81 100644 --- a/ortools/util/sigint.h +++ b/ortools/util/sigint.h @@ -21,7 +21,7 @@ namespace operations_research { class SigintHandler { public: - SigintHandler() {} + SigintHandler() = default; ~SigintHandler(); // Catches ^C and call f() the first time this happen. If ^C is pressed 3 @@ -29,9 +29,23 @@ class SigintHandler { void Register(const std::function& f); private: - static void ControlCHandler(int s); + std::atomic num_calls_ = 0; - std::atomic num_sigint_calls_ = 0; + static void SigHandler(int s); + thread_local static std::function handler_; +}; + +class SigtermHandler { + public: + SigtermHandler() = default; + ~SigtermHandler(); + + // Catches SIGTERM and call f(). 
It is recommended that f() calls exit() to + // terminate the program. + void Register(const std::function& f); + + private: + static void SigHandler(int s); thread_local static std::function handler_; }; diff --git a/ortools/util/sorted_interval_list.h b/ortools/util/sorted_interval_list.h index f07dca7c71..fb62e30d27 100644 --- a/ortools/util/sorted_interval_list.h +++ b/ortools/util/sorted_interval_list.h @@ -724,7 +724,9 @@ class ClosedInterval::Iterator { // arithmetic. uint64_t current_; }; - +#if __cplusplus >= 202002L +static_assert(std::input_iterator); +#endif // begin()/end() are required for iteration over ClosedInterval in a range for // loop. inline ClosedInterval::Iterator begin(ClosedInterval interval) { From 6d8c32ca7b517d44b79efe186aa578f58e9be005 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 20 Jun 2025 15:45:27 +0200 Subject: [PATCH 113/509] fix --- ortools/routing/lp_scheduling.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/routing/lp_scheduling.cc b/ortools/routing/lp_scheduling.cc index c6bcdf9481..bc61dc3bec 100644 --- a/ortools/routing/lp_scheduling.cc +++ b/ortools/routing/lp_scheduling.cc @@ -1331,8 +1331,8 @@ DimensionCumulOptimizerCore::OptimizeSingleRouteWithTransitTargets( const int64_t fixed_transit = CapSub(transit_target, variable_transit_ub); DCHECK_GT(transit_target, fixed_transit); DCHECK_GE(fixed_transit, 0); - const int64_t threshold = - std::max(CapSub(threshold_ratio * transit_target, fixed_transit), 0L); + const int64_t threshold = std::max( + CapSub(threshold_ratio * transit_target, fixed_transit), 0L); DCHECK_GT(variable_transit_ub, threshold); const int violation_above_threshold = From 9541e49c22cbd7f724142f02ac8b9b202f13126a Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 19 Jun 2025 15:07:38 +0200 Subject: [PATCH 114/509] tools/release: fix scripts --- tools/release/amd64.Dockerfile | 6 +-- tools/release/arm64.Dockerfile | 6 +-- 
tools/release/build_delivery_macos.sh | 66 +++++++++++++++++---------- 3 files changed, 47 insertions(+), 31 deletions(-) diff --git a/tools/release/amd64.Dockerfile b/tools/release/amd64.Dockerfile index b95ad2405e..1622d7368f 100644 --- a/tools/release/amd64.Dockerfile +++ b/tools/release/amd64.Dockerfile @@ -36,10 +36,10 @@ RUN dnf -y update \ ENV JAVA_HOME=/usr/lib/jvm/java # Update maven -ADD https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz /usr/local +ADD https://dlcdn.apache.org/maven/maven-3/3.9.10/binaries/apache-maven-3.9.10-bin.tar.gz /usr/local RUN mkdir -p /usr/local/maven \ - && tar xzvf /usr/local/apache-maven-3.9.9-bin.tar.gz --strip-components=1 -C /usr/local/maven \ - && rm /usr/local/apache-maven-3.9.9-bin.tar.gz + && tar xzvf /usr/local/apache-maven-3.9.10-bin.tar.gz --strip-components=1 -C /usr/local/maven \ + && rm /usr/local/apache-maven-3.9.10-bin.tar.gz ENV PATH=/usr/local/maven/bin:$PATH ENV TZ=America/Los_Angeles diff --git a/tools/release/arm64.Dockerfile b/tools/release/arm64.Dockerfile index b19b71c8fc..138c653bb5 100644 --- a/tools/release/arm64.Dockerfile +++ b/tools/release/arm64.Dockerfile @@ -41,10 +41,10 @@ RUN dnf -y update \ ENV JAVA_HOME=/usr/lib/jvm/java # Update maven -ADD https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz /usr/local +ADD https://dlcdn.apache.org/maven/maven-3/3.9.10/binaries/apache-maven-3.9.10-bin.tar.gz /usr/local RUN mkdir -p /usr/local/maven \ - && tar xzvf /usr/local/apache-maven-3.9.9-bin.tar.gz --strip-components=1 -C /usr/local/maven \ - && rm /usr/local/apache-maven-3.9.9-bin.tar.gz + && tar xzvf /usr/local/apache-maven-3.9.10-bin.tar.gz --strip-components=1 -C /usr/local/maven \ + && rm /usr/local/apache-maven-3.9.10-bin.tar.gz ENV PATH=/usr/local/maven/bin:$PATH ENV TZ=America/Los_Angeles diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 7070cd050b..f03ddf3577 100755 --- 
a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -64,12 +64,15 @@ function build_dotnet() { fi cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." command -v swig command -v swig | xargs echo "swig: " | tee -a build.log - echo "check dotnet..." + echo "DONE" | tee -a build.log + + echo -n "check dotnet..." command -v dotnet command -v dotnet | xargs echo "dotnet: " | tee -a build.log + echo "DONE" | tee -a build.log # Install .Net SNK echo -n "Install .Net SNK..." | tee -a build.log @@ -77,7 +80,8 @@ function build_dotnet() { if [[ -x $(command -v openssl11) ]]; then OPENSSL_PRG=openssl11 fi - echo "check ${OPENSSL_PRG}..." + echo "DONE" | tee -a build.log + echo -n "check ${OPENSSL_PRG}..." command -v ${OPENSSL_PRG} | xargs echo "openssl: " | tee -a build.log $OPENSSL_PRG aes-256-cbc -iter 42 -pass pass:"$ORTOOLS_TOKEN" \ @@ -92,12 +96,12 @@ function build_dotnet() { rm -rf "${ROOT_DIR}/temp_dotnet" echo "DONE" | tee -a build.log - echo -n "Build .Net..." | tee -a build.log + echo "Build .Net..." | tee -a build.log cmake -S. -Btemp_dotnet -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_DOTNET=ON cmake --build temp_dotnet -j8 -v - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L temp_dotnet/lib/libortools.dylib | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_dotnet --target test #echo "cmake test: DONE" | tee -a build.log @@ -115,9 +119,11 @@ function build_java() { fi cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." command -v swig command -v swig | xargs echo "swig: " | tee -a build.log + echo "DONE" | tee -a build.log + # maven require JAVA_HOME if [[ -z "${JAVA_HOME}" ]]; then echo "JAVA_HOME: not found !" 
| tee -a build.log @@ -173,21 +179,19 @@ function build_java() { rm -rf "${ROOT_DIR}/temp_java" echo "DONE" | tee -a build.log - echo -n "Build Java..." | tee -a build.log - + echo "Build Java..." | tee -a build.log if [[ ! -v GPG_ARGS ]]; then GPG_EXTRA="" else GPG_EXTRA="-DGPG_ARGS=${GPG_ARGS}" fi - # shellcheck disable=SC2086 # cmake fail to parse empty string "" cmake -S. -Btemp_java -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF \ -DBUILD_JAVA=ON -DSKIP_GPG=OFF ${GPG_EXTRA} cmake --build temp_java -j8 -v - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L temp_java/lib/libortools.dylib | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_java --target test #echo "cmake test: DONE" | tee -a build.log @@ -215,9 +219,10 @@ function build_python() { PATH_BCKP=${PATH} cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." command -v swig command -v swig | xargs echo "swig: " | tee -a build.log + echo "DONE" | tee -a build.log if [[ ${PLATFORM} == "arm64" ]]; then local -r PY=(3.9 3.10 3.11 3.12 3.13) @@ -242,7 +247,7 @@ function build_python() { command -v "python${PY_VERSION}" | xargs echo "python${PY_VERSION}: " | tee -a build.log "python${PY_VERSION}" -c "import platform as p; print(p.platform())" | tee -a build.log "python${PY_VERSION}" -m pip install --upgrade --user pip - "python${PY_VERSION}" -m pip install --upgrade --user wheel absl-py mypy mypy-protobuf protobuf virtualenv "typing-extensions>=4.12" + "python${PY_VERSION}" -m pip install --upgrade --user wheel absl-py mypy mypy-protobuf protobuf virtualenv echo "check protoc-gen-mypy..." command -v protoc-gen-mypy | xargs echo "protoc-gen-mypy: " | tee -a build.log protoc-gen-mypy --version | xargs echo "protoc-gen-mypy version: " | tee -a build.log @@ -276,7 +281,7 @@ function build_python() { echo -n "Cleaning Python ${PY_VERSION}..." 
| tee -a build.log rm -rf "temp_python${PY_VERSION}" echo "DONE" | tee -a build.log - + echo "Build Python ${PY_VERSION}..." | tee -a build.log echo -n " CMake configure..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" @@ -351,26 +356,27 @@ function build_archive() { echo -n "Clean previous archive..." | tee -a build.log make clean_archive + echo "DONE" | tee -a build.log - echo -n "Make cpp archive..." | tee -a build.log + echo "Make cpp archive..." | tee -a build.log make archive_cpp - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log - echo -n "Make dotnet archive..." | tee -a build.log + echo "Make dotnet archive..." | tee -a build.log make archive_dotnet - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log - echo -n "Make java archive..." | tee -a build.log + echo "Make java archive..." | tee -a build.log make archive_java - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log # move archive to export @@ -392,16 +398,26 @@ function build_examples() { echo "Check Sed version..." sed --version 2>&1 | head -n 1 | grep "GNU sed.*\b4" + echo -n "Clean previous example archives..." | tee -a build.log rm -rf temp ./*.tar.gz - echo -n "Build examples archives..." 
| tee -a build.log + echo "DONE" | tee -a build.log + + echo "Build examples archives..." | tee -a build.log + echo -n " Python examples archive..." | tee -a build.log make python_examples_archive UNIX_PYTHON_VER=3 + echo "DONE" | tee -a build.log + echo -n " Java examples archive..." | tee -a build.log make java_examples_archive UNIX_PYTHON_VER=3 + echo "DONE" | tee -a build.log + echo -n " .Net examples archive..." | tee -a build.log make dotnet_examples_archive UNIX_PYTHON_VER=3 echo "DONE" | tee -a build.log + echo "DONE" | tee -a build.log + # move example to export/ mv or-tools_*_examples_*.tar.gz export/ echo "${ORTOOLS_BRANCH} ${ORTOOLS_SHA1}" > "${ROOT_DIR}/export/examples_build" From 9fa309b358d1e49476114af533db27b69e0dccc8 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Jun 2025 10:29:31 +0200 Subject: [PATCH 115/509] cmake: Fix cmake_minimum_required to 3.24 (#4692) --- CMakeLists.txt | 2 +- cmake/README.md | 2 +- cmake/dependencies/CMakeLists.txt | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b06184dc01..02cee4997c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,7 +12,7 @@ # limitations under the License. # This file is just an orchestration -cmake_minimum_required(VERSION 3.20) +cmake_minimum_required(VERSION 3.24) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") # Enable output of compile commands during generation. diff --git a/cmake/README.md b/cmake/README.md index 816f67d067..ff84170a29 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -88,7 +88,7 @@ CMake as a standalone project or incorporate it into an existing CMake project. ## Requirement You'll need: -* `CMake >= 3.18`. +* `CMake >= 3.24`. 
* A C++20 compiler (GCC 10 or above) ## Solvers supported diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 80fdbb1b37..184fed78eb 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -11,6 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +# We are using FetchContent OVERRIDE_FIND_PACKAGE introduced in 3.24 +cmake_minimum_required(VERSION 3.24) + # ############################################################################## # SWIG (WIN32) # ############################################################################## From 78b662a2b223d101e9c291d10f37d4e49c2bef0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Wed, 18 Jun 2025 18:05:38 +0200 Subject: [PATCH 116/509] ortools: utils: keep compatibility with protobuf < 26 --- ortools/util/file_util.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ortools/util/file_util.cc b/ortools/util/file_util.cc index 6ee86f6e52..62d0aaa314 100644 --- a/ortools/util/file_util.cc +++ b/ortools/util/file_util.cc @@ -166,7 +166,11 @@ absl::Status WriteProtoToFile(absl::string_view filename, case ProtoWriteFormat::kJson: { google::protobuf::util::JsonPrintOptions options; options.add_whitespace = true; +#if PROTOBUF_VERSION >= 5026000 // Version 26.0.0 options.always_print_fields_with_no_presence = true; +#else + options.always_print_primitive_fields = true; +#endif options.preserve_proto_field_names = true; if (!google::protobuf::util::MessageToJsonString(proto, &output_string, options) From 507f1d82f6c8d13a04d2dc554124b02e4aea6cbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Wed, 18 Jun 2025 17:22:11 +0200 Subject: [PATCH 117/509] graph: fix iterator compatibility since C++17 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add missing iterator typedefs to custom iterator classes when 
std::iterator inheritance is deprecated since C++17. Signed-off-by: Clément Péron --- ortools/base/proto_enum_utils.h | 13 ++++++++++++- ortools/graph/graph.h | 8 +++++++- ortools/graph/iterators.h | 14 ++++++++++++-- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/ortools/base/proto_enum_utils.h b/ortools/base/proto_enum_utils.h index a78dd61a72..bdf0331056 100644 --- a/ortools/base/proto_enum_utils.h +++ b/ortools/base/proto_enum_utils.h @@ -175,8 +175,19 @@ namespace internal { template class RepeatedEnumView { public: - class Iterator : public std::iterator { + class Iterator +#if __cplusplus < 201703L + : public std::iterator +#endif + { public: + using difference_type = ptrdiff_t; + using value_type = E; +#if __cplusplus >= 201703L + using iterator_category = std::input_iterator_tag; + using pointer = E*; + using reference = E&; +#endif explicit Iterator(RepeatedField::const_iterator ptr) : ptr_(ptr) {} bool operator==(const Iterator& it) const { return ptr_ == it.ptr_; } bool operator!=(const Iterator& it) const { return ptr_ != it.ptr_; } diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index c8b7ef0b83..db3f0e2bcb 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -315,7 +315,7 @@ class BaseGraph { template class ArcPropertyIterator -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { @@ -324,6 +324,11 @@ class ArcPropertyIterator // TODO(b/385094969): This should be `NodeIndex` for integers, // `NodeIndex::value_type` for strong signed integer types. 
using difference_type = std::ptrdiff_t; +#if __cplusplus >= 201703L && __cplusplus < 202002L + using iterator_category = std::input_iterator_tag; + using pointer = PropertyT*; + using reference = PropertyT&; +#endif ArcPropertyIterator() = default; @@ -346,6 +351,7 @@ class ArcPropertyIterator const ArcPropertyIterator& r) { return l.arc_it_ == r.arc_it_; } + friend bool operator!=(const ArcPropertyIterator& l, const ArcPropertyIterator& r) { return !(l == r); diff --git a/ortools/graph/iterators.h b/ortools/graph/iterators.h index 73f67a07bd..50fd5335b0 100644 --- a/ortools/graph/iterators.h +++ b/ortools/graph/iterators.h @@ -124,13 +124,18 @@ class IntegerRangeIterator // TODO(b/385094969): In C++17, `std::iterator_traits` required // explicitly specifying the iterator category. Remove this when backwards // compatibility with C++17 is no longer needed. -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { public: using difference_type = ptrdiff_t; using value_type = IntegerType; +#if __cplusplus >= 201703L && __cplusplus < 202002L + using iterator_category = std::input_iterator_tag; + using pointer = IntegerType*; + using reference = IntegerType&; +#endif IntegerRangeIterator() : index_{} {} @@ -243,13 +248,18 @@ class IntegerRange : public BeginEndWrapper> { // different iterators with the same index type and sentinel. 
template class ChasingIterator -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { public: using difference_type = ptrdiff_t; using value_type = IndexT; +#if __cplusplus >= 201703L && __cplusplus < 202002L + using iterator_category = std::input_iterator_tag; + using pointer = IndexT*; + using reference = IndexT&; +#endif ChasingIterator() : index_(sentinel), next_(nullptr) {} From 6c973a4d53ca0768d3e0d1b641bef1451bd88f8b Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 18 Jun 2025 17:22:11 +0200 Subject: [PATCH 118/509] graph: fix iterator compilation in C++20 --- ortools/graph/graph.h | 3 ++- ortools/graph/iterators.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index 6df4c13379..db3f0e2bcb 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -324,7 +324,7 @@ class ArcPropertyIterator // TODO(b/385094969): This should be `NodeIndex` for integers, // `NodeIndex::value_type` for strong signed integer types. 
using difference_type = std::ptrdiff_t; -#if __cplusplus >= 201703L +#if __cplusplus >= 201703L && __cplusplus < 202002L using iterator_category = std::input_iterator_tag; using pointer = PropertyT*; using reference = PropertyT&; @@ -351,6 +351,7 @@ class ArcPropertyIterator const ArcPropertyIterator& r) { return l.arc_it_ == r.arc_it_; } + friend bool operator!=(const ArcPropertyIterator& l, const ArcPropertyIterator& r) { return !(l == r); diff --git a/ortools/graph/iterators.h b/ortools/graph/iterators.h index 2506c0478b..50fd5335b0 100644 --- a/ortools/graph/iterators.h +++ b/ortools/graph/iterators.h @@ -131,7 +131,7 @@ class IntegerRangeIterator public: using difference_type = ptrdiff_t; using value_type = IntegerType; -#if __cplusplus >= 201703L +#if __cplusplus >= 201703L && __cplusplus < 202002L using iterator_category = std::input_iterator_tag; using pointer = IntegerType*; using reference = IntegerType&; @@ -255,7 +255,7 @@ class ChasingIterator public: using difference_type = ptrdiff_t; using value_type = IndexT; -#if __cplusplus >= 201703L +#if __cplusplus >= 201703L && __cplusplus < 202002L using iterator_category = std::input_iterator_tag; using pointer = IndexT*; using reference = IndexT&; From 73bfae57e8134ec0f7b7bfc87acc3f47a34ce09f Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 23 Jun 2025 13:57:57 +0200 Subject: [PATCH 119/509] Fix dockerfile ENV usage --- .../challenge/minizinc-challenge-ls.Dockerfile | 4 ++-- .../flatzinc/challenge/minizinc-challenge.Dockerfile | 2 +- tools/docker/images/almalinux-9.Dockerfile | 4 ++-- tools/docker/images/alpine-edge.Dockerfile | 4 ++-- tools/docker/images/archlinux.Dockerfile | 4 ++-- tools/docker/images/debian-11.Dockerfile | 4 ++-- tools/docker/images/debian-12.Dockerfile | 4 ++-- tools/docker/images/debian-13.Dockerfile | 4 ++-- tools/docker/images/debian-sid.Dockerfile | 4 ++-- tools/docker/images/fedora-40.Dockerfile | 4 ++-- tools/docker/images/fedora-41.Dockerfile | 4 ++-- 
tools/docker/images/fedora-42.Dockerfile | 4 ++-- tools/docker/images/opensuse-leap.Dockerfile | 4 ++-- tools/docker/images/rockylinux-9.Dockerfile | 4 ++-- tools/docker/images/ubuntu-20.04.Dockerfile | 4 ++-- tools/docker/images/ubuntu-22.04.Dockerfile | 4 ++-- tools/docker/images/ubuntu-24.04.Dockerfile | 4 ++-- tools/docker/images/ubuntu-24.10.Dockerfile | 4 ++-- tools/docker/python/amd64/manylinux.Dockerfile | 10 +++++----- tools/docker/python/amd64/musllinux.Dockerfile | 10 +++++----- tools/docker/python/arm64/manylinux.Dockerfile | 10 +++++----- tools/docker/python/arm64/musllinux.Dockerfile | 10 +++++----- 22 files changed, 55 insertions(+), 55 deletions(-) diff --git a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile index 86b9c8034c..d5d6d02634 100644 --- a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile @@ -1,6 +1,6 @@ FROM minizinc/mznc2025:latest AS env -ENV SRC_GIT_BRANCH v99bugfix +ENV SRC_GIT_BRANCH=v99bugfix ENV TZ=America/Los_Angeles @@ -31,4 +31,4 @@ RUN cp /root/or-tools/ortools/flatzinc/mznlib/*mzn /entry_data/mzn-lib # Patch the run scripts RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -p 1 -G/g" /minizinc/mzn-exec-fd RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true,num_workers:3 -G/g" /minizinc/mzn-exec-free -RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -G/g" /minizinc/mzn-exec-par \ No newline at end of file +RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -G/g" /minizinc/mzn-exec-par diff --git a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile index 1113ff8778..0fdfc256e6 100644 --- a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile @@ -1,6 +1,6 @@ FROM minizinc/mznc2025:latest AS env -ENV SRC_GIT_BRANCH v99bugfix 
+ENV SRC_GIT_BRANCH=v99bugfix ENV TZ=America/Los_Angeles diff --git a/tools/docker/images/almalinux-9.Dockerfile b/tools/docker/images/almalinux-9.Dockerfile index 4a6102159c..cf1d51d85f 100644 --- a/tools/docker/images/almalinux-9.Dockerfile +++ b/tools/docker/images/almalinux-9.Dockerfile @@ -62,12 +62,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/alpine-edge.Dockerfile b/tools/docker/images/alpine-edge.Dockerfile index 3e57be822e..9bb26f931b 100644 --- a/tools/docker/images/alpine-edge.Dockerfile +++ b/tools/docker/images/alpine-edge.Dockerfile @@ -40,12 +40,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/archlinux.Dockerfile b/tools/docker/images/archlinux.Dockerfile index 5ad355cfe5..92aec56c97 100644 --- a/tools/docker/images/archlinux.Dockerfile +++ b/tools/docker/images/archlinux.Dockerfile @@ -41,12 +41,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use 
SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/debian-11.Dockerfile b/tools/docker/images/debian-11.Dockerfile index 2c514de3f8..1315560940 100644 --- a/tools/docker/images/debian-11.Dockerfile +++ b/tools/docker/images/debian-11.Dockerfile @@ -55,12 +55,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/debian-12.Dockerfile b/tools/docker/images/debian-12.Dockerfile index 424fd31282..da883df0ff 100644 --- a/tools/docker/images/debian-12.Dockerfile +++ b/tools/docker/images/debian-12.Dockerfile @@ -49,12 +49,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/debian-13.Dockerfile b/tools/docker/images/debian-13.Dockerfile index 5bb761afed..7c1e9fc1a9 100644 --- a/tools/docker/images/debian-13.Dockerfile +++ b/tools/docker/images/debian-13.Dockerfile @@ -50,12 +50,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git 
a/tools/docker/images/debian-sid.Dockerfile b/tools/docker/images/debian-sid.Dockerfile index 27fc2bb075..613a715a3e 100644 --- a/tools/docker/images/debian-sid.Dockerfile +++ b/tools/docker/images/debian-sid.Dockerfile @@ -52,12 +52,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/fedora-40.Dockerfile b/tools/docker/images/fedora-40.Dockerfile index 0fd6b0c905..7c487641d7 100644 --- a/tools/docker/images/fedora-40.Dockerfile +++ b/tools/docker/images/fedora-40.Dockerfile @@ -48,12 +48,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/fedora-41.Dockerfile b/tools/docker/images/fedora-41.Dockerfile index 9db9337a66..cc95fe4018 100644 --- a/tools/docker/images/fedora-41.Dockerfile +++ b/tools/docker/images/fedora-41.Dockerfile @@ -50,12 +50,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/fedora-42.Dockerfile 
b/tools/docker/images/fedora-42.Dockerfile index 485022bca2..be16996329 100644 --- a/tools/docker/images/fedora-42.Dockerfile +++ b/tools/docker/images/fedora-42.Dockerfile @@ -50,12 +50,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/opensuse-leap.Dockerfile b/tools/docker/images/opensuse-leap.Dockerfile index 300efe9555..52168a1c6a 100644 --- a/tools/docker/images/opensuse-leap.Dockerfile +++ b/tools/docker/images/opensuse-leap.Dockerfile @@ -48,12 +48,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/rockylinux-9.Dockerfile b/tools/docker/images/rockylinux-9.Dockerfile index c885bb3d87..86be1c55d5 100644 --- a/tools/docker/images/rockylinux-9.Dockerfile +++ b/tools/docker/images/rockylinux-9.Dockerfile @@ -62,12 +62,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/ubuntu-20.04.Dockerfile 
b/tools/docker/images/ubuntu-20.04.Dockerfile index 01564cf3ff..187e76077e 100644 --- a/tools/docker/images/ubuntu-20.04.Dockerfile +++ b/tools/docker/images/ubuntu-20.04.Dockerfile @@ -66,12 +66,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/ubuntu-22.04.Dockerfile b/tools/docker/images/ubuntu-22.04.Dockerfile index 1b32512b26..cd063522a4 100644 --- a/tools/docker/images/ubuntu-22.04.Dockerfile +++ b/tools/docker/images/ubuntu-22.04.Dockerfile @@ -64,12 +64,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/ubuntu-24.04.Dockerfile b/tools/docker/images/ubuntu-24.04.Dockerfile index 9cc69b1380..6cf39fa36e 100644 --- a/tools/docker/images/ubuntu-24.04.Dockerfile +++ b/tools/docker/images/ubuntu-24.04.Dockerfile @@ -58,12 +58,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/images/ubuntu-24.10.Dockerfile 
b/tools/docker/images/ubuntu-24.10.Dockerfile index 290a38decb..5e6b972a16 100644 --- a/tools/docker/images/ubuntu-24.10.Dockerfile +++ b/tools/docker/images/ubuntu-24.10.Dockerfile @@ -58,12 +58,12 @@ COPY or-tools.snk /root/or-tools.snk ENV DOTNET_SNK=/root/or-tools.snk ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} +ENV SRC_GIT_BRANCH=${SRC_GIT_BRANCH:-main} ARG SRC_GIT_SHA1 ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} +ENV OR_TOOLS_PATCH=${OR_TOOLS_PATCH:-9999} # Download sources # use SRC_GIT_SHA1 to modify the command diff --git a/tools/docker/python/amd64/manylinux.Dockerfile b/tools/docker/python/amd64/manylinux.Dockerfile index 1933738a40..415bcc52ce 100644 --- a/tools/docker/python/amd64/manylinux.Dockerfile +++ b/tools/docker/python/amd64/manylinux.Dockerfile @@ -22,12 +22,12 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ## OR-TOOLS ## ################ FROM env AS devel -ENV GIT_URL https://github.com/google/or-tools +ENV GIT_URL=https://github.com/google/or-tools ARG GIT_BRANCH -ENV GIT_BRANCH ${GIT_BRANCH:-main} +ENV GIT_BRANCH=${GIT_BRANCH:-main} ARG GIT_SHA1 -ENV GIT_SHA1 ${GIT_SHA1:-unknown} +ENV GIT_SHA1=${GIT_SHA1:-unknown} # Download sources # use GIT_SHA1 to modify the command @@ -38,9 +38,9 @@ RUN git clone -b "${GIT_BRANCH}" --single-branch "$GIT_URL" /project \ WORKDIR /project # Copy build script and setup env -ENV PLATFORM x86_64 +ENV PLATFORM=x86_64 ARG PYTHON_VERSION -ENV PYTHON_VERSION ${PYTHON_VERSION:-3} +ENV PYTHON_VERSION=${PYTHON_VERSION:-3} COPY build-manylinux.sh . 
RUN chmod a+x "build-manylinux.sh" diff --git a/tools/docker/python/amd64/musllinux.Dockerfile b/tools/docker/python/amd64/musllinux.Dockerfile index 4c2f64fdbf..4cc1706983 100644 --- a/tools/docker/python/amd64/musllinux.Dockerfile +++ b/tools/docker/python/amd64/musllinux.Dockerfile @@ -17,12 +17,12 @@ CMD ["/bin/sh"] ## OR-TOOLS ## ################ FROM env AS devel -ENV GIT_URL https://github.com/google/or-tools +ENV GIT_URL=https://github.com/google/or-tools ARG GIT_BRANCH -ENV GIT_BRANCH ${GIT_BRANCH:-main} +ENV GIT_BRANCH=${GIT_BRANCH:-main} ARG GIT_SHA1 -ENV GIT_SHA1 ${GIT_SHA1:-unknown} +ENV GIT_SHA1=${GIT_SHA1:-unknown} # Download sources # use GIT_SHA1 to modify the command @@ -33,9 +33,9 @@ RUN git clone -b "${GIT_BRANCH}" --single-branch "$GIT_URL" /project \ WORKDIR /project # Copy build script and setup env -ENV PLATFORM x86_64 +ENV PLATFORM=x86_64 ARG PYTHON_VERSION -ENV PYTHON_VERSION ${PYTHON_VERSION:-3} +ENV PYTHON_VERSION=${PYTHON_VERSION:-3} COPY build-musllinux.sh . 
RUN chmod a+x "build-musllinux.sh" diff --git a/tools/docker/python/arm64/manylinux.Dockerfile b/tools/docker/python/arm64/manylinux.Dockerfile index 116fe68de2..8edd33f373 100644 --- a/tools/docker/python/arm64/manylinux.Dockerfile +++ b/tools/docker/python/arm64/manylinux.Dockerfile @@ -24,12 +24,12 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ## OR-TOOLS ## ################ FROM env AS devel -ENV GIT_URL https://github.com/google/or-tools +ENV GIT_URL=https://github.com/google/or-tools ARG GIT_BRANCH -ENV GIT_BRANCH ${GIT_BRANCH:-main} +ENV GIT_BRANCH=${GIT_BRANCH:-main} ARG GIT_SHA1 -ENV GIT_SHA1 ${GIT_SHA1:-unknown} +ENV GIT_SHA1=${GIT_SHA1:-unknown} # Download sources # use GIT_SHA1 to modify the command @@ -40,9 +40,9 @@ RUN git clone -b "${GIT_BRANCH}" --single-branch "$GIT_URL" /project \ WORKDIR /project # Copy build script and setup env -ENV PLATFORM aarch64 +ENV PLATFORM=aarch64 ARG PYTHON_VERSION -ENV PYTHON_VERSION ${PYTHON_VERSION:-3} +ENV PYTHON_VERSION=${PYTHON_VERSION:-3} COPY build-manylinux.sh . 
RUN chmod a+x "build-manylinux.sh" diff --git a/tools/docker/python/arm64/musllinux.Dockerfile b/tools/docker/python/arm64/musllinux.Dockerfile index 719dc6417e..bbe93685f1 100644 --- a/tools/docker/python/arm64/musllinux.Dockerfile +++ b/tools/docker/python/arm64/musllinux.Dockerfile @@ -17,12 +17,12 @@ CMD ["/bin/sh"] ## OR-TOOLS ## ################ FROM env AS devel -ENV GIT_URL https://github.com/google/or-tools +ENV GIT_URL=https://github.com/google/or-tools ARG GIT_BRANCH -ENV GIT_BRANCH ${GIT_BRANCH:-main} +ENV GIT_BRANCH=${GIT_BRANCH:-main} ARG GIT_SHA1 -ENV GIT_SHA1 ${GIT_SHA1:-unknown} +ENV GIT_SHA1=${GIT_SHA1:-unknown} # Download sources # use GIT_SHA1 to modify the command @@ -33,9 +33,9 @@ RUN git clone -b "${GIT_BRANCH}" --single-branch "${GIT_URL}" /project \ WORKDIR /project # Copy build script and setup env -ENV PLATFORM aarch64 +ENV PLATFORM=aarch64 ARG PYTHON_VERSION -ENV PYTHON_VERSION ${PYTHON_VERSION:-3} +ENV PYTHON_VERSION=${PYTHON_VERSION:-3} COPY build-musllinux.sh . 
RUN chmod a+x "build-musllinux.sh" From 333f8c17b29f76c5c0749e3b0ca9ba2218e55092 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 23 Jun 2025 14:38:09 +0200 Subject: [PATCH 120/509] ci: Use the default installed maven in windows_cmake_java job --- .github/workflows/amd64_windows_cmake_java.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/amd64_windows_cmake_java.yml b/.github/workflows/amd64_windows_cmake_java.yml index 3c15cd4a31..842ef01632 100644 --- a/.github/workflows/amd64_windows_cmake_java.yml +++ b/.github/workflows/amd64_windows_cmake_java.yml @@ -42,10 +42,6 @@ jobs: with: distribution: ${{matrix.java.distrib}} java-version: ${{matrix.java.version}} - - name: Update maven - run: | - choco upgrade maven - echo "C:\ProgramData\chocolatey\lib\maven\apache-maven-3.9.9\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - name: Check java run: | java -version From fca7bd43c89d2a49832c0673c77f4ad027d15b93 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 23 Jun 2025 17:32:45 +0200 Subject: [PATCH 121/509] small updates --- ortools/base/proto_enum_utils.h | 1 + .../challenge/minizinc-challenge-ls.Dockerfile | 2 +- .../flatzinc/challenge/minizinc-challenge.Dockerfile | 2 +- .../linear_solver/wrappers/model_builder_helper.cc | 12 +++++++----- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/ortools/base/proto_enum_utils.h b/ortools/base/proto_enum_utils.h index bdf0331056..4c7dc30ba8 100644 --- a/ortools/base/proto_enum_utils.h +++ b/ortools/base/proto_enum_utils.h @@ -26,6 +26,7 @@ // } // +#include #include #include "google/protobuf/descriptor.pb.h" diff --git a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile index d5d6d02634..301a7dc887 100644 --- a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile @@ -1,6 +1,6 @@ FROM 
minizinc/mznc2025:latest AS env -ENV SRC_GIT_BRANCH=v99bugfix +ENV SRC_GIT_BRANCH v99bugfix ENV TZ=America/Los_Angeles diff --git a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile index 0fdfc256e6..1113ff8778 100644 --- a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile @@ -1,6 +1,6 @@ FROM minizinc/mznc2025:latest AS env -ENV SRC_GIT_BRANCH=v99bugfix +ENV SRC_GIT_BRANCH v99bugfix ENV TZ=America/Los_Angeles diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index 64e810c118..d4ad60d073 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -183,7 +183,7 @@ double ModelBuilderHelper::VarObjectiveCoefficient(int var_index) const { } std::string ModelBuilderHelper::VarName(int var_index) const { - return model_.variable(var_index).name(); + return std::string(model_.variable(var_index).name()); } int ModelBuilderHelper::AddLinearConstraint() { @@ -259,7 +259,7 @@ double ModelBuilderHelper::ConstraintUpperBound(int ct_index) const { } std::string ModelBuilderHelper::ConstraintName(int ct_index) const { - return model_.constraint(ct_index).name(); + return std::string(model_.constraint(ct_index).name()); } std::vector ModelBuilderHelper::ConstraintVarIndices(int ct_index) const { @@ -399,7 +399,7 @@ double ModelBuilderHelper::EnforcedConstraintUpperBound(int ct_index) const { std::string ModelBuilderHelper::EnforcedConstraintName(int ct_index) const { DCHECK(IsEnforcedConstraint(ct_index)); - return model_.general_constraint(ct_index).name(); + return std::string(model_.general_constraint(ct_index).name()); } std::vector ModelBuilderHelper::EnforcedConstraintVarIndices( @@ -436,7 +436,9 @@ int ModelBuilderHelper::num_constraints() const { return model_.constraint_size() + 
model_.general_constraint_size(); } -std::string ModelBuilderHelper::name() const { return model_.name(); } +std::string ModelBuilderHelper::name() const { + return std::string(model_.name()); +} void ModelBuilderHelper::SetName(const std::string& name) { model_.set_name(name); @@ -741,7 +743,7 @@ double ModelSolverHelper::activity(int ct_index) { std::string ModelSolverHelper::status_string() const { if (!has_response()) return ""; - return response_.value().status_str(); + return std::string(response_.value().status_str()); } double ModelSolverHelper::wall_time() const { From 0ac20dfcd72a6b22f05b03063d0571b8b35a66f3 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Mon, 23 Jun 2025 17:33:30 +0200 Subject: [PATCH 122/509] [CP-SAT] improve precedences; fix #4693; tweak shared tree workers --- ortools/sat/2d_distances_propagator.cc | 109 +++---- ortools/sat/2d_distances_propagator.h | 24 +- ortools/sat/BUILD.bazel | 1 + ortools/sat/cp_model.cc | 4 +- ortools/sat/cp_model_search.cc | 1 + ortools/sat/cp_model_symmetries.cc | 4 + ortools/sat/diffn.cc | 27 +- ortools/sat/disjunctive.cc | 22 +- ortools/sat/disjunctive.h | 2 +- ortools/sat/integer.h | 7 +- ortools/sat/integer_base.h | 5 + ortools/sat/precedences.cc | 430 +++++++++++-------------- ortools/sat/precedences.h | 174 +++++----- ortools/sat/precedences_test.cc | 11 +- ortools/sat/python/cp_model_helper.cc | 4 +- ortools/sat/sat_decision.h | 3 +- ortools/sat/sat_decision_test.cc | 4 +- ortools/sat/solution_crush.cc | 34 ++ ortools/sat/solution_crush.h | 7 + ortools/sat/work_assignment.cc | 12 +- ortools/sat/work_assignment.h | 14 +- ortools/sat/work_assignment_test.cc | 9 + 22 files changed, 501 insertions(+), 407 deletions(-) diff --git a/ortools/sat/2d_distances_propagator.cc b/ortools/sat/2d_distances_propagator.cc index 3d455420a5..08253a04b5 100644 --- a/ortools/sat/2d_distances_propagator.cc +++ b/ortools/sat/2d_distances_propagator.cc @@ -17,13 +17,13 @@ #include #include #include +#include #include 
#include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/types/span.h" -#include "ortools/base/stl_util.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" @@ -43,8 +43,8 @@ Precedences2DPropagator::Precedences2DPropagator( linear2_bounds_(model->GetOrCreate()), linear2_watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), - non_trivial_bounds_( - model->GetOrCreate()) { + lin2_indices_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()) { model->GetOrCreate()->SetPushAffineUbForBinaryRelation(); } @@ -67,15 +67,46 @@ void Precedences2DPropagator::UpdateVarLookups() { } } +void Precedences2DPropagator::AddOrUpdateDataForPairOfBoxes(int box1, + int box2) { + if (box1 > box2) std::swap(box1, box2); + const auto [it, inserted] = non_trivial_pairs_index_.insert( + {std::make_pair(box1, box2), static_cast(pair_data_.size())}); + if (inserted) { + pair_data_.emplace_back(); + } + PairData& pair_data = pair_data_[it->second]; + pair_data.box1 = box1; + pair_data.box2 = box2; + for (int dim = 0; dim < 2; ++dim) { + const SchedulingConstraintHelper& dim_helper = + dim == 0 ? helper_.x_helper() : helper_.y_helper(); + for (int j = 0; j < 2; ++j) { + int b1 = j == 0 ? box1 : box2; + int b2 = j == 0 ? 
box2 : box1; + auto [start_minus_end_expr, start_minus_end_ub] = + EncodeDifferenceLowerThan(dim_helper.Starts()[b1], + dim_helper.Ends()[b2], 0); + const LinearExpression2Index start_minus_end_index = + lin2_indices_->GetIndex(start_minus_end_expr); + pair_data.start_before_end[dim][j].ub = start_minus_end_ub; + if (start_minus_end_index != kNoLinearExpression2Index) { + pair_data.start_before_end[dim][j].linear2 = start_minus_end_index; + } else { + pair_data.start_before_end[dim][j].linear2 = start_minus_end_expr; + } + } + } +} + void Precedences2DPropagator::CollectNewPairsOfBoxesWithNonTrivialDistance() { const absl::Span exprs = - non_trivial_bounds_->GetLinear2WithPotentialNonTrivalBounds(); + lin2_indices_->GetStoredLinear2Indices(); if (exprs.size() == num_known_linear2_) { return; } VLOG(2) << "CollectPairsOfBoxesWithNonTrivialDistance called, num_exprs: " << exprs.size(); - const int previous_num_pairs = non_trivial_pairs_.size(); for (; num_known_linear2_ < exprs.size(); ++num_known_linear2_) { const LinearExpression2& positive_expr = exprs[num_known_linear2_]; LinearExpression2 negated_expr = positive_expr; @@ -93,52 +124,25 @@ void Precedences2DPropagator::CollectNewPairsOfBoxesWithNonTrivialDistance() { const VarUsage& usage1 = it1->second; const VarUsage& usage2 = it2->second; for (int dim = 0; dim < 2; ++dim) { - const SchedulingConstraintHelper& dim_helper = - dim == 0 ? helper_.x_helper() : helper_.y_helper(); for (const int box1 : usage1.boxes[dim][0 /* start */]) { for (const int box2 : usage2.boxes[dim][1 /* end */]) { if (box1 == box2) continue; - const auto [expr2, unused] = EncodeDifferenceLowerThan( - dim_helper.Starts()[box1], dim_helper.Ends()[box2], - /*ub=unused*/ 0); - if (expr == expr2) { - if (box1 < box2) { - non_trivial_pairs_.push_back({box1, box2}); - } else { - non_trivial_pairs_.push_back({box2, box1}); - } - } + AddOrUpdateDataForPairOfBoxes(box1, box2); } } } } } +} - // Sort the new pairs. 
- std::sort(non_trivial_pairs_.begin() + previous_num_pairs, - non_trivial_pairs_.end()); - - // Remove duplicates from new pairs. - non_trivial_pairs_.erase( - std::unique(non_trivial_pairs_.begin() + previous_num_pairs, - non_trivial_pairs_.end()), - non_trivial_pairs_.end()); - - // Merge with the old pairs keeping sorted. - std::inplace_merge(non_trivial_pairs_.begin(), - non_trivial_pairs_.begin() + previous_num_pairs, - non_trivial_pairs_.end()); - - // Remove newly-added duplicates. - non_trivial_pairs_.erase( - std::unique(non_trivial_pairs_.begin(), non_trivial_pairs_.end()), - non_trivial_pairs_.end()); - - // Result should be sorted and without duplicates. - DCHECK(std::is_sorted(non_trivial_pairs_.begin(), non_trivial_pairs_.end())); - DCHECK(std::adjacent_find(non_trivial_pairs_.begin(), - non_trivial_pairs_.end()) == - non_trivial_pairs_.end()); +IntegerValue Precedences2DPropagator::UpperBound( + std::variant linear2) const { + if (std::holds_alternative(linear2)) { + return linear2_bounds_->UpperBound( + std::get(linear2)); + } else { + return integer_trail_->UpperBound(std::get(linear2)); + } } bool Precedences2DPropagator::Propagate() { @@ -147,7 +151,8 @@ bool Precedences2DPropagator::Propagate() { last_helper_inprocessing_count_ = helper_.InProcessingCount(); UpdateVarLookups(); num_known_linear2_ = 0; - non_trivial_pairs_.clear(); + non_trivial_pairs_index_.clear(); + pair_data_.clear(); } CollectNewPairsOfBoxesWithNonTrivialDistance(); @@ -156,7 +161,9 @@ bool Precedences2DPropagator::Propagate() { SchedulingConstraintHelper* helpers[2] = {&helper_.x_helper(), &helper_.y_helper()}; - for (const auto& [box1, box2] : non_trivial_pairs_) { + for (const PairData& pair_data : pair_data_) { + const int box1 = pair_data.box1; + const int box2 = pair_data.box2; DCHECK(box1 < helper_.NumBoxes()); DCHECK(box2 < helper_.NumBoxes()); DCHECK_NE(box1, box2); @@ -166,16 +173,10 @@ bool Precedences2DPropagator::Propagate() { bool is_unfeasible = true; for 
(int dim = 0; dim < 2; dim++) { - const SchedulingConstraintHelper* helper = helpers[dim]; for (int j = 0; j < 2; j++) { - int b1 = box1; - int b2 = box2; - if (j == 1) { - std::swap(b1, b2); - } - const auto [expr, ub_for_no_overlap] = EncodeDifferenceLowerThan( - helper->Starts()[b1], helper->Ends()[b2], 0); - if (linear2_bounds_->UpperBound(expr) >= ub_for_no_overlap) { + const PairData::Condition& start_before_end = + pair_data.start_before_end[dim][j]; + if (UpperBound(start_before_end.linear2) >= start_before_end.ub) { is_unfeasible = false; break; } @@ -223,7 +224,7 @@ Precedences2DPropagator::~Precedences2DPropagator() { std::vector> stats; stats.push_back({"Precedences2DPropagator/called", num_calls_}); stats.push_back({"Precedences2DPropagator/conflicts", num_conflicts_}); - stats.push_back({"Precedences2DPropagator/pairs", non_trivial_pairs_.size()}); + stats.push_back({"Precedences2DPropagator/pairs", pair_data_.size()}); shared_stats_->AddStats(stats); } diff --git a/ortools/sat/2d_distances_propagator.h b/ortools/sat/2d_distances_propagator.h index 6c47f37f64..a5361ee702 100644 --- a/ortools/sat/2d_distances_propagator.h +++ b/ortools/sat/2d_distances_propagator.h @@ -16,6 +16,7 @@ #include #include +#include #include #include "absl/container/flat_hash_map.h" @@ -47,8 +48,26 @@ class Precedences2DPropagator : public PropagatorInterface { private: void CollectNewPairsOfBoxesWithNonTrivialDistance(); void UpdateVarLookups(); + IntegerValue UpperBound( + std::variant linear2) const; + void AddOrUpdateDataForPairOfBoxes(int box1, int box2); - std::vector> non_trivial_pairs_; + struct PairData { + // The condition must be true if ub(linear2) < ub. + struct Condition { + // If the expression is in the Linear2Indices it is represented by its + // index, otherwise it is represented by the expression itself. 
+ std::variant linear2; + IntegerValue ub; + }; + + int box1; + int box2; + // start1_before_end2[0==x, 1==y][0=start_1_end_2, 1=start_2_end_1] + Condition start_before_end[2][2]; + }; + absl::flat_hash_map, int> non_trivial_pairs_index_; + std::vector pair_data_; struct VarUsage { // boxes[0=x, 1=y][0=start, 1=end] std::vector boxes[2][2]; @@ -60,7 +79,8 @@ class Precedences2DPropagator : public PropagatorInterface { Linear2Bounds* linear2_bounds_; Linear2Watcher* linear2_watcher_; SharedStatistics* shared_stats_; - Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; + Linear2Indices* lin2_indices_; + IntegerTrail* integer_trail_; int last_helper_inprocessing_count_ = -1; int num_known_linear2_ = 0; diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 4dc706800d..a67fc0cd6f 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -3577,6 +3577,7 @@ cc_library( ":synchronization", ":timetable", ":util", + "//ortools/base:stl_util", "//ortools/util:bitset", "//ortools/util:saturated_arithmetic", "//ortools/util:strong_integers", diff --git a/ortools/sat/cp_model.cc b/ortools/sat/cp_model.cc index 30a888d3d8..f9db519ed9 100644 --- a/ortools/sat/cp_model.cc +++ b/ortools/sat/cp_model.cc @@ -125,7 +125,7 @@ IntVar IntVar::WithName(absl::string_view name) { std::string IntVar::Name() const { if (builder_ == nullptr) return "null"; - return builder_->Proto().variables(index_).name(); + return std::string(builder_->Proto().variables(index_).name()); } ::operations_research::Domain IntVar::Domain() const { @@ -619,7 +619,7 @@ BoolVar IntervalVar::PresenceBoolVar() const { std::string IntervalVar::Name() const { if (builder_ == nullptr) return "null"; - return builder_->Proto().constraints(index_).name(); + return std::string(builder_->Proto().constraints(index_).name()); } std::string IntervalVar::DebugString() const { diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 6d18fd1ea1..e6b29ef50b 100644 --- 
a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -188,6 +188,7 @@ void AddExtraSchedulingPropagators(SatParameters& new_params) { new_params.set_use_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_area_energetic_reasoning_in_no_overlap_2d(true); new_params.set_use_try_edge_reasoning_in_no_overlap_2d(true); + new_params.set_no_overlap_2d_boolean_relations_limit(100); } // We want a random tie breaking among variables with equivalent values. diff --git a/ortools/sat/cp_model_symmetries.cc b/ortools/sat/cp_model_symmetries.cc index 98a1ca6ab0..a93b81c067 100644 --- a/ortools/sat/cp_model_symmetries.cc +++ b/ortools/sat/cp_model_symmetries.cc @@ -1453,6 +1453,8 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { if (row_has_at_most_one_true[row]) { context->UpdateRuleStats( "symmetry: fixed all but one to false in orbitope row"); + context->solution_crush().MaybeSwapOrbitopeColumns( + orbitope, row, num_processed_rows - 1, true); for (int j = num_processed_rows; j < num_cols; ++j) { if (!context->SetLiteralToFalse(orbitope[row][j])) return false; } @@ -1460,6 +1462,8 @@ bool DetectAndExploitSymmetriesInPresolve(PresolveContext* context) { CHECK(row_has_at_most_one_false[row]); context->UpdateRuleStats( "symmetry: fixed all but one to true in orbitope row"); + context->solution_crush().MaybeSwapOrbitopeColumns( + orbitope, row, num_processed_rows - 1, false); for (int j = num_processed_rows; j < num_cols; ++j) { if (!context->SetLiteralToTrue(orbitope[row][j])) return false; } diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index 078de5eb92..fde8ee6a46 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -32,6 +32,7 @@ #include "absl/log/vlog_is_on.h" #include "absl/numeric/bits.h" #include "absl/types/span.h" +#include "ortools/base/stl_util.h" #include "ortools/sat/2d_distances_propagator.h" #include "ortools/sat/2d_mandatory_overlap_propagator.h" #include 
"ortools/sat/2d_orthogonal_packing.h" @@ -276,11 +277,11 @@ void AddNonOverlappingRectangles(const std::vector& x, DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0); for (int i = 0; i < num_boxes; ++i) { - if (repository->IsOptional(x[i])) continue; - if (repository->IsOptional(y[i])) continue; + if (repository->IsAbsent(x[i])) continue; + if (repository->IsAbsent(y[i])) continue; for (int j = i + 1; j < num_boxes; ++j) { - if (repository->IsOptional(x[j])) continue; - if (repository->IsOptional(y[j])) continue; + if (repository->IsAbsent(x[j])) continue; + if (repository->IsAbsent(y[j])) continue; // At most one of these two x options is true. const Literal x_ij = repository->GetOrCreatePrecedenceLiteral( @@ -306,8 +307,22 @@ void AddNonOverlappingRectangles(const std::vector& x, return; } - // At least one of the 4 options is true. - if (!sat_solver->AddProblemClause({x_ij, x_ji, y_ij, y_ji})) { + // At least one of the 4 options is true if all boxes are present. + std::vector clause = {x_ij, x_ji, y_ij, y_ji}; + if (repository->IsOptional(x[i])) { + clause.push_back(repository->PresenceLiteral(x[i]).Negated()); + } + if (repository->IsOptional(y[i])) { + clause.push_back(repository->PresenceLiteral(y[i]).Negated()); + } + if (repository->IsOptional(x[j])) { + clause.push_back(repository->PresenceLiteral(x[j]).Negated()); + } + if (repository->IsOptional(y[j])) { + clause.push_back(repository->PresenceLiteral(y[j]).Negated()); + } + gtl::STLSortAndRemoveDuplicates(&clause); + if (!sat_solver->AddProblemClause(clause)) { return; } } diff --git a/ortools/sat/disjunctive.cc b/ortools/sat/disjunctive.cc index c018335fce..c475f64cc4 100644 --- a/ortools/sat/disjunctive.cc +++ b/ortools/sat/disjunctive.cc @@ -1222,8 +1222,6 @@ bool DisjunctivePrecedences::PropagateSubwindow() { // Note that like in Propagate() we split this set of task into critical // subpart as there is no point considering them together. 
// - // TODO(user): we should probably change the api to return a Span. - // // TODO(user): If more than one set of task push the same variable, we // probably only want to keep the best push? Maybe we want to process them // in reverse order of what we do here? @@ -1233,7 +1231,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { for (; global_i < size; ++global_i) { const EnforcedLinear2Bounds::PrecedenceData& data = before_[global_i]; if (data.var != var) break; - const int index = data.index; + const int index = data.var_index; const auto [t, start_of_t] = window_[index]; if (global_i == global_start_i) { // First loop. local_start = start_of_t; @@ -1242,7 +1240,7 @@ bool DisjunctivePrecedences::PropagateSubwindow() { if (start_of_t >= local_end) break; local_end += helper_->SizeMin(t); } - indices_before_.push_back(index); + indices_before_.push_back({index, data.lin2_index}); } // No need to consider if we don't have at least two tasks before var. @@ -1268,16 +1266,14 @@ bool DisjunctivePrecedences::PropagateSubwindow() { IntegerValue min_offset = kMaxIntegerValue; IntegerValue sum_of_duration = 0; for (int i = num_before; --i >= 0;) { - const TaskTime task_time = window_[indices_before_[i]]; + const auto [task_index, lin2_index] = indices_before_[i]; + const TaskTime task_time = window_[task_index]; const AffineExpression& end_exp = helper_->Ends()[task_time.task_index]; - // TODO(user): The hash lookup here is a bit slow, so we avoid fetching - // the offset as much as possible. Note that the alternative of storing it - // in PrecedenceData is not necessarily better and harder to update as we - // dive/backtrack. + // TODO(user): The lookup here is a bit slow, so we avoid fetching + // the offset as much as possible. 
const IntegerValue inner_offset = - -linear2_bounds_->NonTrivialUpperBoundForGcd1( - LinearExpression2::Difference(end_exp.var, var)); + -linear2_bounds_->NonTrivialUpperBound(lin2_index); DCHECK_NE(inner_offset, kMinIntegerValue); // We have var >= end_exp.var + inner_offset, so @@ -1320,10 +1316,10 @@ bool DisjunctivePrecedences::PropagateSubwindow() { DCHECK_NE(best_index, -1); helper_->ClearReason(); const IntegerValue window_start = - window_[indices_before_[best_index]].time; + window_[indices_before_[best_index].first].time; for (int i = best_index; i < num_before; ++i) { if (skip_[i]) continue; - const int ct = window_[indices_before_[i]].task_index; + const int ct = window_[indices_before_[i].first].task_index; helper_->AddPresenceReason(ct); helper_->AddEnergyAfterReason(ct, helper_->SizeMin(ct), window_start); diff --git a/ortools/sat/disjunctive.h b/ortools/sat/disjunctive.h index 8550fc2dd1..e50d022975 100644 --- a/ortools/sat/disjunctive.h +++ b/ortools/sat/disjunctive.h @@ -376,7 +376,7 @@ class DisjunctivePrecedences : public PropagatorInterface { FixedCapacityVector window_; FixedCapacityVector index_to_end_vars_; - FixedCapacityVector indices_before_; + FixedCapacityVector> indices_before_; std::vector skip_; std::vector before_; diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index 14e485fdad..1b926092e7 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -1379,11 +1379,14 @@ inline IntegerValue IntegerTrail::UpperBound(AffineExpression expr) const { } inline IntegerValue IntegerTrail::UpperBound(LinearExpression2 expr) const { - expr.SimpleCanonicalization(); IntegerValue result = 0; for (int i = 0; i < 2; ++i) { - if (expr.coeffs[i] != 0) { + if (expr.coeffs[i] == 0) { + continue; + } else if (expr.coeffs[i] > 0) { result += expr.coeffs[i] * UpperBound(expr.vars[i]); + } else { + result += expr.coeffs[i] * LowerBound(expr.vars[i]); } } return result; diff --git a/ortools/sat/integer_base.h b/ortools/sat/integer_base.h 
index ba4f04cdff..14aa492ced 100644 --- a/ortools/sat/integer_base.h +++ b/ortools/sat/integer_base.h @@ -41,6 +41,11 @@ namespace sat { // Callbacks that will be called when the search goes back to level 0. // Callbacks should return false if the propagation fails. +// +// We will call this after propagation has reached a fixed point. Note however +// that if any callbacks "propagate" something, the callbacks following it might +// not see a state where the propagation has been called again. +// TODO(user): maybe we should re-propagate before calling the next callback. struct LevelZeroCallbackHelper { std::vector> callbacks; }; diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 5618fb304a..c4cef71f05 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -55,7 +55,7 @@ namespace operations_research { namespace sat { -LinearExpression2Index Linear2WithPotentialNonTrivalBounds::AddOrGet( +LinearExpression2Index Linear2Indices::AddOrGet( LinearExpression2 original_expr) { LinearExpression2 expr = original_expr; DCHECK(expr.IsCanonicalized()); @@ -75,22 +75,6 @@ LinearExpression2Index Linear2WithPotentialNonTrivalBounds::AddOrGet( if (!inserted) return result; - // Update our special coeff=1 lookup table. - if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { - // +2 to handle possible negation. - const int new_size = - std::max(expr.vars[0].value(), expr.vars[1].value()) + 2; - if (new_size > coeff_one_var_lookup_.size()) { - coeff_one_var_lookup_.resize(new_size); - } - LinearExpression2 neg_expr = original_expr; - neg_expr.Negate(); - coeff_one_var_lookup_[original_expr.vars[0]].push_back(result); - coeff_one_var_lookup_[original_expr.vars[1]].push_back(result); - coeff_one_var_lookup_[neg_expr.vars[1]].push_back(NegationOf(result)); - coeff_one_var_lookup_[neg_expr.vars[0]].push_back(NegationOf(result)); - } - - // Update our per-variable and per-pair lookup tables. 
IntegerVariable var1 = PositiveVariable(expr.vars[0]); IntegerVariable var2 = PositiveVariable(expr.vars[1]); @@ -125,7 +109,7 @@ int64_t Linear2Watcher::VarTimestamp(IntegerVariable var) { bool RootLevelLinear2Bounds::AddUpperBound(LinearExpression2Index index, IntegerValue ub) { - const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + const LinearExpression2 expr = lin2_indices_->GetExpression(index); const IntegerValue zero_level_ub = integer_trail_->LevelZeroUpperBound(expr); if (ub >= zero_level_ub) { return false; @@ -141,6 +125,27 @@ bool RootLevelLinear2Bounds::AddUpperBound(LinearExpression2Index index, ++num_updates_; linear2_watcher_->NotifyBoundChanged(expr); + // Simple relations. + // + // TODO(user): Remove them each time we go back to level zero and they become + // trivially true ? + if (IntTypeAbs(expr.coeffs[0]) == 1 && IntTypeAbs(expr.coeffs[1]) == 1) { + if (index >= in_coeff_one_lookup_.size()) { + in_coeff_one_lookup_.resize(index + 1, false); + } + if (!in_coeff_one_lookup_[index]) { + const IntegerVariable a = + expr.coeffs[0] > 0 ? expr.vars[0] : NegationOf(expr.vars[0]); + const IntegerVariable b = + expr.coeffs[1] > 0 ? expr.vars[1] : NegationOf(expr.vars[1]); + + coeff_one_var_lookup_.resize(integer_trail_->NumIntegerVariables()); + in_coeff_one_lookup_[index] = true; + coeff_one_var_lookup_[a].push_back({b, index}); + coeff_one_var_lookup_[b].push_back({a, index}); + } + } + // Share. // // TODO(user): It seems we could change the canonicalization to only use @@ -170,6 +175,32 @@ bool RootLevelLinear2Bounds::AddUpperBound(LinearExpression2Index index, return true; } +// TODO(user): If we add an indexing for "coeff * var" this is kind of +// easy to generalize to affine relations, not just "simple one". 
+int RootLevelLinear2Bounds::AugmentSimpleRelations(IntegerVariable var, + int work_limit) { + var = PositiveVariable(var); + if (var >= coeff_one_var_lookup_.size()) return 0; + if (NegationOf(var) >= coeff_one_var_lookup_.size()) return 0; + + // Note that this never touches in_coeff_one_lookup_[var/NegationOf(var)], + // so it should be safe to iterate on it. + int work_done = 0; + for (const auto [a, a_index] : coeff_one_var_lookup_[var]) { + CHECK_NE(PositiveVariable(a), var); + const IntegerValue a_ub = best_upper_bounds_[a_index]; + for (const auto [b, b_index] : coeff_one_var_lookup_[NegationOf(var)]) { + if (PositiveVariable(b) == PositiveVariable(a)) continue; + CHECK_NE(PositiveVariable(b), var); + if (++work_done > work_limit) return work_done; + + const LinearExpression2 candidate{a, b, 1, 1}; + AddUpperBound(candidate, a_ub + best_upper_bounds_[b_index]); + } + } + return work_done; +} + RootLevelLinear2Bounds::~RootLevelLinear2Bounds() { if (!VLOG_IS_ON(1)) return; std::vector> stats; @@ -207,7 +238,7 @@ RootLevelLinear2Bounds::GetSortedNonTrivialUpperBounds() const { index < best_upper_bounds_.size(); ++index) { const IntegerValue ub = best_upper_bounds_[index]; if (ub == kMaxIntegerValue) continue; - const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + const LinearExpression2 expr = lin2_indices_->GetExpression(index); if (ub < integer_trail_->LevelZeroUpperBound(expr)) { result.push_back({expr, ub}); } @@ -221,10 +252,10 @@ RootLevelLinear2Bounds::GetAllBoundsContainingVariable( IntegerVariable var) const { std::vector> result; for (const LinearExpression2Index index : - non_trivial_bounds_->GetAllLinear2ContainingVariable(var)) { + lin2_indices_->GetAllLinear2ContainingVariable(var)) { const IntegerValue lb = -GetUpperBoundNoTrail(NegationOf(index)); const IntegerValue ub = GetUpperBoundNoTrail(index); - const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + const LinearExpression2 expr = 
lin2_indices_->GetExpression(index); const IntegerValue trail_lb = integer_trail_->LevelZeroLowerBound(expr); const IntegerValue trail_ub = integer_trail_->LevelZeroUpperBound(expr); if (lb <= trail_lb && ub >= trail_ub) continue; @@ -256,10 +287,10 @@ RootLevelLinear2Bounds::GetAllBoundsContainingVariables( IntegerVariable var1, IntegerVariable var2) const { std::vector> result; for (const LinearExpression2Index index : - non_trivial_bounds_->GetAllLinear2ContainingVariables(var1, var2)) { + lin2_indices_->GetAllLinear2ContainingVariables(var1, var2)) { const IntegerValue lb = -GetUpperBoundNoTrail(NegationOf(index)); const IntegerValue ub = GetUpperBoundNoTrail(index); - const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + const LinearExpression2 expr = lin2_indices_->GetExpression(index); const IntegerValue trail_lb = integer_trail_->LevelZeroLowerBound(expr); const IntegerValue trail_ub = integer_trail_->LevelZeroUpperBound(expr); if (lb <= trail_lb && ub >= trail_ub) continue; @@ -287,25 +318,11 @@ RootLevelLinear2Bounds::GetAllBoundsContainingVariables( return result; } -std::vector +absl::Span> RootLevelLinear2Bounds::GetVariablesInSimpleRelation( IntegerVariable var) const { - std::vector result; - for (const LinearExpression2Index index : - non_trivial_bounds_->GetAllLinear2ContainingVariableWithCoeffOne(var)) { - const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); - const IntegerVariable other = - (expr.vars[0] == var ? 
expr.vars[1] : expr.vars[0]); - DCHECK_EQ(expr.coeffs[0], 1); - DCHECK_EQ(expr.coeffs[1], 1); - DCHECK((expr.vars[0] == var && expr.vars[1] == other) || - (expr.vars[0] == other && expr.vars[1] == var)); - if (GetUpperBoundNoTrail(index) < - integer_trail_->LevelZeroUpperBound(expr)) { - result.push_back(other); - } - } - return result; + if (var >= coeff_one_var_lookup_.size()) return {}; + return coeff_one_var_lookup_[var]; } EnforcedLinear2Bounds::~EnforcedLinear2Bounds() { @@ -317,7 +334,7 @@ EnforcedLinear2Bounds::~EnforcedLinear2Bounds() { } void EnforcedLinear2Bounds::PushConditionalRelation( - absl::Span enforcements, LinearExpression2Index index, + absl::Span enforcements, LinearExpression2Index lin2_index, IntegerValue rhs) { // This must be currently true. if (DEBUG_MODE) { @@ -327,24 +344,24 @@ void EnforcedLinear2Bounds::PushConditionalRelation( } if (enforcements.empty() || trail_->CurrentDecisionLevel() == 0) { - root_level_bounds_->AddUpperBound(index, rhs); + root_level_bounds_->AddUpperBound(lin2_index, rhs); return; } - if (rhs >= root_level_bounds_->LevelZeroUpperBound(index)) return; - const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + if (rhs >= root_level_bounds_->LevelZeroUpperBound(lin2_index)) return; + const LinearExpression2 expr = lin2_indices_->GetExpression(lin2_index); linear2_watcher_->NotifyBoundChanged(expr); ++num_conditional_relation_updates_; const int new_index = conditional_stack_.size(); - if (conditional_relations_.size() <= index) { - conditional_relations_.resize(index.value() + 1, -1); + if (conditional_relations_.size() <= lin2_index) { + conditional_relations_.resize(lin2_index.value() + 1, -1); } - if (conditional_relations_[index] == -1) { - conditional_relations_[index] = new_index; + if (conditional_relations_[lin2_index] == -1) { + conditional_relations_[lin2_index] = new_index; CreateLevelEntryIfNeeded(); - conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, index, + 
conditional_stack_.emplace_back(/*prev_entry=*/-1, rhs, lin2_index, enforcements); if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { @@ -353,17 +370,19 @@ void EnforcedLinear2Bounds::PushConditionalRelation( if (new_size > conditional_var_lookup_.size()) { conditional_var_lookup_.resize(new_size); } - conditional_var_lookup_[expr.vars[0]].push_back(expr.vars[1]); - conditional_var_lookup_[expr.vars[1]].push_back(expr.vars[0]); + conditional_var_lookup_[expr.vars[0]].push_back( + {expr.vars[1], lin2_index}); + conditional_var_lookup_[expr.vars[1]].push_back( + {expr.vars[0], lin2_index}); } } else { - const int prev_entry = conditional_relations_[index]; + const int prev_entry = conditional_relations_[lin2_index]; if (rhs >= conditional_stack_[prev_entry].rhs) return; // Update. - conditional_relations_[index] = new_index; + conditional_relations_[lin2_index] = new_index; CreateLevelEntryIfNeeded(); - conditional_stack_.emplace_back(prev_entry, rhs, index, enforcements); + conditional_stack_.emplace_back(prev_entry, rhs, lin2_index, enforcements); } } @@ -387,12 +406,13 @@ void EnforcedLinear2Bounds::SetLevel(int level) { conditional_relations_[back.key] = back.prev_entry; } else { conditional_relations_[back.key] = -1; - const LinearExpression2 expr = - non_trivial_bounds_->GetExpression(back.key); + const LinearExpression2 expr = lin2_indices_->GetExpression(back.key); if (expr.coeffs[0] == 1 && expr.coeffs[1] == 1) { - DCHECK_EQ(conditional_var_lookup_[expr.vars[0]].back(), expr.vars[1]); - DCHECK_EQ(conditional_var_lookup_[expr.vars[1]].back(), expr.vars[0]); + DCHECK_EQ(conditional_var_lookup_[expr.vars[0]].back().first, + expr.vars[1]); + DCHECK_EQ(conditional_var_lookup_[expr.vars[1]].back().first, + expr.vars[0]); conditional_var_lookup_[expr.vars[0]].pop_back(); conditional_var_lookup_[expr.vars[1]].pop_back(); } @@ -444,9 +464,10 @@ IntegerValue EnforcedLinear2Bounds::GetUpperBoundFromEnforced( } } -void TransitivePrecedencesEvaluator::Build() { - if 
(is_built_) return; - is_built_ = true; +bool TransitivePrecedencesEvaluator::Build() { + const int64_t in_timestamp = root_level_bounds_->num_updates(); + if (in_timestamp <= build_timestamp_) return true; + build_timestamp_ = in_timestamp; const std::vector> root_relations_sorted = @@ -456,19 +477,25 @@ void TransitivePrecedencesEvaluator::Build() { max_node = std::max(max_node, PositiveVariable(expr.vars[0]).value()); max_node = std::max(max_node, PositiveVariable(expr.vars[1]).value()); } - max_node++; - if (max_node >= graph_.num_nodes()) { - graph_.AddNode(max_node); - } - const int num_nodes = graph_.num_nodes(); - util_intops::StrongVector> - before(num_nodes); + max_node++; // For negation. + + // Is it a DAG? + // Get a topological order of the DAG formed by all the arcs that are present. + // + // TODO(user): This can fail if we don't have a DAG. But in the end we + // don't really need a topological order, just something that is close to + // one so that we can compute an approximated transitive closure in O(n^2) and + // not O(n^3). We could use an heuristic instead, like as long as there is + // node with an in-degree of zero, add them to the order and update the + // in-degree of the other (by removing outgoing arcs). If there is a cycle + // (i.e. no node with no incoming arc), pick one with a small in-degree + // randomly. + DenseIntStableTopologicalSorter sorter(max_node); + for (const auto [expr, negated_offset] : root_relations_sorted) { + // Coefficients should be positive. + DCHECK_GT(expr.coeffs[0], 0); + DCHECK_GT(expr.coeffs[1], 0); - // We will construct a graph with the current relation from all_relations_. - // And use this to compute the "closure". - CHECK(arc_offsets_.empty()); - graph_.ReserveArcs(2 * root_relations_sorted.size()); - for (const auto [var_pair, negated_offset] : root_relations_sorted) { // TODO(user): Support negative offset? 
// // Note that if we only have >= 0 ones, if we do have a cycle, we could @@ -477,46 +504,14 @@ void TransitivePrecedencesEvaluator::Build() { const IntegerValue offset = -negated_offset; if (offset < 0) continue; - if (var_pair.coeffs[0] != 1 || var_pair.coeffs[1] != 1) { + if (expr.coeffs[0] != 1 || expr.coeffs[1] != 1) { // TODO(user): Support non-1 coefficients. continue; } // We have two arcs. - { - const IntegerVariable tail = var_pair.vars[0]; - const IntegerVariable head = NegationOf(var_pair.vars[1]); - graph_.AddArc(tail.value(), head.value()); - arc_offsets_.push_back(offset); - CHECK_LT(var_pair.vars[1], before.size()); - before[head].push_back(tail); - } - { - const IntegerVariable tail = var_pair.vars[1]; - const IntegerVariable head = NegationOf(var_pair.vars[0]); - graph_.AddArc(tail.value(), head.value()); - arc_offsets_.push_back(offset); - CHECK_LT(var_pair.vars[1], before.size()); - before[head].push_back(tail); - } - } - - std::vector permutation; - graph_.Build(&permutation); - util::Permute(permutation, &arc_offsets_); - - // Is it a DAG? - // Get a topological order of the DAG formed by all the arcs that are present. - // - // TODO(user): This can fail if we don't have a DAG. We could just skip Bad - // edges instead, and have a sub-DAG as an heuristic. Or analyze the arc - // weight and make sure cycle are not an issue. We can also start with arcs - // with strictly positive weight. - // - // TODO(user): Only explore the sub-graph reachable from "vars". 
- DenseIntStableTopologicalSorter sorter(num_nodes); - for (int arc = 0; arc < graph_.num_arcs(); ++arc) { - sorter.AddEdge(graph_.Tail(arc), graph_.Head(arc)); + sorter.AddEdge(expr.vars[0].value(), NegationOf(expr.vars[1]).value()); + sorter.AddEdge(expr.vars[1].value(), NegationOf(expr.vars[0]).value()); } int next; bool graph_has_cycle = false; @@ -525,57 +520,40 @@ void TransitivePrecedencesEvaluator::Build() { topological_order_.push_back(IntegerVariable(next)); if (graph_has_cycle) { is_dag_ = false; - return; + return true; } } is_dag_ = !graph_has_cycle; - // Lets build full precedences if we don't have too many of them. - // TODO(user): Also do that if we don't have a DAG? - if (!is_dag_) return; - - int work = 0; + // Lets get the transitive closure if it is cheap. This is also a way not to + // add too many relations (not more than 1e6) per call. + int total_work = 0; const int kWorkLimit = 1e6; - for (const IntegerVariable tail_var : topological_order_) { - if (++work > kWorkLimit) break; - for (const int arc : graph_.OutgoingArcs(tail_var.value())) { - DCHECK_EQ(tail_var.value(), graph_.Tail(arc)); - const IntegerVariable head_var = IntegerVariable(graph_.Head(arc)); - const IntegerValue arc_offset = arc_offsets_[arc]; - - if (++work > kWorkLimit) break; - if (root_level_bounds_->AddUpperBound( - LinearExpression2::Difference(tail_var, head_var), -arc_offset)) { - before[head_var].push_back(tail_var); - } - - for (const IntegerVariable before_var : before[tail_var]) { - if (++work > kWorkLimit) break; - const LinearExpression2 expr_for_key(before_var, tail_var, 1, -1); - const IntegerValue offset = - -root_level_bounds_->LevelZeroUpperBound(expr_for_key) + arc_offset; - if (root_level_bounds_->AddUpperBound( - LinearExpression2::Difference(before_var, head_var), -offset)) { - before[head_var].push_back(before_var); - } - } - } + for (const IntegerVariable var : topological_order_) { + const int work = root_level_bounds_->AugmentSimpleRelations( + var, 
kWorkLimit - total_work); + total_work += work; } - VLOG(2) << "Full precedences. Work=" << work - << " Relations=" << root_relations_sorted.size(); + build_timestamp_ = root_level_bounds_->num_updates(); + VLOG(2) << "Full precedences. Work=" << total_work + << " Relations=" << root_relations_sorted.size() + << " num_added=" << build_timestamp_ - in_timestamp; + return true; } +// TODO(user): There is probably little need for that function. For small +// problems, we already augment root_level_bounds_ with all the relations obtained +// by transitive closure, so this algo only needs to look at direct dependencies in +// root_level_bounds_->GetVariablesInSimpleRelation(). And for large graphs, we +// probably do not want this. void TransitivePrecedencesEvaluator::ComputeFullPrecedences( absl::Span vars, std::vector* output) { output->clear(); - if (!is_built_) Build(); + Build(); // Will do nothing if we are up to date. if (!is_dag_) return; - VLOG(2) << "num_nodes: " << graph_.num_nodes() - << " num_arcs: " << graph_.num_arcs() << " is_dag: " << is_dag_; - // Compute all precedences. // We loop over the node in topological order, and we maintain for all // variable we encounter, the list of "to_consider" variables that are before. @@ -603,10 +581,12 @@ void TransitivePrecedencesEvaluator::ComputeFullPrecedences( } } - for (const int arc : graph_.OutgoingArcs(tail_var.value())) { - CHECK_EQ(tail_var.value(), graph_.Tail(arc)); - const IntegerVariable head_var = IntegerVariable(graph_.Head(arc)); - const IntegerValue arc_offset = arc_offsets_[arc]; + // We look for tail_var + offset <= head_var. + for (const auto [neg_head_var, index] : + root_level_bounds_->GetVariablesInSimpleRelation(tail_var)) { + const IntegerVariable head_var = NegationOf(neg_head_var); + const IntegerValue arc_offset = + -root_level_bounds_->GetUpperBoundNoTrail(index); // No need to create an empty entry in this case. 
if (tail_map.empty() && !to_consider.contains(tail_var)) continue; @@ -675,28 +655,31 @@ void EnforcedLinear2Bounds::CollectPrecedences( IntegerVariable* var_with_positive_degree = var_with_positive_degree_.data(); int* var_to_degree = var_to_degree_.data(); int* var_to_last_index = var_to_last_index_.data(); - const auto process = [&](int index, absl::Span v) { - for (const IntegerVariable other : v) { - const IntegerVariable after = NegationOf(other); - DCHECK_LT(after, needed_size); - if (var_to_degree[after.value()] == 0) { - var_with_positive_degree[num_relevants++] = after; - } else { - // We do not want duplicates. - if (var_to_last_index[after.value()] == index) continue; - } + const auto process = + [&](int var_index, + absl::Span> + v) { + for (const auto [other, lin2_index] : v) { + const IntegerVariable after = NegationOf(other); + DCHECK_LT(after, needed_size); + if (var_to_degree[after.value()] == 0) { + var_with_positive_degree[num_relevants++] = after; + } else { + // We do not want duplicates. 
+ if (var_to_last_index[after.value()] == var_index) continue; + } - tmp_precedences_.push_back({after, index}); - var_to_degree[after.value()]++; - var_to_last_index[after.value()] = index; - } - }; + tmp_precedences_.push_back({after, var_index, lin2_index}); + var_to_degree[after.value()]++; + var_to_last_index[after.value()] = var_index; + } + }; - for (int index = 0; index < vars.size(); ++index) { - const IntegerVariable var = vars[index]; - process(index, root_level_bounds_->GetVariablesInSimpleRelation(var)); + for (int var_index = 0; var_index < vars.size(); ++var_index) { + const IntegerVariable var = vars[var_index]; + process(var_index, root_level_bounds_->GetVariablesInSimpleRelation(var)); if (var < conditional_var_lookup_.size()) { - process(index, conditional_var_lookup_[var]); + process(var_index, conditional_var_lookup_[var]); } } @@ -1802,89 +1785,88 @@ Linear2BoundsFromLinear3::Linear2BoundsFromLinear3(Model* model) watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), root_level_bounds_(model->GetOrCreate()), - non_trivial_bounds_( - model->GetOrCreate()) {} + lin2_indices_(model->GetOrCreate()) {} // Note that for speed we do not compare to the trivial or root level bounds. // // It is okay to still store it in the hash-map, since at worst we will have no // more entries than 3 * number_of_linear3_in_the_problem. -bool Linear2BoundsFromLinear3::AddAffineUpperBound(LinearExpression2 expr, - AffineExpression affine_ub) { - expr.SimpleCanonicalization(); - if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return false; - +bool Linear2BoundsFromLinear3::AddAffineUpperBound( + LinearExpression2Index lin2_index, IntegerValue lin_expr_gcd, + AffineExpression affine_ub) { // At level zero, just add it to root_level_bounds_. 
- if (trail_->CurrentDecisionLevel() == 0) { + if (trail_->CurrentDecisionLevel() == 0 || affine_ub.IsConstant()) { root_level_bounds_->AddUpperBound( - expr, integer_trail_->LevelZeroUpperBound(affine_ub)); + lin2_index, FloorRatio(integer_trail_->LevelZeroUpperBound(affine_ub), + lin_expr_gcd)); return false; // Not important. } // We have gcd * canonical_expr <= affine_ub, // so we do need to store a "divisor". - const IntegerValue divisor = expr.DivideByGcd(); - auto it = best_affine_ub_.find(expr); - if (it != best_affine_ub_.end()) { + if (lin2_index >= best_affine_ub_.size()) { + best_affine_ub_.resize(lin2_index.value() + 1, {AffineExpression(), 0}); + } + auto& [old_affine_ub, old_divisor] = best_affine_ub_[lin2_index]; + if (old_divisor != 0) { // We have an affine bound for this expr in the map. Can be exactly the // same, a better one or a worse one. // // Note that we expect exactly the same most of the time as it should be // rare to have many linear3 "competing" for the same linear2 bound. - const auto [old_affine_ub, old_divisor] = it->second; - if (old_affine_ub == affine_ub && old_divisor == divisor) { - linear2_watcher_->NotifyBoundChanged(expr); + if (old_affine_ub == affine_ub && old_divisor == lin_expr_gcd) { + linear2_watcher_->NotifyBoundChanged( + lin2_indices_->GetExpression(lin2_index)); return false; } const IntegerValue new_ub = - FloorRatioWithTest(integer_trail_->UpperBound(affine_ub), divisor); + FloorRatioWithTest(integer_trail_->UpperBound(affine_ub), lin_expr_gcd); const IntegerValue old_ub = FloorRatioWithTest( integer_trail_->UpperBound(old_affine_ub), old_divisor); if (old_ub <= new_ub) return false; // old bound is better. - it->second = {affine_ub, divisor}; // Overwrite. + best_affine_ub_[lin2_index] = {affine_ub, lin_expr_gcd}; // Overwrite. } else { // Note that this should almost never happen (only once per lin2). 
- non_trivial_bounds_->AddOrGet(expr); - best_affine_ub_[expr] = {affine_ub, divisor}; + best_affine_ub_[lin2_index] = {affine_ub, lin_expr_gcd}; } ++num_affine_updates_; - linear2_watcher_->NotifyBoundChanged(expr); + linear2_watcher_->NotifyBoundChanged( + lin2_indices_->GetExpression(lin2_index)); return true; } IntegerValue Linear2BoundsFromLinear3::GetUpperBoundFromLinear3( - LinearExpression2 expr) const { - DCHECK_EQ(expr.DivideByGcd(), 1); - DCHECK(expr.IsCanonicalized()); - const auto it = best_affine_ub_.find(expr); - if (it == best_affine_ub_.end()) { - return kMaxIntegerValue; - } else { - const auto [affine, divisor] = it->second; - return FloorRatio(integer_trail_->UpperBound(affine), divisor); - } + LinearExpression2Index lin2_index) const { + if (lin2_index >= best_affine_ub_.size()) return kMaxIntegerValue; + auto [affine, divisor] = best_affine_ub_[lin2_index]; + if (divisor == 0) return kMaxIntegerValue; + return FloorRatio(integer_trail_->UpperBound(affine), divisor); } void Linear2BoundsFromLinear3::AddReasonForUpperBoundLowerThan( - LinearExpression2 expr, IntegerValue ub, + LinearExpression2Index lin2_index, IntegerValue ub, std::vector* /*literal_reason*/, std::vector* integer_reason) const { - DCHECK(expr.IsCanonicalized()); - DCHECK_EQ(expr.DivideByGcd(), 1); - DCHECK_LE(GetUpperBoundFromLinear3(expr), ub); - - const auto it = best_affine_ub_.find(expr); - DCHECK(it != best_affine_ub_.end()); + DCHECK_LE(GetUpperBoundFromLinear3(lin2_index), ub); + DCHECK_LT(lin2_index, best_affine_ub_.size()); // We want the reason for "expr <= ub" // knowing that expr <= affine / divisor. 
- const auto [affine, divisor] = it->second; + const auto [affine, divisor] = best_affine_ub_[lin2_index]; + DCHECK_NE(divisor, 0); integer_reason->push_back(affine.LowerOrEqual(CapProdI(ub + 1, divisor) - 1)); } +IntegerValue Linear2Bounds::UpperBound( + LinearExpression2Index lin2_index) const { + return std::min( + NonTrivialUpperBound(lin2_index), + integer_trail_->UpperBound(lin2_indices_->GetExpression(lin2_index))); +} + IntegerValue Linear2Bounds::UpperBound(LinearExpression2 expr) const { expr.SimpleCanonicalization(); if (expr.coeffs[0] == 0) { @@ -1893,33 +1875,15 @@ IntegerValue Linear2Bounds::UpperBound(LinearExpression2 expr) const { DCHECK_NE(expr.coeffs[1], 0); const IntegerValue gcd = expr.DivideByGcd(); IntegerValue ub = integer_trail_->UpperBound(expr); - const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); + const LinearExpression2Index index = lin2_indices_->GetIndex(expr); if (index != kNoLinearExpression2Index) { ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(index)); ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(index)); + ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(index)); } - ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(expr)); return CapProdI(gcd, ub); } -IntegerValue Linear2Bounds::NonTrivialUpperBoundForGcd1( - LinearExpression2 expr) const { - expr.SimpleCanonicalization(); - if (expr.coeffs[0] == 0) { - return integer_trail_->UpperBound(expr); - } - DCHECK_NE(expr.coeffs[1], 0); - DCHECK_EQ(1, expr.DivideByGcd()); - IntegerValue ub = kMaxIntegerValue; - const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); - if (index != kNoLinearExpression2Index) { - ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(index)); - ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(index)); - } - ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(expr)); - return ub; -} - void Linear2Bounds::AddReasonForUpperBoundLowerThan( 
LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, @@ -1933,22 +1897,22 @@ void Linear2Bounds::AddReasonForUpperBoundLowerThan( expr.SimpleCanonicalization(); const IntegerValue gcd = expr.DivideByGcd(); ub = FloorRatio(ub, gcd); - const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); - // This one is a single literal. + const LinearExpression2Index index = lin2_indices_->GetIndex(expr); if (index != kNoLinearExpression2Index) { + // No reason. if (root_level_bounds_->GetUpperBoundNoTrail(index) <= ub) { return; } + // This one is a single literal. if (enforced_bounds_->GetUpperBoundFromEnforced(index) <= ub) { return enforced_bounds_->AddReasonForUpperBoundLowerThan( index, ub, literal_reason, integer_reason); } - } - - // This one is a single var upper bound. - if (linear3_bounds_->GetUpperBoundFromLinear3(expr) <= ub) { - return linear3_bounds_->AddReasonForUpperBoundLowerThan( - expr, ub, literal_reason, integer_reason); + // This one is a single var upper bound. + if (linear3_bounds_->GetUpperBoundFromLinear3(index) <= ub) { + return linear3_bounds_->AddReasonForUpperBoundLowerThan( + index, ub, literal_reason, integer_reason); + } } // Trivial linear2 bounds from its variables. diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 586b28dd89..2b56462758 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -65,9 +65,9 @@ inline LinearExpression2Index PositiveLinear2(LinearExpression2Index i) { // list, it does not necessarily mean that it has a non-trivial bound, but the // converse is true: if a linear2 is not in the list, // Linear2Bounds::GetUpperBound() will return a trivial bound. -class Linear2WithPotentialNonTrivalBounds { +class Linear2Indices { public: - Linear2WithPotentialNonTrivalBounds() = default; + Linear2Indices() = default; // Returns a never-changing index for the given linear expression. // The expression must already be canonicalized and divided by its GCD. 
@@ -84,8 +84,7 @@ class Linear2WithPotentialNonTrivalBounds { // bound. When calling this code it is often a good idea to check both the // expression on the span and its negation. The order is fixed forever and // this span can only grow by appending new expressions. - absl::Span GetLinear2WithPotentialNonTrivalBounds() - const { + absl::Span GetStoredLinear2Indices() const { return exprs_; } @@ -99,27 +98,10 @@ class Linear2WithPotentialNonTrivalBounds { absl::Span GetAllLinear2ContainingVariables( IntegerVariable var1, IntegerVariable var2) const; - // For a given variable `var`, return all linear expressions with both - // coefficients 1 that have a potentially non trivial upper bound. For - // convenience it also returns the other variable to cheaply build the - // linear2. Note that using negation one can also recover x + y >= lb and x - - // y <= ub. - absl::Span - GetAllLinear2ContainingVariableWithCoeffOne(IntegerVariable var) const { - if (var >= coeff_one_var_lookup_.size()) return {}; - return coeff_one_var_lookup_[var]; - } - private: std::vector exprs_; absl::flat_hash_map expr_to_index_; - // Lookup table to find all the LinearExpression2 with a given variable and - // having both coefficient 1. - util_intops::StrongVector> - coeff_one_var_lookup_; - // Map to implement GetAllBoundsContainingVariable(). absl::flat_hash_map> @@ -165,8 +147,7 @@ class RootLevelLinear2Bounds { : integer_trail_(model->GetOrCreate()), linear2_watcher_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), - non_trivial_bounds_( - model->GetOrCreate()), + lin2_indices_(model->GetOrCreate()), cp_model_mapping_(model->GetOrCreate()), shared_linear2_bounds_(model->Mutable()), shared_linear2_bounds_id_( @@ -182,9 +163,13 @@ class RootLevelLinear2Bounds { // more restricted than what was currently stored. 
std::pair Add(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) { + if (integer_trail_->LevelZeroUpperBound(expr) <= ub && + integer_trail_->LevelZeroLowerBound(expr) >= lb) { + return {false, false}; + } const bool negated = expr.CanonicalizeAndUpdateBounds(lb, ub); if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return {false, false}; - const LinearExpression2Index index = non_trivial_bounds_->AddOrGet(expr); + const LinearExpression2Index index = lin2_indices_->AddOrGet(expr); bool ub_changed = AddUpperBound(index, ub); bool lb_changed = AddUpperBound(NegationOf(index), -lb); if (negated) { @@ -193,24 +178,26 @@ class RootLevelLinear2Bounds { return {lb_changed, ub_changed}; } - bool AddUpperBound(LinearExpression2Index index, IntegerValue ub); - // Same as above, but only update the upper bound. bool AddUpperBound(LinearExpression2 expr, IntegerValue ub) { + if (integer_trail_->LevelZeroUpperBound(expr) <= ub) return false; expr.SimpleCanonicalization(); if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return false; const IntegerValue gcd = expr.DivideByGcd(); ub = FloorRatio(ub, gcd); - return AddUpperBound(non_trivial_bounds_->AddOrGet(expr), ub); + return AddUpperBound(lin2_indices_->AddOrGet(expr), ub); } + // All modifications go through this function. 
+ bool AddUpperBound(LinearExpression2Index index, IntegerValue ub); + IntegerValue LevelZeroUpperBound(LinearExpression2 expr) const { expr.SimpleCanonicalization(); if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { return integer_trail_->LevelZeroUpperBound(expr); } const IntegerValue gcd = expr.DivideByGcd(); - const LinearExpression2Index index = non_trivial_bounds_->GetIndex(expr); + const LinearExpression2Index index = lin2_indices_->GetIndex(expr); if (index == kNoLinearExpression2Index) { return integer_trail_->LevelZeroUpperBound(expr); } @@ -218,7 +205,7 @@ class RootLevelLinear2Bounds { } IntegerValue LevelZeroUpperBound(LinearExpression2Index index) const { - const LinearExpression2 expr = non_trivial_bounds_->GetExpression(index); + const LinearExpression2 expr = lin2_indices_->GetExpression(index); // TODO(user): Remove the expression from the root_level_relations_ if // the zero-level bound got more restrictive. return std::min(integer_trail_->LevelZeroUpperBound(expr), @@ -250,8 +237,15 @@ class RootLevelLinear2Bounds { // For a given variable `var`, return all variables `other` so that // LinearExpression2(var, other, 1, 1) has a non trivial upper bound. // Note that using negation one can also recover x + y >= lb and x - y <= ub. - std::vector GetVariablesInSimpleRelation( - IntegerVariable var) const; + absl::Span> + GetVariablesInSimpleRelation(IntegerVariable var) const; + + // For all pairs of relation 'a + var <= x' and 'neg(var) + b <= y' try to add + // 'a + b <= x + y' if that relation is better. + // + // This can be quadratic. Returns the amount of "work" done, and abort if + // we reach the limit. This uses GetVariablesInSimpleRelation(). + int AugmentSimpleRelations(IntegerVariable var, int work_limit); RelationStatus GetLevelZeroStatus(LinearExpression2 expr, IntegerValue lb, IntegerValue ub) const; @@ -263,11 +257,13 @@ class RootLevelLinear2Bounds { // canonicalized and gcd-reduced. 
IntegerValue GetUpperBoundNoTrail(LinearExpression2Index index) const; + int64_t num_updates() const { return num_updates_; } + private: IntegerTrail* integer_trail_; Linear2Watcher* linear2_watcher_; SharedStatistics* shared_stats_; - Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; + Linear2Indices* lin2_indices_; CpModelMapping* cp_model_mapping_; SharedLinear2Bounds* shared_linear2_bounds_; // Might be nullptr. @@ -276,6 +272,14 @@ class RootLevelLinear2Bounds { util_intops::StrongVector best_upper_bounds_; + // coeff_one_var_lookup_[var] contains all the other_var such that we have a + // linear2 relation var + other_var <= ub. We also store that relation index. + util_intops::StrongVector in_coeff_one_lookup_; + util_intops::StrongVector< + IntegerVariable, + std::vector>> + coeff_one_var_lookup_; + int64_t num_updates_ = 0; }; @@ -288,15 +292,17 @@ struct FullIntegerPrecedence { // This class is used to compute the transitive closure of the level-zero // precedence relations. // -// TODO(user): Support conditional relation. // TODO(user): Support non-DAG like graph. -// TODO(user): Support variable offset that can be updated as search progress. class TransitivePrecedencesEvaluator { public: explicit TransitivePrecedencesEvaluator(Model* model) : integer_trail_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), - root_level_bounds_(model->GetOrCreate()) {} + root_level_bounds_(model->GetOrCreate()) { + // Call Build() each time we go back to level zero. + model->GetOrCreate()->callbacks.push_back( + [this]() { return Build(); }); + } // Returns a set of relations var >= max_i(vars[index[i]] + offsets[i]). // @@ -325,23 +331,15 @@ class TransitivePrecedencesEvaluator { // two variables. This can be used to optimize some scheduling propagation and // reasons. // - // Warning: If there are too many, this will NOT contain all relations. 
- // - // Returns kMaxIntegerValue if there are none, otherwise return an upper bound - // such that expr <= ub. - // - // TODO(user): Be more dynamic as we start to add relations during search. - void Build(); + // Warning: If there are too many, this will NOT push all relations. + bool Build(); private: IntegerTrail* integer_trail_; SharedStatistics* shared_stats_; RootLevelLinear2Bounds* root_level_bounds_; - util::StaticGraph<> graph_; - std::vector arc_offsets_; - - bool is_built_ = false; + int64_t build_timestamp_ = -1; bool is_dag_ = false; std::vector topological_order_; }; @@ -357,8 +355,7 @@ class EnforcedLinear2Bounds : public ReversibleInterface { linear2_watcher_(model->GetOrCreate()), root_level_bounds_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), - non_trivial_bounds_( - model->GetOrCreate()) { + lin2_indices_(model->GetOrCreate()) { integer_trail_->RegisterReversibleClass(this); } @@ -384,19 +381,19 @@ class EnforcedLinear2Bounds : public ReversibleInterface { if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return; const IntegerValue gcd = expr.DivideByGcd(); rhs = FloorRatio(rhs, gcd); - return PushConditionalRelation(enforcements, - non_trivial_bounds_->AddOrGet(expr), rhs); + return PushConditionalRelation(enforcements, lin2_indices_->AddOrGet(expr), + rhs); } // Called each time we change decision level. void SetLevel(int level) final; - // Returns a set of precedences (var, index) such that we have a relation - // of the form var[index] <= var + offset. + // Returns a set of precedences such that we have a relation + // of the form vars[index] <= var + offset. // // All entries for the same variable will be contiguous and sorted by index. - // We only list variable with at least two entries. The offset can be - // retrieved via Linear2Bounds::UpperBound(Difference(vars[index]), var)). + // We only list variable with at least two entries. The up to date offset can + // be retrieved later via Linear2Bounds::UpperBound(lin2_index). 
// // This method currently ignores all linear2 expressions with any coefficient // different from 1. @@ -405,7 +402,8 @@ class EnforcedLinear2Bounds : public ReversibleInterface { // with other kind of precedences. struct PrecedenceData { IntegerVariable var; - int index; + int var_index; + LinearExpression2Index lin2_index; }; void CollectPrecedences(absl::Span vars, std::vector* output); @@ -429,7 +427,7 @@ class EnforcedLinear2Bounds : public ReversibleInterface { Linear2Watcher* linear2_watcher_; RootLevelLinear2Bounds* root_level_bounds_; SharedStatistics* shared_stats_; - Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; + Linear2Indices* lin2_indices_; int64_t num_conditional_relation_updates_ = 0; @@ -457,7 +455,9 @@ class EnforcedLinear2Bounds : public ReversibleInterface { // Store for each variable x, the variables y that appears alongside it in // lit => x + y <= ub. Note that conditional_var_lookup_ is updated on // dive/backtrack. - util_intops::StrongVector> + util_intops::StrongVector< + IntegerVariable, + std::vector>> conditional_var_lookup_; // Temp data for CollectPrecedences. @@ -551,14 +551,24 @@ class Linear2BoundsFromLinear3 { // If the given upper bound evaluate better than the current one we have, this // will replace it and returns true, otherwise it returns false. - bool AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub); + bool AddAffineUpperBound(LinearExpression2Index lin2_index, + IntegerValue lin_expr_gcd, + AffineExpression affine_ub); + + bool AddAffineUpperBound(LinearExpression2 expr, AffineExpression affine_ub) { + expr.SimpleCanonicalization(); + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) return false; + const IntegerValue gcd = expr.DivideByGcd(); + return AddAffineUpperBound(lin2_indices_->AddOrGet(expr), gcd, affine_ub); + } // Most users should just use Linear2Bounds::UpperBound() instead. // // Returns the upper bound only if there is some relations coming from a // linear3. 
Otherwise always returns kMaxIntegerValue. // `expr` must be canonicalized and gcd-reduced. - IntegerValue GetUpperBoundFromLinear3(LinearExpression2 expr) const; + IntegerValue GetUpperBoundFromLinear3( + LinearExpression2Index lin2_index) const; // Most users should use Linear2Bounds::AddReasonForUpperBoundLowerThan() // instead. @@ -566,7 +576,7 @@ class Linear2BoundsFromLinear3 { // Adds the reason for GetUpperBoundFromLinear3() to be <= ub. // `expr` must be canonicalized and gcd-reduced. void AddReasonForUpperBoundLowerThan( - LinearExpression2 expr, IntegerValue ub, + LinearExpression2Index lin2_index, IntegerValue ub, std::vector* literal_reason, std::vector* integer_reason) const; @@ -577,7 +587,7 @@ class Linear2BoundsFromLinear3 { GenericLiteralWatcher* watcher_; SharedStatistics* shared_stats_; RootLevelLinear2Bounds* root_level_bounds_; - Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; + Linear2Indices* lin2_indices_; int64_t num_affine_updates_ = 0; @@ -587,8 +597,8 @@ class Linear2BoundsFromLinear3 { // we have many possible AffineExpression that bounds a LinearExpression2, we // keep the best one during "search dive" but on backtrack we might have a // sub-optimal relation. - absl::flat_hash_map> + util_intops::StrongVector> best_affine_ub_; }; @@ -640,28 +650,29 @@ class Linear2Bounds { root_level_bounds_(model->GetOrCreate()), enforced_bounds_(model->GetOrCreate()), linear3_bounds_(model->GetOrCreate()), - non_trivial_bounds_( - model->GetOrCreate()) {} + lin2_indices_(model->GetOrCreate()) {} // Returns the best known upper-bound of the given LinearExpression2 at the // current decision level. If its explanation is needed, it can be queried // with the second function. 
IntegerValue UpperBound(LinearExpression2 expr) const; + IntegerValue UpperBound(LinearExpression2Index lin2_index) const; + void AddReasonForUpperBoundLowerThan( LinearExpression2 expr, IntegerValue ub, std::vector* literal_reason, std::vector* integer_reason) const; - // Like UpperBound(), but optimized for the case of gcd == 1 and when we - // don't want the trivial bounds. - IntegerValue NonTrivialUpperBoundForGcd1(LinearExpression2 expr) const; + // Like UpperBound() but do not consider the bounds coming from + // the individual variable bounds. This is faster. + IntegerValue NonTrivialUpperBound(LinearExpression2Index lin2_index) const; private: IntegerTrail* integer_trail_; RootLevelLinear2Bounds* root_level_bounds_; EnforcedLinear2Bounds* enforced_bounds_; Linear2BoundsFromLinear3* linear3_bounds_; - Linear2WithPotentialNonTrivalBounds* non_trivial_bounds_; + Linear2Indices* lin2_indices_; }; // Detects if at least one of a subset of linear of size 2 or 1, touching the @@ -929,6 +940,17 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface { int64_t num_enforcement_pushes_ = 0; }; +// This can be in a hot-loop, so we want to inline it if possible. +inline IntegerValue Linear2Bounds::NonTrivialUpperBound( + LinearExpression2Index lin2_index) const { + CHECK_NE(lin2_index, kNoLinearExpression2Index); + IntegerValue ub = kMaxIntegerValue; + ub = std::min(ub, root_level_bounds_->GetUpperBoundNoTrail(lin2_index)); + ub = std::min(ub, enforced_bounds_->GetUpperBoundFromEnforced(lin2_index)); + ub = std::min(ub, linear3_bounds_->GetUpperBoundFromLinear3(lin2_index)); + return ub; +} + // ============================================================================= // Implementation of the small API functions below. 
// ============================================================================= @@ -1021,8 +1043,11 @@ inline std::function ConditionalLowerOrEqualWithOffset( }; } -inline LinearExpression2Index Linear2WithPotentialNonTrivalBounds::GetIndex( +inline LinearExpression2Index Linear2Indices::GetIndex( LinearExpression2 expr) const { + if (expr.coeffs[0] == 0 || expr.coeffs[1] == 0) { + return kNoLinearExpression2Index; + } DCHECK(expr.IsCanonicalized()); DCHECK_EQ(expr.DivideByGcd(), 1); const bool negated = expr.NegateForCanonicalization(); @@ -1037,7 +1062,7 @@ inline LinearExpression2Index Linear2WithPotentialNonTrivalBounds::GetIndex( } } -inline LinearExpression2 Linear2WithPotentialNonTrivalBounds::GetExpression( +inline LinearExpression2 Linear2Indices::GetExpression( LinearExpression2Index index) const { DCHECK_NE(index, kNoLinearExpression2Index); const int lookup_index = index.value() / 2; @@ -1052,8 +1077,7 @@ inline LinearExpression2 Linear2WithPotentialNonTrivalBounds::GetExpression( } inline absl::Span -Linear2WithPotentialNonTrivalBounds::GetAllLinear2ContainingVariable( - IntegerVariable var) const { +Linear2Indices::GetAllLinear2ContainingVariable(IntegerVariable var) const { const IntegerVariable positive_var = PositiveVariable(var); auto it = var_to_bounds_.find(positive_var); if (it == var_to_bounds_.end()) return {}; @@ -1061,8 +1085,8 @@ Linear2WithPotentialNonTrivalBounds::GetAllLinear2ContainingVariable( } inline absl::Span -Linear2WithPotentialNonTrivalBounds::GetAllLinear2ContainingVariables( - IntegerVariable var1, IntegerVariable var2) const { +Linear2Indices::GetAllLinear2ContainingVariables(IntegerVariable var1, + IntegerVariable var2) const { IntegerVariable positive_var1 = PositiveVariable(var1); IntegerVariable positive_var2 = PositiveVariable(var2); if (positive_var1 > positive_var2) { diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index 159469f659..715be1b237 100644 --- 
a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -190,8 +190,7 @@ TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { auto* lin2_bounds = model.GetOrCreate(); auto* integer_trail = model.GetOrCreate(); auto* precedences = model.GetOrCreate(); - auto* non_trivial_bounds = - model.GetOrCreate(); + auto* lin2_indices = model.GetOrCreate(); const std::vector vars = AddVariables(integer_trail); const Literal l(model.Add(NewBooleanVariable()), true); @@ -210,7 +209,7 @@ TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { std::vector literal_reason; std::vector integer_reason; precedences->AddReasonForUpperBoundLowerThan( - non_trivial_bounds->AddOrGet(expr_a_plus_b), 15, &literal_reason, + lin2_indices->AddOrGet(expr_a_plus_b), 15, &literal_reason, &integer_reason); EXPECT_THAT(literal_reason, ElementsAre(l.Negated())); @@ -220,8 +219,8 @@ TEST(EnforcedLinear2BoundsTest, ConditionalRelations) { literal_reason.clear(); integer_reason.clear(); precedences->AddReasonForUpperBoundLowerThan( - non_trivial_bounds->AddOrGet(expr_a_plus_b), kMaxIntegerValue, - &literal_reason, &integer_reason); + lin2_indices->AddOrGet(expr_a_plus_b), kMaxIntegerValue, &literal_reason, + &integer_reason); EXPECT_THAT(literal_reason, IsEmpty()); } @@ -515,7 +514,7 @@ TEST(EnforcedLinear2BoundsTest, CollectPrecedences) { std::vector indices; std::vector variables; for (const auto precedence : p) { - indices.push_back(precedence.index); + indices.push_back(precedence.var_index); variables.push_back(precedence.var); } EXPECT_EQ(indices, (std::vector{1, 2})); diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index 10e57c7657..e077cc3141 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -151,7 +151,9 @@ class ResponseWrapper { return CpSatHelper::SolverResponseStats(response_); } - std::string SolutionInfo() const { return response_.solution_info(); } + std::string SolutionInfo() 
const { + return std::string(response_.solution_info()); + } std::vector SufficientAssumptionsForInfeasibility() const { return std::vector( diff --git a/ortools/sat/sat_decision.h b/ortools/sat/sat_decision.h index 371c98223d..a65068a99d 100644 --- a/ortools/sat/sat_decision.h +++ b/ortools/sat/sat_decision.h @@ -107,7 +107,8 @@ class SatDecisionPolicy { } // Like SetAssignmentPreference() but it can be overridden by phase-saving. - void SetTargetPolarity(Literal l) { + void SetTargetPolarityIfUnassigned(Literal l) { + if (trail_.Assignment().VariableIsAssigned(l.Variable())) return; has_target_polarity_[l.Variable()] = true; target_polarity_[l.Variable()] = var_polarity_[l.Variable()] = l.IsPositive(); diff --git a/ortools/sat/sat_decision_test.cc b/ortools/sat/sat_decision_test.cc index ff90d70772..7f246e68ec 100644 --- a/ortools/sat/sat_decision_test.cc +++ b/ortools/sat/sat_decision_test.cc @@ -104,7 +104,7 @@ TEST(SatDecisionPolicyTest, SetTargetPolarityInStablePhase) { decision->IncreaseNumVariables(num_variables); for (int i = 0; i < num_variables; ++i) { - decision->SetTargetPolarity(Literal(BooleanVariable(i), i % 2)); + decision->SetTargetPolarityIfUnassigned(Literal(BooleanVariable(i), i % 2)); } decision->SetStablePhase(true); @@ -125,7 +125,7 @@ TEST(SatDecisionPolicyTest, SetTargetPolarity) { decision->IncreaseNumVariables(num_variables); for (int i = 0; i < num_variables; ++i) { - decision->SetTargetPolarity(Literal(BooleanVariable(i), i % 2)); + decision->SetTargetPolarityIfUnassigned(Literal(BooleanVariable(i), i % 2)); } decision->SetStablePhase(false); diff --git a/ortools/sat/solution_crush.cc b/ortools/sat/solution_crush.cc index aa2f2a955f..cad64f1252 100644 --- a/ortools/sat/solution_crush.cc +++ b/ortools/sat/solution_crush.cc @@ -281,6 +281,40 @@ void SolutionCrush::MaybeUpdateVarWithSymmetriesToValue( DCHECK_EQ(GetVarValue(var), value); } +void SolutionCrush::MaybeSwapOrbitopeColumns( + absl::Span> orbitope, int row, int pivot_col, + bool 
value) { + if (!solution_is_loaded_) return; + int col = -1; + for (int c = 0; c < orbitope[row].size(); ++c) { + if (GetLiteralValue(orbitope[row][c]) == value) { + if (col != -1) { + VLOG(2) << "Multiple literals in row with given value"; + return; + } + col = c; + } + } + if (col < pivot_col) { + // Nothing to do. + return; + } + // Swap the value of the literals in column `col` with the value of the ones + // in column `pivot_col`, if they all have a value. + for (int i = 0; i < orbitope.size(); ++i) { + if (!HasValue(PositiveRef(orbitope[i][col]))) return; + if (!HasValue(PositiveRef(orbitope[i][pivot_col]))) return; + } + for (int i = 0; i < orbitope.size(); ++i) { + const int src_lit = orbitope[i][col]; + const int dst_lit = orbitope[i][pivot_col]; + const bool src_value = GetLiteralValue(src_lit); + const bool dst_value = GetLiteralValue(dst_lit); + SetLiteralValue(src_lit, dst_value); + SetLiteralValue(dst_lit, src_value); + } +} + void SolutionCrush::UpdateRefsWithDominance( int ref, int64_t min_value, int64_t max_value, absl::Span> dominating_refs) { diff --git a/ortools/sat/solution_crush.h b/ortools/sat/solution_crush.h index 34c10861cf..4412b34323 100644 --- a/ortools/sat/solution_crush.h +++ b/ortools/sat/solution_crush.h @@ -174,6 +174,13 @@ class SolutionCrush { int var, bool value, absl::Span> generators); + // If at most one literal in `orbitope[row]` is equal to `value`, and if this + // literal is in a column 'col' > `pivot_col`, swaps the value of all the + // literals in columns 'col' and `pivot_col` (if they all have a value). + // Otherwise does nothing. + void MaybeSwapOrbitopeColumns(absl::Span> orbitope, + int row, int pivot_col, bool value); + // Sets the value of the i-th variable in `vars` so that the given constraint // "dotproduct(coeffs, vars values) = rhs" is satisfied, if all the other // variables have a value. i is equal to `var_index` if set. 
Otherwise it is diff --git a/ortools/sat/work_assignment.cc b/ortools/sat/work_assignment.cc index 30fc977c0c..898c200c35 100644 --- a/ortools/sat/work_assignment.cc +++ b/ortools/sat/work_assignment.cc @@ -149,6 +149,7 @@ ProtoTrail::ProtoTrail() { target_phase_.reserve(kMaxPhaseSize); } void ProtoTrail::PushLevel(const ProtoLiteral& decision, IntegerValue objective_lb, int node_id) { CHECK_GT(node_id, 0); + assigned_at_level_[decision] = decision_indexes_.size(); decision_indexes_.push_back(literals_.size()); literals_.push_back(decision); node_ids_.push_back(node_id); @@ -165,14 +166,14 @@ void ProtoTrail::SetLevelImplied(int level) { DCHECK_LE(level, implications_.size()); SetObjectiveLb(level - 1, ObjectiveLb(level)); const ProtoLiteral decision = Decision(level); - implication_level_[decision] = level - 1; + assigned_at_level_[decision] = level - 1; // We don't store implications for level 0, so only move implications up to // the parent if we are removing level 2 or greater. if (level >= 2) { MutableImplications(level - 1).push_back(decision); } for (const ProtoLiteral& implication : Implications(level)) { - implication_level_[implication] = level - 1; + assigned_at_level_[implication] = level - 1; if (level >= 2) { MutableImplications(level - 1).push_back(implication); } @@ -190,7 +191,7 @@ void ProtoTrail::Clear() { level_to_objective_lbs_.clear(); node_ids_.clear(); target_phase_.clear(); - implication_level_.clear(); + assigned_at_level_.clear(); implications_.clear(); } @@ -778,6 +779,7 @@ bool SharedTreeWorker::ShouldReplaceSubtree() { } bool SharedTreeWorker::SyncWithSharedTree() { + DCHECK_EQ(trail_->CurrentDecisionLevel(), 0); manager_->SyncTree(assigned_tree_); if (ShouldReplaceSubtree()) { ++num_trees_; @@ -793,6 +795,8 @@ bool SharedTreeWorker::SyncWithSharedTree() { !decision_policy_->GetBestPartialAssignment().empty()) { assigned_tree_.ClearTargetPhase(); for (Literal lit : decision_policy_->GetBestPartialAssignment()) { + // Skip saving the 
phase for anything assigned at the root. + if (trail_->Assignment().LiteralIsAssigned(lit)) continue; // Only set the phase for booleans to avoid creating literals on other // workers. auto encoded = ProtoLiteral::EncodeLiteral(lit, mapping_); @@ -809,7 +813,7 @@ bool SharedTreeWorker::SyncWithSharedTree() { << assigned_tree_.TargetPhase().size(); decision_policy_->ClearBestPartialAssignment(); for (const ProtoLiteral& lit : assigned_tree_.TargetPhase()) { - decision_policy_->SetTargetPolarity(DecodeDecision(lit)); + decision_policy_->SetTargetPolarityIfUnassigned(DecodeDecision(lit)); } } } diff --git a/ortools/sat/work_assignment.h b/ortools/sat/work_assignment.h index 1626af4fee..980db80226 100644 --- a/ortools/sat/work_assignment.h +++ b/ortools/sat/work_assignment.h @@ -135,10 +135,10 @@ class ProtoTrail { // the decision. absl::Span Implications(int level) const; void AddImplication(int level, ProtoLiteral implication) { - auto it = implication_level_.find(implication); - if (it != implication_level_.end() && it->second <= level) return; + auto it = assigned_at_level_.find(implication); + if (it != assigned_at_level_.end() && it->second <= level) return; MutableImplications(level).push_back(implication); - implication_level_[implication] = level; + assigned_at_level_[implication] = level; } IntegerValue ObjectiveLb(int level) const { @@ -153,7 +153,7 @@ class ProtoTrail { // Appends a literal to the target phase, returns false if the phase is full. 
bool AddPhase(const ProtoLiteral& lit) { if (target_phase_.size() >= kMaxPhaseSize) return false; - if (!implication_level_.contains(lit)) { + if (!IsAssigned(lit)) { target_phase_.push_back(lit); } return true; @@ -164,6 +164,10 @@ class ProtoTrail { if (!AddPhase(lit)) break; } } + bool IsAssigned(const ProtoLiteral& lit) const { + return assigned_at_level_.contains(lit) || + assigned_at_level_.contains(lit.Negated()); + } private: // 256 ProtoLiterals take up 4KiB @@ -179,7 +183,7 @@ class ProtoTrail { // Extra implications that can be propagated at each level but were never // branches in the shared tree. std::vector> implications_; - absl::flat_hash_map implication_level_; + absl::flat_hash_map assigned_at_level_; // The index in the literals_/node_ids_ vectors for the start of each level. std::vector decision_indexes_; diff --git a/ortools/sat/work_assignment_test.cc b/ortools/sat/work_assignment_test.cc index f7d9b3ea55..29cc768b4d 100644 --- a/ortools/sat/work_assignment_test.cc +++ b/ortools/sat/work_assignment_test.cc @@ -39,6 +39,9 @@ TEST(ProtoTrailTest, PushLevel) { EXPECT_EQ(p.MaxLevel(), 1); EXPECT_EQ(p.Decision(1), ProtoLiteral(0, 0)); EXPECT_EQ(p.ObjectiveLb(1), 0); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(0, 0))); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(0, 0).Negated())); + EXPECT_FALSE(p.IsAssigned(ProtoLiteral(1, 0))); } TEST(ProtoTrailTest, AddImplications) { @@ -57,6 +60,12 @@ TEST(ProtoTrailTest, AddImplications) { EXPECT_THAT(p.Implications(2), testing::UnorderedElementsAre( ProtoLiteral(5, 0), ProtoLiteral(2, 0), ProtoLiteral(6, 0))); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(0, 0))); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(1, 0))); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(2, 0))); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(3, 0))); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(5, 0))); + EXPECT_TRUE(p.IsAssigned(ProtoLiteral(6, 0))); } TEST(ProtoTrailTest, SetLevel1Implied) { From 15824de47d3b5f6d7695eb0897a933ed9090120d Mon Sep 17 00:00:00 2001 From: 
Corentin Le Molgat Date: Mon, 23 Jun 2025 17:07:39 +0200 Subject: [PATCH 123/509] math_opt: export from google3 * Protobuf::StringCopy LSC * absl_nonnull attribut --- ortools/math_opt/core/math_opt_proto_utils.cc | 20 +-- ortools/math_opt/core/model_summary.h | 2 +- ortools/math_opt/cpp/BUILD.bazel | 4 +- ortools/math_opt/cpp/callback.cc | 116 +++++++++++++++++- ortools/math_opt/cpp/callback.h | 27 +++- ortools/math_opt/cpp/message_callback.cc | 3 +- ortools/math_opt/cpp/model.cc | 6 +- ortools/math_opt/cpp/model.h | 6 +- ortools/math_opt/cpp/solve_result.cc | 2 +- .../cpp/streamable_solver_init_arguments.cc | 6 +- ortools/math_opt/elemental/elemental.cc | 8 +- .../elemental/elemental_export_model_test.cc | 16 +++ .../elemental_export_model_update_test.cc | 27 ++++ .../elemental/elemental_from_proto.cc | 3 +- ortools/math_opt/elemental/elemental_test.cc | 66 ++++++---- ortools/math_opt/io/proto_converter.cc | 6 +- .../solver_tests/ip_parameter_tests.cc | 9 +- ortools/math_opt/storage/model_storage.cc | 4 +- ortools/math_opt/storage/model_storage.h | 12 +- ortools/math_opt/storage/model_storage_item.h | 5 +- ortools/math_opt/storage/model_storage_v2.cc | 4 +- ortools/math_opt/storage/model_storage_v2.h | 4 +- 22 files changed, 280 insertions(+), 76 deletions(-) diff --git a/ortools/math_opt/core/math_opt_proto_utils.cc b/ortools/math_opt/core/math_opt_proto_utils.cc index ad93e55bc7..88392dfc34 100644 --- a/ortools/math_opt/core/math_opt_proto_utils.cc +++ b/ortools/math_opt/core/math_opt_proto_utils.cc @@ -162,7 +162,7 @@ TerminationProto TerminateForLimit(const LimitProto limit, const bool feasible, } result.set_limit(limit); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -182,7 +182,7 @@ TerminationProto TerminateForReason(const TerminationReasonProto reason, TerminationProto result; result.set_reason(reason); if (!detail.empty()) { - result.set_detail(std::string(detail)); + 
result.set_detail(detail); } return result; } @@ -214,7 +214,7 @@ TerminationProto TerminateForReason(const bool is_maximize, FEASIBILITY_STATUS_UNDETERMINED); *result.mutable_objective_bounds() = MakeTrivialBounds(is_maximize); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -230,7 +230,7 @@ TerminationProto OptimalTerminationProto(const double finite_primal_objective, FEASIBILITY_STATUS_FEASIBLE); result.mutable_problem_status()->set_dual_status(FEASIBILITY_STATUS_FEASIBLE); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -245,7 +245,7 @@ TerminationProto UnboundedTerminationProto(const bool is_maximize, FEASIBILITY_STATUS_INFEASIBLE); *result.mutable_objective_bounds() = MakeUnboundedBounds(is_maximize); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -264,7 +264,7 @@ TerminationProto InfeasibleTerminationProto( result.objective_bounds().primal_bound()); } if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -308,7 +308,7 @@ TerminationProto LimitTerminationProto( result.mutable_objective_bounds()->set_dual_bound(dual_objective); result.set_limit(limit); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -338,7 +338,7 @@ TerminationProto NoSolutionFoundTerminationProto( } result.set_limit(limit); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -363,7 +363,7 @@ TerminationProto FeasibleTerminationProto( } result.set_limit(limit); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } @@ -381,7 +381,7 @@ TerminationProto InfeasibleOrUnboundedTerminationProto( } *result.mutable_objective_bounds() = 
MakeTrivialBounds(is_maximize); if (!detail.empty()) { - result.set_detail(std::string(detail)); + result.set_detail(detail); } return result; } diff --git a/ortools/math_opt/core/model_summary.h b/ortools/math_opt/core/model_summary.h index 061000751e..4ba922e211 100644 --- a/ortools/math_opt/core/model_summary.h +++ b/ortools/math_opt/core/model_summary.h @@ -256,7 +256,7 @@ absl::Status UpdateBiMapFromMappedData( } absl::c_sort(new_ids); for (const int64_t id : new_ids) { - RETURN_IF_ERROR(bimap.Insert(id, proto_map.at(id).name())); + RETURN_IF_ERROR(bimap.Insert(id, std::string(proto_map.at(id).name()))); } return absl::OkStatus(); } diff --git a/ortools/math_opt/cpp/BUILD.bazel b/ortools/math_opt/cpp/BUILD.bazel index 2a62a12df6..8319d01ce6 100644 --- a/ortools/math_opt/cpp/BUILD.bazel +++ b/ortools/math_opt/cpp/BUILD.bazel @@ -239,16 +239,16 @@ cc_library( deps = [ ":enums", ":map_filter", + ":model", ":sparse_containers", ":variable_and_expressions", "//ortools/base", - "//ortools/base:intops", "//ortools/base:protoutil", "//ortools/base:status_macros", "//ortools/math_opt:callback_cc_proto", "//ortools/math_opt:sparse_containers_cc_proto", - "//ortools/math_opt/core:sparse_vector_view", "//ortools/math_opt/storage:model_storage", + "//ortools/util:status_macros", "@abseil-cpp//absl/container:flat_hash_set", "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", diff --git a/ortools/math_opt/cpp/callback.cc b/ortools/math_opt/cpp/callback.cc index 3dcc2d2233..1608ea902b 100644 --- a/ortools/math_opt/cpp/callback.cc +++ b/ortools/math_opt/cpp/callback.cc @@ -16,9 +16,7 @@ #include #include #include -#include -#include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" @@ -26,14 +24,15 @@ #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/base/protoutil.h" -#include "ortools/base/strong_int.h" +#include "ortools/base/status_macros.h" 
#include "ortools/math_opt/callback.pb.h" -#include "ortools/math_opt/core/sparse_vector_view.h" #include "ortools/math_opt/cpp/map_filter.h" +#include "ortools/math_opt/cpp/model.h" #include "ortools/math_opt/cpp/sparse_containers.h" #include "ortools/math_opt/cpp/variable_and_expressions.h" #include "ortools/math_opt/sparse_containers.pb.h" #include "ortools/math_opt/storage/model_storage.h" +#include "ortools/util/status_macros.h" namespace operations_research { namespace math_opt { @@ -89,6 +88,73 @@ CallbackData::CallbackData(const ModelStorageCPtr storage, runtime = *maybe_time; } +absl::Status CallbackData::CheckModelStorage( + const ModelStorageCPtr expected_storage) const { + if (solution.has_value()) { + for (const auto& [v, _] : solution.value()) { + RETURN_IF_ERROR(internal::CheckModelStorage( + /*storage=*/v.storage(), /*expected_storage=*/expected_storage)) + << "invalid variable " << v << " in solution"; + } + } + return absl::OkStatus(); +} + +absl::StatusOr CallbackData::Proto() const { + CallbackDataProto proto; + proto.set_event(EnumToProto(event)); + *proto.mutable_presolve_stats() = presolve_stats; + *proto.mutable_simplex_stats() = simplex_stats; + *proto.mutable_barrier_stats() = barrier_stats; + *proto.mutable_mip_stats() = mip_stats; + if (solution.has_value()) { + *proto.mutable_primal_solution_vector() = + VariableValuesToProto(solution.value()); + } + OR_ASSIGN_OR_RETURN3(*proto.mutable_runtime(), + util_time::EncodeGoogleApiProto(runtime), + _ << "failed to encode runtime"); + return proto; +} + +absl::StatusOr CallbackRegistration::FromProto( + const Model& model, const CallbackRegistrationProto& registration_proto) { + CallbackRegistration result; + + // Parses `events`. 
+ for (int e = 0; e < registration_proto.request_registration_size(); ++e) { + const CallbackEventProto event_proto = + registration_proto.request_registration(e); + const std::optional event = EnumFromProto(event_proto); + if (event == std::nullopt) { + return util::InvalidArgumentErrorBuilder() + << "value CallbackRegistrationProto.request_registration[" << e + << "] is CALLBACK_EVENT_UNSPECIFIED"; + } + if (!result.events.insert(event.value()).second) { + return util::InvalidArgumentErrorBuilder() + << "value " << event + << " is repeated at " + "CallbackRegistrationProto.request_registration[" + << e << "]"; + } + } + + OR_ASSIGN_OR_RETURN3( + result.mip_solution_filter, + VariableFilterFromProto(model, registration_proto.mip_solution_filter()), + _ << "invalid CallbackRegistrationProto.mip_solution_filter"); + OR_ASSIGN_OR_RETURN3( + result.mip_node_filter, + VariableFilterFromProto(model, registration_proto.mip_node_filter()), + _ << "invalid CallbackRegistrationProto.mip_node_filter"); + + result.add_cuts = registration_proto.add_cuts(); + result.add_lazy_constraints = registration_proto.add_lazy_constraints(); + + return result; +} + absl::Status CallbackRegistration::CheckModelStorage( const ModelStorageCPtr expected_storage) const { RETURN_IF_ERROR(mip_node_filter.CheckModelStorage(expected_storage)) @@ -112,6 +178,48 @@ CallbackRegistrationProto CallbackRegistration::Proto() const { return result; } +absl::StatusOr CallbackResult::FromProto( + const Model& model, const CallbackResultProto& result_proto) { + CallbackResult result = { + .terminate = result_proto.terminate(), + }; + + // Add new_constraints. 
+ for (int c = 0; c < result_proto.cuts_size(); ++c) { + const CallbackResultProto::GeneratedLinearConstraint& constraint_proto = + result_proto.cuts(c); + OR_ASSIGN_OR_RETURN3( + const VariableMap coefficients, + VariableValuesFromProto(model.storage(), + constraint_proto.linear_expression()), + _ << "invalid CallbackResultProto.cuts[" << c << "].linear_expression"); + LinearExpression expression; + for (const auto [v, coeff] : coefficients) { + expression += coeff * v; + }; + result.new_constraints.push_back({ + .linear_constraint = BoundedLinearExpression( + /*expression=*/std::move(expression), + /*lower_bound=*/constraint_proto.lower_bound(), + /*upper_bound=*/constraint_proto.upper_bound()), + .is_lazy = constraint_proto.is_lazy(), + }); + } + + // Add suggested_solutions. + for (int s = 0; s < result_proto.suggested_solutions_size(); ++s) { + const SparseDoubleVectorProto suggested_solution_proto = + result_proto.suggested_solutions(s); + OR_ASSIGN_OR_RETURN3( + VariableMap suggested_solution, + VariableValuesFromProto(model.storage(), suggested_solution_proto), + _ << "invalid CallbackResultProto.suggested_solutions[" << s << "]"); + result.suggested_solutions.push_back(std::move(suggested_solution)); + } + + return result; +} + absl::Status CallbackResult::CheckModelStorage( const ModelStorageCPtr expected_storage) const { for (const GeneratedLinearConstraint& constraint : new_constraints) { diff --git a/ortools/math_opt/cpp/callback.h b/ortools/math_opt/cpp/callback.h index b0725a97b4..1a1068a815 100644 --- a/ortools/math_opt/cpp/callback.h +++ b/ortools/math_opt/cpp/callback.h @@ -76,11 +76,12 @@ #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" +#include "absl/status/statusor.h" #include "absl/time/time.h" -#include "absl/types/span.h" #include "ortools/math_opt/callback.pb.h" #include "ortools/math_opt/cpp/enums.h" // IWYU pragma: export #include "ortools/math_opt/cpp/map_filter.h" +#include "ortools/math_opt/cpp/model.h" 
#include "ortools/math_opt/cpp/variable_and_expressions.h" #include "ortools/math_opt/storage/model_storage.h" @@ -142,6 +143,13 @@ MATH_OPT_DEFINE_ENUM(CallbackEvent, CALLBACK_EVENT_UNSPECIFIED); // * what information the callback needs, // * how the callback might alter the solve process. struct CallbackRegistration { + // Returns the CallbackRegistration equivalent to the proto. + // + // Returns an error if filters indices don't match existing variables or if + // events have incorrect values. + static absl::StatusOr FromProto( + const Model& model, const CallbackRegistrationProto& registration_proto); + // Returns a failure if the referenced variables don't belong to the input // expected_storage (which must not be nullptr). absl::Status CheckModelStorage(ModelStorageCPtr expected_storage) const; @@ -191,6 +199,16 @@ struct CallbackData { // Will CHECK fail if proto is not valid. CallbackData(ModelStorageCPtr storage, const CallbackDataProto& proto); + // Returns a failure if the referenced variables don't belong to the input + // expected_storage (which must not be nullptr). + absl::Status CheckModelStorage(ModelStorageCPtr expected_storage) const; + + // Returns the proto equivalent of this object. + // + // The caller should use CheckModelStorage() as this function does not check + // internal consistency of the referenced variables. + absl::StatusOr Proto() const; + // The current state of the underlying solver. CallbackEvent event; @@ -247,6 +265,13 @@ struct CallbackResult { new_constraints.push_back({std::move(linear_constraint), true}); } + // Returns the CallbackResult equivalent to the proto. + // + // Returns an error if constraints or solutions indices don't match existing + // variables. + static absl::StatusOr FromProto( + const Model& model, const CallbackResultProto& result_proto); + // Returns a failure if the referenced variables don't belong to the input // expected_storage (which must not be nullptr). 
absl::Status CheckModelStorage(ModelStorageCPtr expected_storage) const; diff --git a/ortools/math_opt/cpp/message_callback.cc b/ortools/math_opt/cpp/message_callback.cc index 992806acbf..7c33a6aa62 100644 --- a/ortools/math_opt/cpp/message_callback.cc +++ b/ortools/math_opt/cpp/message_callback.cc @@ -87,8 +87,7 @@ MessageCallback PrinterMessageCallback(std::ostream& output_stream, // it uses an absl::Mutex that is not. const auto impl = std::make_shared(output_stream, prefix); - return - [=](const std::vector& messages) { impl->Call(messages); }; + return [=](absl::Span messages) { impl->Call(messages); }; } MessageCallback InfoLoggerMessageCallback(const absl::string_view prefix, diff --git a/ortools/math_opt/cpp/model.cc b/ortools/math_opt/cpp/model.cc index 12ea552d78..9d19f5af72 100644 --- a/ortools/math_opt/cpp/model.cc +++ b/ortools/math_opt/cpp/model.cc @@ -55,7 +55,7 @@ constexpr double kInf = std::numeric_limits::infinity(); absl::StatusOr> Model::FromModelProto( const ModelProto& model_proto) { - ASSIGN_OR_RETURN(absl::Nonnull> storage, + ASSIGN_OR_RETURN(absl_nonnull std::unique_ptr storage, ModelStorage::FromModelProto(model_proto)); return std::make_unique(std::move(storage)); } @@ -63,10 +63,10 @@ absl::StatusOr> Model::FromModelProto( Model::Model(const absl::string_view name) : storage_(std::make_shared(name)) {} -Model::Model(absl::Nonnull> storage) +Model::Model(absl_nonnull std::unique_ptr storage) : storage_(ABSL_DIE_IF_NULL(std::move(storage))) {} -absl::Nonnull> Model::Clone( +absl_nonnull std::unique_ptr Model::Clone( const std::optional new_name) const { return std::make_unique(storage_->Clone(new_name)); } diff --git a/ortools/math_opt/cpp/model.h b/ortools/math_opt/cpp/model.h index bb9939f098..6cb65ed256 100644 --- a/ortools/math_opt/cpp/model.h +++ b/ortools/math_opt/cpp/model.h @@ -137,7 +137,7 @@ class Model { // This constructor is used when loading a model, for example from a // ModelProto or an MPS file. 
Note that in those cases the FromModelProto() // should be used. - explicit Model(absl::Nonnull> storage); + explicit Model(absl_nonnull std::unique_ptr storage); Model(const Model&) = delete; Model& operator=(const Model&) = delete; @@ -159,7 +159,7 @@ class Model { // * in an arbitrary order using Variables() and LinearConstraints(). // // Note that the returned model does not have any update tracker. - absl::Nonnull> Clone( + absl_nonnull std::unique_ptr Clone( std::optional new_name = std::nullopt) const; inline absl::string_view name() const; @@ -925,7 +925,7 @@ class Model { // We use a shared_ptr here so that the UpdateTracker class can have a // weak_ptr on the ModelStorage. This let it have a destructor that don't // crash when called after the destruction of the associated Model. - const absl::Nonnull> storage_; + const absl_nonnull std::shared_ptr storage_; }; //////////////////////////////////////////////////////////////////////////////// diff --git a/ortools/math_opt/cpp/solve_result.cc b/ortools/math_opt/cpp/solve_result.cc index ae0cab0da0..249b986dcb 100644 --- a/ortools/math_opt/cpp/solve_result.cc +++ b/ortools/math_opt/cpp/solve_result.cc @@ -373,7 +373,7 @@ absl::StatusOr Termination::FromProto( return absl::InvalidArgumentError("reason must be specified"); } Termination result(/*is_maximize=*/false, *reason, - termination_proto.detail()); + std::string(termination_proto.detail())); result.limit = EnumFromProto(termination_proto.limit()); OR_ASSIGN_OR_RETURN3( result.problem_status, diff --git a/ortools/math_opt/cpp/streamable_solver_init_arguments.cc b/ortools/math_opt/cpp/streamable_solver_init_arguments.cc index 3d0eea1b74..41ab393e87 100644 --- a/ortools/math_opt/cpp/streamable_solver_init_arguments.cc +++ b/ortools/math_opt/cpp/streamable_solver_init_arguments.cc @@ -34,10 +34,10 @@ GurobiInitializerProto::ISVKey GurobiISVKey::Proto() const { GurobiISVKey GurobiISVKey::FromProto( const GurobiInitializerProto::ISVKey& key_proto) { return 
GurobiISVKey{ - .name = key_proto.name(), - .application_name = key_proto.application_name(), + .name = std::string(key_proto.name()), + .application_name = std::string(key_proto.application_name()), .expiration = key_proto.expiration(), - .key = key_proto.key(), + .key = std::string(key_proto.key()), }; } diff --git a/ortools/math_opt/elemental/elemental.cc b/ortools/math_opt/elemental/elemental.cc index f3018ccde9..4f26acd525 100644 --- a/ortools/math_opt/elemental/elemental.cc +++ b/ortools/math_opt/elemental/elemental.cc @@ -106,7 +106,13 @@ bool Elemental::DeleteElementUntyped(const ElementType e, int64_t id) { const auto keys = element_ref_trackers_[a].GetKeysReferencing( ValueTypeFor(id)); for (const auto key : keys) { - SetAttr(a, key, ValueTypeFor()); + // Don't use SetAttr here, we do not want to track this change, it is + // already implied by the deletion of the element. But still clean up + // the diff trackers for all keys and zero out the value. + for (auto& [unused, diff] : diffs_->UpdateAndGetAll()) { + diff->EraseKeysForAttr(a, {key}); + } + attrs_[a].Erase(key); } } } diff --git a/ortools/math_opt/elemental/elemental_export_model_test.cc b/ortools/math_opt/elemental/elemental_export_model_test.cc index b7dd048fcb..4f9ccbf684 100644 --- a/ortools/math_opt/elemental/elemental_export_model_test.cc +++ b/ortools/math_opt/elemental/elemental_export_model_test.cc @@ -419,6 +419,22 @@ TEST(ExportModelProtoTest, IndicatorConstraintNoneSet) { EXPECT_THAT(elemental.ExportModel(), IsOkAndHolds(EqualsProto(expected))); } +TEST(ExportModelProtoTest, IndicatorConstraintDeleteIndicator) { + Elemental elemental; + const IndicatorConstraintId c = + elemental.AddElement(""); + const VariableId x = elemental.AddElement(""); + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(c), x); + elemental.DeleteElement(x); + + ModelProto expected; + IndicatorConstraintProto& ind_con = + (*expected.mutable_indicator_constraints())[c.value()]; + 
ind_con.set_lower_bound(-kInf); + ind_con.set_upper_bound(kInf); + EXPECT_THAT(elemental.ExportModel(), IsOkAndHolds(EqualsProto(expected))); +} + //////////////////////////////////////////////////////////////////////////////// // Larger tests //////////////////////////////////////////////////////////////////////////////// diff --git a/ortools/math_opt/elemental/elemental_export_model_update_test.cc b/ortools/math_opt/elemental/elemental_export_model_update_test.cc index 0aaecbfa54..36a7dfb2b5 100644 --- a/ortools/math_opt/elemental/elemental_export_model_update_test.cc +++ b/ortools/math_opt/elemental/elemental_export_model_update_test.cc @@ -1246,6 +1246,33 @@ TEST(ExportModelUpdateTest, ModifyIndicatorConstraintActiveOnZeroUnsupported) { HasSubstr("indicator_constraint_activate_on_zero"))); } +TEST(ExportModelUpdateTest, DeleteIndicatorVariable) { + Elemental elemental; + const IndicatorConstraintId c = + elemental.AddElement("c"); + const VariableId x = elemental.AddElement("x"); + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(c), x); + + const Elemental::DiffHandle d = elemental.AddDiff(); + + elemental.DeleteElement(x); + + // Modifying the indicator variable of an indicator constraint (not what is + // happening in this test) is not supported up the stack in mathopt. + // + // Instead here, we are deleting the variable that is the indicator in the + // constraint from the model entirely. MathOpt does support this, and can + // generate a ModelProto here, but solving this model will be an error at + // runtime unless the entire indicator constraint is deleted. + // + // This behavior may change in the future, e.g., we may support modification + // of indicator constraints. 
+ ModelUpdateProto expected; + expected.add_deleted_variable_ids(0); + EXPECT_THAT(elemental.ExportModelUpdate(d), + IsOkAndHolds(Optional(EqualsProto(expected)))); +} + } // namespace } // namespace operations_research::math_opt diff --git a/ortools/math_opt/elemental/elemental_from_proto.cc b/ortools/math_opt/elemental/elemental_from_proto.cc index 50f5afa087..5da78b171a 100644 --- a/ortools/math_opt/elemental/elemental_from_proto.cc +++ b/ortools/math_opt/elemental/elemental_from_proto.cc @@ -253,7 +253,8 @@ absl::StatusOr ElementalFromModelProtoImpl(const ModelProto& proto) { return absl::UnimplementedError( "Elemental does not support sos2 constraints yet"); } - Elemental elemental(proto.name(), proto.objective().name()); + Elemental elemental(std::string(proto.name()), + std::string(proto.objective().name())); AddVariables(proto.variables(), elemental); { const ObjectiveProto& objective = proto.objective(); diff --git a/ortools/math_opt/elemental/elemental_test.cc b/ortools/math_opt/elemental/elemental_test.cc index 44452ef0fa..11744b4bf0 100644 --- a/ortools/math_opt/elemental/elemental_test.cc +++ b/ortools/math_opt/elemental/elemental_test.cc @@ -546,28 +546,52 @@ TEST(ElementalTest, ElementValuedAttr) { const IndicatorConstraintId ic3 = elemental.AddElement(""); - { - const Diff& diff = ElementalTestPeer::GetDiffRef(elemental.AddDiff()); - elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic1), x); - elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic2), x); - elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic2), y); - elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic3), x); - EXPECT_THAT(diff.modified_keys(VariableAttr1::kIndConIndicator), - UnorderedElementsAre(AttrKey(ic1), AttrKey(ic2), AttrKey(ic3))); - } + const Diff& diff = ElementalTestPeer::GetDiffRef(elemental.AddDiff()); + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic1), x); + elemental.SetAttr(VariableAttr1::kIndConIndicator, 
AttrKey(ic2), x); + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic2), y); + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic3), x); + EXPECT_THAT(diff.modified_keys(VariableAttr1::kIndConIndicator), + UnorderedElementsAre(AttrKey(ic1), AttrKey(ic2), AttrKey(ic3))); +} - { - const Diff& diff = ElementalTestPeer::GetDiffRef(elemental.AddDiff()); - // Deleting `x` clears the attribute for `ic1` and `ic3`, which both - // reference `x`. - elemental.DeleteElement(x); - EXPECT_THAT(elemental.AttrNonDefaults(VariableAttr1::kIndConIndicator), - UnorderedElementsAre(AttrKey(ic2))); - // It also informs the diffs that the attributes referencing `x` were - // modified. - EXPECT_THAT(diff.modified_keys(VariableAttr1::kIndConIndicator), - UnorderedElementsAre(AttrKey(ic1), AttrKey(ic3))); - } +TEST(ElementalTest, ElementValuedAttrDeleteVar) { + Elemental elemental; + const VariableId x = elemental.AddElement(""); + const IndicatorConstraintId ic = + elemental.AddElement(""); + + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic), x); + + const Diff& diff = ElementalTestPeer::GetDiffRef(elemental.AddDiff()); + // Deleting `x` clears the attribute for `ic`. + EXPECT_THAT(elemental.AttrNonDefaults(VariableAttr1::kIndConIndicator), + UnorderedElementsAre(AttrKey(ic))); + elemental.DeleteElement(x); + EXPECT_THAT(elemental.AttrNonDefaults(VariableAttr1::kIndConIndicator), + IsEmpty()); + // We do not explicitly track this change in the diff, it is implicit from + // the deletion of f. 
+ EXPECT_THAT(diff.modified_keys(VariableAttr1::kIndConIndicator), IsEmpty()); +} + +TEST(ElementalTest, ElementValuedAttrDeleteVarAfterModifyingVar) { + Elemental elemental; + const VariableId x = elemental.AddElement(""); + const VariableId y = elemental.AddElement(""); + const IndicatorConstraintId ic = + elemental.AddElement(""); + + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic), x); + + const Diff& diff = ElementalTestPeer::GetDiffRef(elemental.AddDiff()); + elemental.SetAttr(VariableAttr1::kIndConIndicator, AttrKey(ic), y); + EXPECT_THAT(diff.modified_keys(VariableAttr1::kIndConIndicator), + UnorderedElementsAre(AttrKey(ic))); + elemental.DeleteElement(y); + EXPECT_THAT(elemental.AttrNonDefaults(VariableAttr1::kIndConIndicator), + IsEmpty()); + EXPECT_THAT(diff.modified_keys(VariableAttr1::kIndConIndicator), IsEmpty()); } TEST(ElementalTest, ElementValuedAttrClear) { diff --git a/ortools/math_opt/io/proto_converter.cc b/ortools/math_opt/io/proto_converter.cc index 8dd21764c3..f0977f8ee0 100644 --- a/ortools/math_opt/io/proto_converter.cc +++ b/ortools/math_opt/io/proto_converter.cc @@ -140,7 +140,7 @@ QuadraticConstraintProto QuadraticConstraintFromMPModelToMathOpt( QuadraticConstraintProto out_constraint; out_constraint.set_lower_bound(in_constraint.lower_bound()); out_constraint.set_upper_bound(in_constraint.upper_bound()); - out_constraint.set_name(std::string(name)); + out_constraint.set_name(name); LinearTermsFromMPModelToMathOpt( in_constraint.var_index(), in_constraint.coefficient(), *out_constraint.mutable_linear_terms()->mutable_ids(), @@ -155,7 +155,7 @@ QuadraticConstraintProto QuadraticConstraintFromMPModelToMathOpt( SosConstraintProto SosConstraintFromMPModelToMathOpt( const MPSosConstraint& in_constraint, const absl::string_view name) { SosConstraintProto out_constraint; - out_constraint.set_name(std::string(name)); + out_constraint.set_name(name); for (const int j : in_constraint.var_index()) { LinearExpressionProto& expr = 
*out_constraint.add_expressions(); expr.add_ids(j); @@ -172,7 +172,7 @@ absl::StatusOr IndicatorConstraintFromMPModelToMathOpt( const MPIndicatorConstraint& in_constraint, const absl::string_view name) { IndicatorConstraintProto out_constraint; - out_constraint.set_name(std::string(name)); + out_constraint.set_name(name); out_constraint.set_indicator_id(in_constraint.var_index()); out_constraint.set_activate_on_zero(in_constraint.has_var_value() && in_constraint.var_value() == 0); diff --git a/ortools/math_opt/solver_tests/ip_parameter_tests.cc b/ortools/math_opt/solver_tests/ip_parameter_tests.cc index da2bd74f8d..dfd2d2a42b 100644 --- a/ortools/math_opt/solver_tests/ip_parameter_tests.cc +++ b/ortools/math_opt/solver_tests/ip_parameter_tests.cc @@ -1273,12 +1273,9 @@ TEST_P(LargeInstanceIpParameterTest, AbsoluteGapTolerance) { } ASSERT_THAT(result, IsOkAndHolds(IsOptimal())); // There should be some space between our optimal solution and best bound - if (GetParam().solver_type != SolverType::kCpSat) { - // CP-SAT in parallel can find the optimal solution directly. - EXPECT_GE(result->termination.objective_bounds.primal_bound - - result->termination.objective_bounds.dual_bound, - absolute_lp_relax_gap / 40.0); - } + EXPECT_GE(result->termination.objective_bounds.primal_bound - + result->termination.objective_bounds.dual_bound, + absolute_lp_relax_gap / 40.0); } // Set the relative gap to 2*(8090 - 7649)/8090 ~= 0.1 and check there is diff --git a/ortools/math_opt/storage/model_storage.cc b/ortools/math_opt/storage/model_storage.cc index 3c5139d07e..9c24890944 100644 --- a/ortools/math_opt/storage/model_storage.cc +++ b/ortools/math_opt/storage/model_storage.cc @@ -46,7 +46,7 @@ namespace operations_research { namespace math_opt { -absl::StatusOr>> +absl::StatusOr> ModelStorage::FromModelProto(const ModelProto& model_proto) { // We don't check names since ModelStorage does not do so before exporting // models. 
Thus a model built by ModelStorage can contain duplicated @@ -144,7 +144,7 @@ void ModelStorage::UpdateLinearConstraintCoefficients( } } -absl::Nonnull> ModelStorage::Clone( +absl_nonnull std::unique_ptr ModelStorage::Clone( const std::optional new_name) const { // We leverage the private copy constructor that copies copyable_data_ but not // update_trackers_ here. diff --git a/ortools/math_opt/storage/model_storage.h b/ortools/math_opt/storage/model_storage.h index 2334290cdc..0fb0519dcf 100644 --- a/ortools/math_opt/storage/model_storage.h +++ b/ortools/math_opt/storage/model_storage.h @@ -177,7 +177,7 @@ class ModelStorage { // considered invalid when solving. // // See ApplyUpdateProto() for dealing with subsequent updates. - static absl::StatusOr > > + static absl::StatusOr > FromModelProto(const ModelProto& model_proto); // Creates an empty minimization problem. @@ -192,7 +192,7 @@ class ModelStorage { // reused any id of variable/constraint that was deleted in the original. // // Note that the returned model does not have any update tracker. - absl::Nonnull > Clone( + absl_nonnull std::unique_ptr Clone( std::optional new_name = std::nullopt) const; inline const std::string& name() const { return copyable_data_.name; } @@ -1311,10 +1311,10 @@ namespace operations_research::math_opt { // Aliases for non-nullable and nullable pointers to a `ModelStorage`. // We should mostly be using the former, but in some cases we need the latter. 
-using ModelStoragePtr = absl::Nonnull; -using NullableModelStoragePtr = absl::Nullable; -using ModelStorageCPtr = absl::Nonnull; -using NullableModelStorageCPtr = absl::Nullable; +using ModelStoragePtr = ModelStorage* absl_nonnull; +using NullableModelStoragePtr = ModelStorage* absl_nullable; +using ModelStorageCPtr = const ModelStorage* absl_nonnull; +using NullableModelStorageCPtr = const ModelStorage* absl_nullable; } // namespace operations_research::math_opt diff --git a/ortools/math_opt/storage/model_storage_item.h b/ortools/math_opt/storage/model_storage_item.h index bfda35e691..539993bb04 100644 --- a/ortools/math_opt/storage/model_storage_item.h +++ b/ortools/math_opt/storage/model_storage_item.h @@ -174,9 +174,10 @@ class ModelStorageItemContainer { // When moving we're leaving the moved-from object unassociated with any // model. Derived classes should hold no items after being moved from. - ModelStorageItemContainer(ModelStorageItemContainer&& other) + ModelStorageItemContainer(ModelStorageItemContainer&& other) noexcept : storage_(std::exchange(other.storage_, nullptr)) {} - ModelStorageItemContainer& operator=(ModelStorageItemContainer&& other) { + ModelStorageItemContainer& operator=( + ModelStorageItemContainer&& other) noexcept { storage_ = std::exchange(other.storage_, nullptr); return *this; } diff --git a/ortools/math_opt/storage/model_storage_v2.cc b/ortools/math_opt/storage/model_storage_v2.cc index e911eaecc4..60b0ec952d 100644 --- a/ortools/math_opt/storage/model_storage_v2.cc +++ b/ortools/math_opt/storage/model_storage_v2.cc @@ -76,13 +76,13 @@ void ModelStorageV2::DeleteLinearConstraint(LinearConstraintId id) { << ", it is not in the model"; } -absl::StatusOr>> +absl::StatusOr> ModelStorageV2::FromModelProto(const ModelProto& model_proto) { ASSIGN_OR_RETURN(Elemental e, Elemental::FromModelProto(model_proto)); return absl::WrapUnique(new ModelStorageV2(std::move(e))); } -absl::Nonnull> ModelStorageV2::Clone( +absl_nonnull std::unique_ptr 
ModelStorageV2::Clone( const std::optional new_name) const { return absl::WrapUnique(new ModelStorageV2(elemental_.Clone(new_name))); } diff --git a/ortools/math_opt/storage/model_storage_v2.h b/ortools/math_opt/storage/model_storage_v2.h index 45078bedad..c8c13b7232 100644 --- a/ortools/math_opt/storage/model_storage_v2.h +++ b/ortools/math_opt/storage/model_storage_v2.h @@ -90,7 +90,7 @@ class ModelStorageV2 { // considered invalid when solving. // // See ApplyUpdateProto() for dealing with subsequent updates. - static absl::StatusOr>> + static absl::StatusOr> FromModelProto(const ModelProto& model_proto); // Creates an empty minimization problem. @@ -106,7 +106,7 @@ class ModelStorageV2 { // reused any id of variable/constraint that was deleted in the original. // // Note that the returned model does not have any update tracker. - absl::Nonnull> Clone( + absl_nonnull std::unique_ptr Clone( std::optional new_name = std::nullopt) const; inline const std::string& name() const { return elemental_.model_name(); } From 67aef7f1e89989b7e74153b08eb8f3249845404b Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 23 Jun 2025 17:08:46 +0200 Subject: [PATCH 124/509] linear_solver: export from google3 --- ortools/linear_solver/BUILD.bazel | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ortools/linear_solver/BUILD.bazel b/ortools/linear_solver/BUILD.bazel index e8103e9968..463468c7ee 100644 --- a/ortools/linear_solver/BUILD.bazel +++ b/ortools/linear_solver/BUILD.bazel @@ -348,12 +348,12 @@ cc_library( hdrs = ["gurobi_util.h"], deps = [ "//ortools/third_party_solvers:gurobi_environment", - "@abseil-cpp//absl/status", - "@abseil-cpp//absl/strings", - "@abseil-cpp//absl/strings:str_format", "@abseil-cpp//absl/flags:flag", "@abseil-cpp//absl/log", + "@abseil-cpp//absl/status", "@abseil-cpp//absl/status:statusor", + "@abseil-cpp//absl/strings", + "@abseil-cpp//absl/strings:str_format", ], ) From b56b4196a2dec05dd346fe1a2f05b60d915c8136 Mon Sep 17 
00:00:00 2001 From: Mizux Seiha Date: Tue, 24 Jun 2025 17:47:02 +0200 Subject: [PATCH 125/509] Update README.md --- ortools/python/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/python/README.md b/ortools/python/README.md index e95202f480..82c903afe9 100644 --- a/ortools/python/README.md +++ b/ortools/python/README.md @@ -18,7 +18,7 @@ This project aim to explain how you build a Python native wheel package using ## Requirement -You'll need "Python >= 3.6" and few python modules ("wheel" and "absl-py"). +You'll need "Python >= 3.9" and few python modules ("wheel" and "absl-py"). ## Directory Layout From 8ed3f8d9955ba742afcc9cddff7e8a5b304004d3 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 24 Jun 2025 21:08:27 +0200 Subject: [PATCH 126/509] [CP-SAT] more tuning on shared tree workers; add parameter to limit linear2 efforts on restarts --- ortools/sat/precedences.cc | 15 +++++++++------ ortools/sat/precedences.h | 4 +++- ortools/sat/python/cp_model_test.py | 6 +++--- ortools/sat/sat_decision.cc | 23 +++++++++++++++++++++++ ortools/sat/sat_decision.h | 9 +++++++++ ortools/sat/sat_decision_test.cc | 28 ++++++++++++++++++++++++++++ ortools/sat/sat_parameters.proto | 7 ++++++- ortools/sat/work_assignment.cc | 1 + 8 files changed, 82 insertions(+), 11 deletions(-) diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index c4cef71f05..339c6ece5e 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -526,13 +526,16 @@ bool TransitivePrecedencesEvaluator::Build() { is_dag_ = !graph_has_cycle; // Lets get the transitive closure if it is cheap. This is also a way not to - // add too many relations (not more than 1e6) per call. + // add too many relations per call. 
int total_work = 0; - const int kWorkLimit = 1e6; - for (const IntegerVariable var : topological_order_) { - const int work = root_level_bounds_->AugmentSimpleRelations( - var, kWorkLimit - total_work); - total_work += work; + const int kWorkLimit = params_->transitive_precedences_work_limit(); + if (kWorkLimit > 0) { + for (const IntegerVariable var : topological_order_) { + const int work = root_level_bounds_->AugmentSimpleRelations( + var, kWorkLimit - total_work); + total_work += work; + if (total_work >= kWorkLimit) break; + } } build_timestamp_ = root_level_bounds_->num_updates(); diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 2b56462758..59b0d0f064 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -296,7 +296,8 @@ struct FullIntegerPrecedence { class TransitivePrecedencesEvaluator { public: explicit TransitivePrecedencesEvaluator(Model* model) - : integer_trail_(model->GetOrCreate()), + : params_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()), shared_stats_(model->GetOrCreate()), root_level_bounds_(model->GetOrCreate()) { // Call Build() each time we go back to level zero. 
@@ -335,6 +336,7 @@ class TransitivePrecedencesEvaluator { bool Build(); private: + SatParameters* params_; IntegerTrail* integer_trail_; SharedStatistics* shared_stats_; RootLevelLinear2Bounds* root_level_bounds_; diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 9bbaee5513..7e1b1b1554 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -2296,10 +2296,10 @@ TRFM""" solver.best_bound_callback = best_bound_callback.new_best_bound status = solver.Solve(model, solution_callback) if status == cp_model.OPTIMAL: - self.assertLess( - time.time(), - max(best_bound_callback.last_time, solution_callback.last_time) + 9.0, + last_activity = max( + best_bound_callback.last_time, solution_callback.last_time ) + self.assertLess(time.time(), last_activity + 15.0) def test_issue4434(self) -> None: model = cp_model.CpModel() diff --git a/ortools/sat/sat_decision.cc b/ortools/sat/sat_decision.cc index deb38947e3..ba731aa626 100644 --- a/ortools/sat/sat_decision.cc +++ b/ortools/sat/sat_decision.cc @@ -14,12 +14,14 @@ #include "ortools/sat/sat_decision.h" #include +#include #include #include #include #include #include +#include "absl/algorithm/container.h" #include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/logging.h" @@ -225,6 +227,27 @@ void SatDecisionPolicy::RandomizeCurrentPolarity() { } } +void SatDecisionPolicy::ResetActivitiesToFollowBestPartialAssignment() { + DCHECK_EQ(trail_.CurrentDecisionLevel(), 0); + CHECK(!activities_.empty()); + const double max_activity = + *absl::c_max_element(activities_) + variable_activity_increment_; + const double kDecay = 0.999; + variable_activity_increment_ = + max_activity / pow(kDecay, best_partial_assignment_.size() + 1); + var_ordering_is_initialized_ = false; + if (max_activity + variable_activity_increment_ > + parameters_.max_variable_activity_value()) { + RescaleVariableActivities(1 / 
parameters_.max_variable_activity_value()); + } + double weight = 1.0; + for (int i = 0; i < best_partial_assignment_.size(); ++i) { + const Literal l = best_partial_assignment_[i]; + weight *= kDecay; + activities_[l.Variable()] += weight * variable_activity_increment_; + } +} + void SatDecisionPolicy::InitializeVariableOrdering() { const int num_variables = activities_.size(); diff --git a/ortools/sat/sat_decision.h b/ortools/sat/sat_decision.h index a65068a99d..b00fd48882 100644 --- a/ortools/sat/sat_decision.h +++ b/ortools/sat/sat_decision.h @@ -124,6 +124,15 @@ class SatDecisionPolicy { best_partial_assignment_.clear(); } + // Increases activities of variables in the best partial assignment to ensure + // they are branched on first in the same order until the next conflict. + // Activities before this call are scaled to become disambiguation terms. + // Future conflicts will bump activity by the largest increase applied by this + // method. + // This acts as a soft-reset of the decision policy, useful when exploring a + // new region of the search space. + void ResetActivitiesToFollowBestPartialAssignment(); + private: // Computes an initial variable ordering. 
void InitializeVariableOrdering(); diff --git a/ortools/sat/sat_decision_test.cc b/ortools/sat/sat_decision_test.cc index 7f246e68ec..798178bfe9 100644 --- a/ortools/sat/sat_decision_test.cc +++ b/ortools/sat/sat_decision_test.cc @@ -136,6 +136,34 @@ TEST(SatDecisionPolicyTest, SetTargetPolarity) { trail->EnqueueSearchDecision(literal); } } + +TEST(SatDecisionPolicyTest, TestFollowBestPartialAssignment) { + Model model; + model.GetOrCreate()->set_initial_variables_activity(1e9); + Trail* trail = model.GetOrCreate(); + SatDecisionPolicy* decision = model.GetOrCreate(); + const int num_variables = 10; + trail->Resize(num_variables); + decision->IncreaseNumVariables(num_variables); + + for (int i = 0; i < num_variables; ++i) { + decision->SetTargetPolarityIfUnassigned(Literal(BooleanVariable(i), i % 2)); + } + for (int i = 0; i < num_variables - 1; ++i) { + // Bump all suffixes of the best partial assignment, so the last element has + // the highest activity. + decision->BumpVariableActivities( + decision->GetBestPartialAssignment().subspan(i)); + } + decision->ResetActivitiesToFollowBestPartialAssignment(); + + decision->SetStablePhase(false); + for (int i = 0; i < num_variables; ++i) { + const Literal literal = decision->NextBranch(); + EXPECT_EQ(literal, Literal(BooleanVariable(i), i % 2)); + trail->EnqueueSearchDecision(literal); + } +} } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 60901fc1c0..ab7d851a1d 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -24,7 +24,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 327 +// NEXT TAG: 328 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. 
@@ -825,6 +825,11 @@ message SatParameters { // depending on the problem, turning this off may lead to a faster solution. optional bool use_precedences_in_disjunctive_constraint = 74 [default = true]; + // At root level, we might compute the transitive closure of "precedences" + // relations so that we can exploit that in scheduling problems. Setting this + // to zero disables the feature. + optional int32 transitive_precedences_work_limit = 327 [default = 1000000]; + // Create one literal for each disjunction of two pairs of tasks. This slows + // down the solve time, but improves the lower bound of the objective in the + // makespan case. This will be triggered if the number of intervals is less or diff --git a/ortools/sat/work_assignment.cc b/ortools/sat/work_assignment.cc index 898c200c35..cec74c9b52 100644 --- a/ortools/sat/work_assignment.cc +++ b/ortools/sat/work_assignment.cc @@ -815,6 +815,7 @@ bool SharedTreeWorker::SyncWithSharedTree() { for (const ProtoLiteral& lit : assigned_tree_.TargetPhase()) { decision_policy_->SetTargetPolarityIfUnassigned(DecodeDecision(lit)); } + decision_policy_->ResetActivitiesToFollowBestPartialAssignment(); } } // If we commit to this subtree, keep it for at least 1s of dtime. 
From edc06ed66220149a33d7b289d480b9b076818e7c Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 24 Jun 2025 21:08:39 +0200 Subject: [PATCH 127/509] cleanup dockerfile --- ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile | 2 +- ortools/flatzinc/challenge/minizinc-challenge.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile index 301a7dc887..d5d6d02634 100644 --- a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile @@ -1,6 +1,6 @@ FROM minizinc/mznc2025:latest AS env -ENV SRC_GIT_BRANCH v99bugfix +ENV SRC_GIT_BRANCH=v99bugfix ENV TZ=America/Los_Angeles diff --git a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile index 1113ff8778..0fdfc256e6 100644 --- a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile +++ b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile @@ -1,6 +1,6 @@ FROM minizinc/mznc2025:latest AS env -ENV SRC_GIT_BRANCH v99bugfix +ENV SRC_GIT_BRANCH=v99bugfix ENV TZ=America/Los_Angeles From 8b6100e21e1704de46f0d2c5b0b92341955421cd Mon Sep 17 00:00:00 2001 From: Guillaume Chatelet Date: Mon, 23 Jun 2025 16:27:46 +0000 Subject: [PATCH 128/509] Format py files to adopt internal style --- cmake/samples/python/sample.py | 1 + examples/python/appointments.py | 2 +- examples/python/balance_group_sat.py | 1 + examples/python/bus_driver_scheduling_sat.py | 9 +- examples/python/cryptarithm_sat.py | 3 +- examples/python/golomb8.py | 2 +- examples/python/golomb_sat.py | 1 + examples/python/knapsack_2d_sat.py | 1 - examples/python/line_balancing_sat.py | 1 + examples/python/linear_assignment_api.py | 13 +- examples/python/maximize_combinations_sat.py | 1 + examples/python/maze_escape_sat.py | 1 + examples/python/nqueens_sat.py | 1 + 
examples/python/pell_equation_sat.py | 1 + examples/python/pyflow_example.py | 7 +- examples/python/rcpsp_sat.py | 2 +- examples/python/spread_robots_sat.py | 1 - examples/python/steel_mill_slab_sat.py | 1 + .../python/weighted_latency_problem_sat.py | 5 +- ortools/sat/python/cp_model_test.py | 6 +- ortools/set_cover/samples/set_cover.py | 1 + tools/check_python_deps.py | 5 +- tools/doc/doxygen_filter.py | 302 ++++++------ tools/doc/gen_ref_doc.py | 455 +++++++++--------- 24 files changed, 414 insertions(+), 409 deletions(-) diff --git a/cmake/samples/python/sample.py b/cmake/samples/python/sample.py index ebeb7be394..abd5f6ce16 100644 --- a/cmake/samples/python/sample.py +++ b/cmake/samples/python/sample.py @@ -13,6 +13,7 @@ # limitations under the License. """Sample to test or-tools installation.""" + import ortools # from ortools.algorithms import knapsack_solver diff --git a/examples/python/appointments.py b/examples/python/appointments.py index 328ac691f0..65fbc66031 100644 --- a/examples/python/appointments.py +++ b/examples/python/appointments.py @@ -180,7 +180,7 @@ def aggregate_item_collections_optimally( def get_optimal_schedule( - demand: list[tuple[float, str, int]] + demand: list[tuple[float, str, int]], ) -> list[tuple[int, list[tuple[int, str]]]]: """Computes the optimal schedule for the installation input. diff --git a/examples/python/balance_group_sat.py b/examples/python/balance_group_sat.py index 5f37f95605..10d0725075 100644 --- a/examples/python/balance_group_sat.py +++ b/examples/python/balance_group_sat.py @@ -19,6 +19,7 @@ be as close to the average as possible. Furthermore, if one color is an a group, at least k items with this color must be in that group. 
""" + from typing import Dict, Sequence from absl import app diff --git a/examples/python/bus_driver_scheduling_sat.py b/examples/python/bus_driver_scheduling_sat.py index 0a957febd2..64f77118d7 100644 --- a/examples/python/bus_driver_scheduling_sat.py +++ b/examples/python/bus_driver_scheduling_sat.py @@ -29,6 +29,7 @@ import math from absl import app from absl import flags + from google.protobuf import text_format from ortools.sat.python import cp_model @@ -81,7 +82,7 @@ SAMPLE_SHIFTS_TINY = [ [25, "15:40", "15:56", 940, 956, 16], [26, "15:58", "16:45", 958, 1005, 47], [27, "16:04", "17:30", 964, 1050, 86], -] # yapf:disable +] SAMPLE_SHIFTS_SMALL = [ # @@ -143,7 +144,7 @@ SAMPLE_SHIFTS_SMALL = [ [47, "18:34", "19:58", 1114, 1198, 84], [48, "19:56", "20:34", 1196, 1234, 38], [49, "20:05", "20:48", 1205, 1248, 43], -] # yapf:disable +] SAMPLE_SHIFTS_MEDIUM = [ [0, "04:30", "04:53", 270, 293, 23], @@ -346,7 +347,7 @@ SAMPLE_SHIFTS_MEDIUM = [ [197, "00:02", "00:12", 1442, 1452, 10], [198, "00:07", "00:39", 1447, 1479, 32], [199, "00:25", "01:12", 1465, 1512, 47], -] # yapf:disable +] SAMPLE_SHIFTS_LARGE = [ [0, "04:18", "05:00", 258, 300, 42], @@ -1705,7 +1706,7 @@ SAMPLE_SHIFTS_LARGE = [ [1353, "00:47", "01:26", 1487, 1526, 39], [1354, "00:54", "01:04", 1494, 1504, 10], [1355, "00:57", "01:07", 1497, 1507, 10], -] # yapf:disable +] def bus_driver_scheduling(minimize_drivers: bool, max_num_drivers: int) -> int: diff --git a/examples/python/cryptarithm_sat.py b/examples/python/cryptarithm_sat.py index c4e49e0873..9b28dbc97b 100644 --- a/examples/python/cryptarithm_sat.py +++ b/examples/python/cryptarithm_sat.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Use CP-SAT to solve a simple cryptarithmetic problem: SEND+MORE=MONEY. 
-""" +"""Use CP-SAT to solve a simple cryptarithmetic problem: SEND+MORE=MONEY.""" from absl import app from ortools.sat.python import cp_model diff --git a/examples/python/golomb8.py b/examples/python/golomb8.py index cb2a2423ca..ec720e3191 100755 --- a/examples/python/golomb8.py +++ b/examples/python/golomb8.py @@ -69,7 +69,7 @@ def main(_) -> None: branches = collector.Branches(i) failures = collector.Failures(i) print( - ("Solution #%i: value = %i, failures = %i, branches = %i," "time = %i ms") + "Solution #%i: value = %i, failures = %i, branches = %i,time = %i ms" % (i, obj_value, failures, branches, time) ) time = solver.WallTime() diff --git a/examples/python/golomb_sat.py b/examples/python/golomb_sat.py index 6b4e19cc06..0ed2240a6b 100644 --- a/examples/python/golomb_sat.py +++ b/examples/python/golomb_sat.py @@ -24,6 +24,7 @@ see: https://en.wikipedia.org/wiki/Golomb_ruler """ from typing import Sequence + from absl import app from absl import flags diff --git a/examples/python/knapsack_2d_sat.py b/examples/python/knapsack_2d_sat.py index e771821552..5014d246e8 100644 --- a/examples/python/knapsack_2d_sat.py +++ b/examples/python/knapsack_2d_sat.py @@ -26,7 +26,6 @@ import numpy as np import pandas as pd from google.protobuf import text_format - from ortools.sat.python import cp_model diff --git a/examples/python/line_balancing_sat.py b/examples/python/line_balancing_sat.py index c80a747d3a..5cb513c52b 100644 --- a/examples/python/line_balancing_sat.py +++ b/examples/python/line_balancing_sat.py @@ -33,6 +33,7 @@ from typing import Dict, Sequence from absl import app from absl import flags + from google.protobuf import text_format from ortools.sat.python import cp_model diff --git a/examples/python/linear_assignment_api.py b/examples/python/linear_assignment_api.py index 34514974e4..64c4de0532 100644 --- a/examples/python/linear_assignment_api.py +++ b/examples/python/linear_assignment_api.py @@ -14,9 +14,9 @@ """Test linear sum assignment on a 4x4 matrix. 
- Example taken from: - http://www.ee.oulu.fi/~mpa/matreng/eem1_2-1.htm with kCost[0][1] - modified so the optimum solution is unique. +Example taken from: +http://www.ee.oulu.fi/~mpa/matreng/eem1_2-1.htm with kCost[0][1] +modified so the optimum solution is unique. """ from typing import Sequence @@ -28,7 +28,12 @@ def run_assignment_on_4x4_matrix(): """Test linear sum assignment on a 4x4 matrix.""" num_sources = 4 num_targets = 4 - cost = [[90, 76, 75, 80], [35, 85, 55, 65], [125, 95, 90, 105], [45, 110, 95, 115]] + cost = [ + [90, 76, 75, 80], + [35, 85, 55, 65], + [125, 95, 90, 105], + [45, 110, 95, 115], + ] expected_cost = cost[0][3] + cost[1][2] + cost[2][1] + cost[3][0] assignment = linear_sum_assignment.SimpleLinearSumAssignment() diff --git a/examples/python/maximize_combinations_sat.py b/examples/python/maximize_combinations_sat.py index a23e90d384..853107deaf 100644 --- a/examples/python/maximize_combinations_sat.py +++ b/examples/python/maximize_combinations_sat.py @@ -15,6 +15,7 @@ """Maximize the number of valid combinations of Boolean variables.""" from typing import Sequence + from absl import app from ortools.sat.python import cp_model diff --git a/examples/python/maze_escape_sat.py b/examples/python/maze_escape_sat.py index 7a96e453d8..6d5e9c4796 100644 --- a/examples/python/maze_escape_sat.py +++ b/examples/python/maze_escape_sat.py @@ -20,6 +20,7 @@ visit all boxes in order, and walk on each block in a 4x4x4 map exactly once. 
Admissible moves are one step in one of the 6 directions: x+, x-, y+, y-, z+(up), z-(down) """ + from typing import Dict, Sequence, Tuple from absl import app diff --git a/examples/python/nqueens_sat.py b/examples/python/nqueens_sat.py index e2f29542a5..6fd102dcdc 100644 --- a/examples/python/nqueens_sat.py +++ b/examples/python/nqueens_sat.py @@ -18,6 +18,7 @@ import time from absl import app from absl import flags + from ortools.sat.python import cp_model _SIZE = flags.DEFINE_integer("size", 8, "Number of queens.") diff --git a/examples/python/pell_equation_sat.py b/examples/python/pell_equation_sat.py index 3583c5557d..75554e6a40 100644 --- a/examples/python/pell_equation_sat.py +++ b/examples/python/pell_equation_sat.py @@ -18,6 +18,7 @@ from collections.abc import Sequence from absl import app from absl import flags + from ortools.sat.python import cp_model diff --git a/examples/python/pyflow_example.py b/examples/python/pyflow_example.py index 55db850530..44f0498d4c 100644 --- a/examples/python/pyflow_example.py +++ b/examples/python/pyflow_example.py @@ -52,7 +52,12 @@ def min_cost_flow_api(): print("MinCostFlow on 4x4 matrix.") num_sources = 4 num_targets = 4 - costs = [[90, 75, 75, 80], [35, 85, 55, 65], [125, 95, 90, 105], [45, 110, 95, 115]] + costs = [ + [90, 75, 75, 80], + [35, 85, 55, 65], + [125, 95, 90, 105], + [45, 110, 95, 115], + ] expected_cost = 275 smcf = min_cost_flow.SimpleMinCostFlow() for source in range(0, num_sources): diff --git a/examples/python/rcpsp_sat.py b/examples/python/rcpsp_sat.py index 2b78e3d049..eb4fa3e1b7 100644 --- a/examples/python/rcpsp_sat.py +++ b/examples/python/rcpsp_sat.py @@ -27,9 +27,9 @@ from absl import app from absl import flags from google.protobuf import text_format -from ortools.sat.python import cp_model from ortools.scheduling import rcpsp_pb2 from ortools.scheduling.python import rcpsp +from ortools.sat.python import cp_model _INPUT = flags.DEFINE_string("input", "", "Input file to parse and solve.") 
_OUTPUT_PROTO = flags.DEFINE_string( diff --git a/examples/python/spread_robots_sat.py b/examples/python/spread_robots_sat.py index 27da1d65b9..b9fc5999e6 100644 --- a/examples/python/spread_robots_sat.py +++ b/examples/python/spread_robots_sat.py @@ -18,7 +18,6 @@ import math from typing import Sequence from absl import app from absl import flags - from google.protobuf import text_format from ortools.sat.python import cp_model diff --git a/examples/python/steel_mill_slab_sat.py b/examples/python/steel_mill_slab_sat.py index 6f79d85fbe..e84b490cf9 100644 --- a/examples/python/steel_mill_slab_sat.py +++ b/examples/python/steel_mill_slab_sat.py @@ -21,6 +21,7 @@ import time from absl import app from absl import flags + from google.protobuf import text_format from ortools.sat.python import cp_model diff --git a/examples/python/weighted_latency_problem_sat.py b/examples/python/weighted_latency_problem_sat.py index 36616bb26b..0abc315afe 100644 --- a/examples/python/weighted_latency_problem_sat.py +++ b/examples/python/weighted_latency_problem_sat.py @@ -16,6 +16,7 @@ import random from typing import Sequence + from absl import app from absl import flags @@ -27,7 +28,9 @@ _GRID_SIZE = flags.DEFINE_integer("grid_size", 20, "Size of the grid where nodes _PROFIT_RANGE = flags.DEFINE_integer("profit_range", 50, "Range of profit.") _SEED = flags.DEFINE_integer("seed", 0, "Random seed.") _PARAMS = flags.DEFINE_string( - "params", "num_search_workers:16, max_time_in_seconds:5", "Sat solver parameters." + "params", + "num_search_workers:16, max_time_in_seconds:5", + "Sat solver parameters.", ) _PROTO_FILE = flags.DEFINE_string( "proto_file", "", "If not empty, output the proto to this file." 
diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 9bbaee5513..7e1b1b1554 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -2296,10 +2296,10 @@ TRFM""" solver.best_bound_callback = best_bound_callback.new_best_bound status = solver.Solve(model, solution_callback) if status == cp_model.OPTIMAL: - self.assertLess( - time.time(), - max(best_bound_callback.last_time, solution_callback.last_time) + 9.0, + last_activity = max( + best_bound_callback.last_time, solution_callback.last_time ) + self.assertLess(time.time(), last_activity + 15.0) def test_issue4434(self) -> None: model = cp_model.CpModel() diff --git a/ortools/set_cover/samples/set_cover.py b/ortools/set_cover/samples/set_cover.py index c6d5b48aef..fd6e8f7055 100755 --- a/ortools/set_cover/samples/set_cover.py +++ b/ortools/set_cover/samples/set_cover.py @@ -17,6 +17,7 @@ # [START program] # [START import] from ortools.set_cover.python import set_cover + # [END import] diff --git a/tools/check_python_deps.py b/tools/check_python_deps.py index deeb558abd..f205680a93 100644 --- a/tools/check_python_deps.py +++ b/tools/check_python_deps.py @@ -111,7 +111,10 @@ if __name__ == "__main__": "-l", "--log", type="string", - help="Available levels are CRITICAL (3), ERROR (2), WARNING (1), INFO (0), DEBUG (-1)", + help=( + "Available levels are CRITICAL (3), ERROR (2), WARNING (1), INFO (0)," + " DEBUG (-1)" + ), default="INFO", ) options, args = parser.parse_args() diff --git a/tools/doc/doxygen_filter.py b/tools/doc/doxygen_filter.py index f9271df7a7..4fcb95b667 100644 --- a/tools/doc/doxygen_filter.py +++ b/tools/doc/doxygen_filter.py @@ -36,186 +36,182 @@ import sys class DoxygenFormatter: - """Transforms lines of a source file to make them Doxygen-friendly.""" + """Transforms lines of a source file to make them Doxygen-friendly.""" - ANYWHERE = 'anywhere' - COMMENT = 'comment' + ANYWHERE = "anywhere" + COMMENT = "comment" - def 
__init__(self, outfile): - # The file-like object to which we will write lines. - self.out = outfile + def __init__(self, outfile): + # The file-like object to which we will write lines. + self.out = outfile - # A buffer for storing empty lines which we can use later if we need to - # retroactively insert markup without causing line number offset problems. - self.empty_line_buffer = [] + # A buffer for storing empty lines which we can use later if we need to + # retroactively insert markup without causing line number offset problems. + self.empty_line_buffer = [] - # Whether we are currently inside an indented code block. - self.in_code_block = False + # Whether we are currently inside an indented code block. + self.in_code_block = False - self.CompileExpressions() + self.CompileExpressions() - def CompileExpressions(self): - """Pre-compiles frequently used regexps for improved performance. + def CompileExpressions(self): + """Pre-compiles frequently used regexps for improved performance. - The regexps are arranged as a list of 3-tuples, where the second value is - the replacement string (which may include backreferences) and the third - value is one of the context constants ANYWHERE or COMMENT. This is a list - of tuples instead of a dictionary because order matters: earlier regexps - will be applied first, and the resulting text (not the original) will be - what is seen by subsequent regexps. - """ - self.comment_regex = re.compile(r'^\s*//') + The regexps are arranged as a list of 3-tuples, where the second value is + the replacement string (which may include backreferences) and the third + value is one of the context constants ANYWHERE or COMMENT. This is a list + of tuples instead of a dictionary because order matters: earlier regexps + will be applied first, and the resulting text (not the original) will be + what is seen by subsequent regexps. + """ + self.comment_regex = re.compile(r"^\s*//") - self.substitutions = [ - # Remove copyright lines. 
- (re.compile(r'^\s*//\s*[Cc]opyright.*Google.*'), r'', self.ANYWHERE), + self.substitutions = [ + # Remove copyright lines. + (re.compile(r"^\s*//\s*[Cc]opyright.*Google.*"), r"", self.ANYWHERE), + # Remove any comment lines that consist of only punctuation (banners). + # We only allow a maximum of two spaces before the punctuation so we + # don't accidentally get rid of code examples with bare braces and + # whatnot. + (re.compile(r"(^\s*)//\s{0,2}[-=#/]+$"), r"\1//\n", self.ANYWHERE), + # If we find something that looks like a list item that is indented four + # or more spaces, pull it back to the left so doxygen's Markdown engine + # doesn't treat it like a code block. + (re.compile(r"(^\s*)//\s{4,}([-\d*].*)"), r"\1 \2", self.COMMENT), + # Replace TODO(user) in a comment with @todo (someone) + (re.compile(r"TODO"), r"@todo ", self.COMMENT), + # Replace leading 'Note:' or 'Note that' in a comment with @note + ( + re.compile(r"(\/\/\s+)Note(?:\:| that)", re.I), + r"\1@note", + self.COMMENT, + ), + # Replace leading 'Warning:' in a comment with @warning + (re.compile(r"(\/\/\s+)Warning:", re.I), r"\1@warning", self.COMMENT), + # Replace leading 'Deprecated' in a comment with @deprecated + ( + re.compile(r"(\/\/\s+)Deprecated[^\w\s]*", re.I), + r"\1@deprecated", + self.COMMENT, + ), + # Replace pipe-delimited parameter names with backtick-delimiters + (re.compile(r"\|(\w+)\|"), r"`\1`", self.COMMENT), + # Convert standalone comment lines to Doxygen style. + (re.compile(r"(^\s*)//(?=[^/])"), r"\1///", self.ANYWHERE), + # Strip trailing comments from preprocessor directives. + (re.compile(r"(^#.*)//.*"), r"\1", self.ANYWHERE), + # Convert remaining trailing comments to doxygen style, unless they are + # documenting the end of a block. + (re.compile(r"([^} ]\s+)//(?=[^/])"), r"\1///<", self.ANYWHERE), + ] - # Remove any comment lines that consist of only punctuation (banners). 
- # We only allow a maximum of two spaces before the punctuation so we - # don't accidentally get rid of code examples with bare braces and - # whatnot. - (re.compile(r'(^\s*)//\s{0,2}[-=#/]+$'), r'\1//\n', self.ANYWHERE), + def Transform(self, line): + """Performs the regexp transformations defined by self.substitutions. - # If we find something that looks like a list item that is indented four - # or more spaces, pull it back to the left so doxygen's Markdown engine - # doesn't treat it like a code block. - (re.compile(r'(^\s*)//\s{4,}([-\d*].*)'), r'\1 \2', self.COMMENT), + Args: + line: The line to transform. - # Replace TODO(user) in a comment with @todo (someone) - (re.compile(r'TODO'), r'@todo ', self.COMMENT), - - # Replace leading 'Note:' or 'Note that' in a comment with @note - (re.compile(r'(\/\/\s+)Note(?:\:| that)', re.I), r'\1@note', - self.COMMENT), - - # Replace leading 'Warning:' in a comment with @warning - (re.compile(r'(\/\/\s+)Warning:', re.I), r'\1@warning', self.COMMENT), - - # Replace leading 'Deprecated' in a comment with @deprecated - (re.compile(r'(\/\/\s+)Deprecated[^\w\s]*', re.I), r'\1@deprecated', - self.COMMENT), - - # Replace pipe-delimited parameter names with backtick-delimiters - (re.compile(r'\|(\w+)\|'), r'`\1`', self.COMMENT), - - # Convert standalone comment lines to Doxygen style. - (re.compile(r'(^\s*)//(?=[^/])'), r'\1///', self.ANYWHERE), - - # Strip trailing comments from preprocessor directives. - (re.compile(r'(^#.*)//.*'), r'\1', self.ANYWHERE), - - # Convert remaining trailing comments to doxygen style, unless they are - # documenting the end of a block. - (re.compile(r'([^} ]\s+)//(?=[^/])'), r'\1///<', self.ANYWHERE), - ] - - def Transform(self, line): - """Performs the regexp transformations defined by self.substitutions. - - Args: - line: The line to transform. - - Returns: - The resulting line. 
- """ - for (regex, repl, where) in self.substitutions: - if where is self.COMMENT and not self.comment_regex.match(line): + Returns: + The resulting line. + """ + for regex, repl, where in self.substitutions: + if where is self.COMMENT and not self.comment_regex.match(line): + return line + line = regex.sub(repl, line) return line - line = regex.sub(repl, line) - return line - def AppendToBufferedLine(self, text): - """Appends text to the last buffered empty line. + def AppendToBufferedLine(self, text): + """Appends text to the last buffered empty line. - Empty lines are buffered rather than being written out directly. This lets - us retroactively rewrite buffered lines to include markup that affects the - following line, while avoiding the line number offset that would result from - inserting a line that wasn't in the original source. + Empty lines are buffered rather than being written out directly. This lets + us retroactively rewrite buffered lines to include markup that affects the + following line, while avoiding the line number offset that would result from + inserting a line that wasn't in the original source. - Args: - text: The text to append to the line. + Args: + text: The text to append to the line. - Returns: - True if there was an available empty line to which text could be - appended, and False otherwise. - """ - if self.empty_line_buffer: - last_line = self.empty_line_buffer.pop().rstrip() - last_line += text + '\n' - self.empty_line_buffer.append(last_line) - return True - else: - return False + Returns: + True if there was an available empty line to which text could be + appended, and False otherwise. + """ + if self.empty_line_buffer: + last_line = self.empty_line_buffer.pop().rstrip() + last_line += text + "\n" + self.empty_line_buffer.append(last_line) + return True + else: + return False - def ConvertCodeBlock(self, line): - """Converts any code block that may begin or end on this line. 
+ def ConvertCodeBlock(self, line): + """Converts any code block that may begin or end on this line. - Doxygen has (at least) two kinds of code blocks. Any block indented at - least four spaces gets formatted as code, but (for some reason) no syntax - highlighting is applied. Any block surrounded by "~~~" on both sides is - also treated as code, but these are syntax highlighted intelligently - depending on the file type. We typically write code blocks in the former - style, but we'd like them to be highlighted, so this function converts them - to the latter style by adding in the ~~~ lines. + Doxygen has (at least) two kinds of code blocks. Any block indented at + least four spaces gets formatted as code, but (for some reason) no syntax + highlighting is applied. Any block surrounded by "~~~" on both sides is + also treated as code, but these are syntax highlighted intelligently + depending on the file type. We typically write code blocks in the former + style, but we'd like them to be highlighted, so this function converts them + to the latter style by adding in the ~~~ lines. - To make this a bit more complicated, we would really prefer not to insert - new lines into the file, since that will make the line numbers shown in - doxygen not match the line numbers in the actual source code. For this - reason, we only perform the conversion if at least one "blank" line (empty - comment line) appears before the start of the code block. If we get down to - the bottom of the block and there's no blank line after it, we will be - forced to add a line, since we can't go back and undo what we already did. + To make this a bit more complicated, we would really prefer not to insert + new lines into the file, since that will make the line numbers shown in + doxygen not match the line numbers in the actual source code. For this + reason, we only perform the conversion if at least one "blank" line (empty + comment line) appears before the start of the code block. 
If we get down to + the bottom of the block and there's no blank line after it, we will be + forced to add a line, since we can't go back and undo what we already did. - Args: - line: The line to process. + Args: + line: The line to process. - Returns: - The converted line. - """ - if not self.in_code_block and re.match(r'\s*///\s{4,}', line): - if self.AppendToBufferedLine(' ~~~'): - # If this fails, we'll just leave it un-highlighted. - self.in_code_block = True - elif self.in_code_block and not re.match(r'\s*///\s{4,}', line): - if not self.AppendToBufferedLine(' ~~~'): - # This is bad. We don't have a buffered line to use to end the code - # block, so we'll have to insert one. This will cause the line - # numbers to stop matching the original source, unfortunately. - line = '/// ~~~\n' + line - self.in_code_block = False - return line + Returns: + The converted line. + """ + if not self.in_code_block and re.match(r"\s*///\s{4,}", line): + if self.AppendToBufferedLine(" ~~~"): + # If this fails, we'll just leave it un-highlighted. + self.in_code_block = True + elif self.in_code_block and not re.match(r"\s*///\s{4,}", line): + if not self.AppendToBufferedLine(" ~~~"): + # This is bad. We don't have a buffered line to use to end the code + # block, so we'll have to insert one. This will cause the line + # numbers to stop matching the original source, unfortunately. + line = "/// ~~~\n" + line + self.in_code_block = False + return line - def ProcessLine(self, line): - """Processes a line. + def ProcessLine(self, line): + """Processes a line. - If the line is an empty line inside a comment, we buffer it for possible - rewriting later on. Otherwise, we transform it using our regexps and - write it (as well as any buffered blank lines) out to the output. + If the line is an empty line inside a comment, we buffer it for possible + rewriting later on. Otherwise, we transform it using our regexps and + write it (as well as any buffered blank lines) out to the output. 
- Args: - line: The line to process. - """ - line = self.Transform(line) + Args: + line: The line to process. + """ + line = self.Transform(line) - if line.strip() == '///': - # We may repurpose this empty line later, so don't write it out yet. - self.empty_line_buffer.append(line) - else: - line = self.ConvertCodeBlock(line) - # Flush the line buffer and write this line as well. - for buffered_line in self.empty_line_buffer: - self.out.write(buffered_line) - self.empty_line_buffer = [] - self.out.write(line) + if line.strip() == "///": + # We may repurpose this empty line later, so don't write it out yet. + self.empty_line_buffer.append(line) + else: + line = self.ConvertCodeBlock(line) + # Flush the line buffer and write this line as well. + for buffered_line in self.empty_line_buffer: + self.out.write(buffered_line) + self.empty_line_buffer = [] + self.out.write(line) def main(argv): - sourcefile = argv[1] - with open(sourcefile, 'r') as infile: - formatter = DoxygenFormatter(sys.stdout) - for line in infile: - formatter.ProcessLine(line) + sourcefile = argv[1] + with open(sourcefile, "r") as infile: + formatter = DoxygenFormatter(sys.stdout) + for line in infile: + formatter.ProcessLine(line) -if __name__ == '__main__': - main(sys.argv) +if __name__ == "__main__": + main(sys.argv) diff --git a/tools/doc/gen_ref_doc.py b/tools/doc/gen_ref_doc.py index 303c43ab21..691b5eed5f 100755 --- a/tools/doc/gen_ref_doc.py +++ b/tools/doc/gen_ref_doc.py @@ -18,249 +18,234 @@ import re def main(version): - """For each doc section, edit the doxy and header files, and generate the doc.""" - sections = create_section_data() - doxy_tmp = 'tools/doc/tmp.doxy' - header_tmp = 'tools/doc/header.tmp.html' - footer_tmp = 'tools/doc/footer.tmp.html' - style_sheet_tmp = 'tools/doc/styleSheet.tmp.css' + """For each doc section, edit the doxy and header files, and generate the doc.""" + sections = create_section_data() + doxy_tmp = "tools/doc/tmp.doxy" + header_tmp = 
"tools/doc/header.tmp.html" + footer_tmp = "tools/doc/footer.tmp.html" + style_sheet_tmp = "tools/doc/styleSheet.tmp.css" - for section in sections: - output_dir = section['output_dir'] - project_name = section['project name'] - title = section['title'] - doxyfile = 'tools/doc/' + section['doxyfile'] - headerfile = 'tools/doc/' + section['headerfile'] - footerfile = 'tools/doc/' + section['footerfile'] - stylesheetfile = 'tools/doc/' + section['styleSheetfile'] - input_files = section['input_files'] - # Edit doxyfile. - project_name_string = 'PROJECT_NAME = ' + project_name - project_number_string = 'PROJECT_NUMBER = ' + version - html_output_string = 'HTML_OUTPUT = ' + output_dir - input_string = 'INPUT = ' + input_files - f = open(doxyfile, 'r') - g = open(doxy_tmp, 'w') - filedata = f.read() - filedata = re.sub('PROJECT_NAME', project_name_string, filedata) - filedata = re.sub('PROJECT_NUMBER', project_number_string, filedata) - filedata = re.sub('HTML_OUTPUT', html_output_string, filedata) - if input_files: - filedata = re.sub(r'INPUT.*=.*', input_string, filedata) - # Write filedata. - g.write(filedata) - f.close() - g.close() + for section in sections: + output_dir = section["output_dir"] + project_name = section["project name"] + title = section["title"] + doxyfile = "tools/doc/" + section["doxyfile"] + headerfile = "tools/doc/" + section["headerfile"] + footerfile = "tools/doc/" + section["footerfile"] + stylesheetfile = "tools/doc/" + section["styleSheetfile"] + input_files = section["input_files"] + # Edit doxyfile. 
+ project_name_string = "PROJECT_NAME = " + project_name + project_number_string = "PROJECT_NUMBER = " + version + html_output_string = "HTML_OUTPUT = " + output_dir + input_string = "INPUT = " + input_files + f = open(doxyfile, "r") + g = open(doxy_tmp, "w") + filedata = f.read() + filedata = re.sub("PROJECT_NAME", project_name_string, filedata) + filedata = re.sub("PROJECT_NUMBER", project_number_string, filedata) + filedata = re.sub("HTML_OUTPUT", html_output_string, filedata) + if input_files: + filedata = re.sub(r"INPUT.*=.*", input_string, filedata) + # Write filedata. + g.write(filedata) + f.close() + g.close() - # Edit header file. - f = open(headerfile, 'r') - g = open(header_tmp, 'w') - filedata = f.read() - filedata = re.sub('Banner Text', 'Google OR-Tools ' + version, - filedata) - filedata = re.sub('Page Title', title, filedata) - # Write filedata. - g.write(filedata) - f.close() - g.close() + # Edit header file. + f = open(headerfile, "r") + g = open(header_tmp, "w") + filedata = f.read() + filedata = re.sub("Banner Text", "Google OR-Tools " + version, filedata) + filedata = re.sub("Page Title", title, filedata) + # Write filedata. + g.write(filedata) + f.close() + g.close() - # Edit footer file. - f = open(footerfile, 'r') - g = open(footer_tmp, 'w') - filedata = f.read() - # Write filedata. - g.write(filedata) - f.close() - g.close() + # Edit footer file. + f = open(footerfile, "r") + g = open(footer_tmp, "w") + filedata = f.read() + # Write filedata. + g.write(filedata) + f.close() + g.close() - # Edit style sheet file. - f = open(stylesheetfile, 'r') - g = open(style_sheet_tmp, 'w') - filedata = f.read() - # Write filedata. - g.write(filedata) - f.close() - g.close() + # Edit style sheet file. + f = open(stylesheetfile, "r") + g = open(style_sheet_tmp, "w") + filedata = f.read() + # Write filedata. + g.write(filedata) + f.close() + g.close() - # Clean previous doc. - os.system('rm -rf docs/' + output_dir) - # Generate the doc. 
- os.system(f'doxygen {doxy_tmp}') - # Remove temp files. - os.system(f'rm {doxy_tmp}') - os.system(f'rm {header_tmp}') - os.system(f'rm {footer_tmp}') - os.system(f'rm {style_sheet_tmp}') + # Clean previous doc. + os.system("rm -rf docs/" + output_dir) + # Generate the doc. + os.system(f"doxygen {doxy_tmp}") + # Remove temp files. + os.system(f"rm {doxy_tmp}") + os.system(f"rm {header_tmp}") + os.system(f"rm {footer_tmp}") + os.system(f"rm {style_sheet_tmp}") def create_section_data(): - """Generate each section configuration.""" - sections = [{ - 'output_dir': - 'cpp_algorithms', - 'project name': - 'Algorithms', - 'title': - 'C++ Reference: Algorithms', - 'doxyfile': - 'cpp.doxy.in', - 'headerfile': - 'cpp.header.html.in', - 'footerfile': - 'all.footer.html.in', - 'styleSheetfile': - 'all.styleSheet.css.in', - 'input_files': - 'ortools/algorithms/dense_doubly_linked_list.h ' + - 'ortools/algorithms/dynamic_partition.h ' + - 'ortools/algorithms/dynamic_permutation.h ' + - 'ortools/algorithms/find_graph_symmetries.h ' + - 'ortools/algorithms/hungarian.h ' + - 'ortools/algorithms/knapsack_solver.h ' + - 'ortools/algorithms/sparse_permutation.h' - }, { - 'output_dir': - 'cpp_sat', - 'project name': - 'CP-SAT', - 'title': - 'C++ Reference: CP-SAT', - 'doxyfile': - 'cpp.doxy.in', - 'headerfile': - 'cpp.header.html.in', - 'footerfile': - 'all.footer.html.in', - 'styleSheetfile': - 'all.styleSheet.css.in', - 'input_files': - 'ortools/sat/cp_model.h ' + 'ortools/sat/cp_model_solver.h ' + - 'ortools/sat/model.h ' + 'ortools/util/sorted_interval_list.h ' + - 'ortools/util/time_limit.h ' + - 'ortools/gen/ortools/sat/boolean_problem.pb.h ' + - 'ortools/gen/ortools/sat/cp_model.pb.h ' + - 'ortools/gen/ortools/sat/sat_parameters.pb.h' - }, { - 'output_dir': - 'cpp_graph', - 'project name': - 'Graph', - 'title': - 'C++ Reference: Graph', - 'doxyfile': - 'cpp.doxy.in', - 'headerfile': - 'cpp.header.html.in', - 'footerfile': - 'all.footer.html.in', - 'styleSheetfile': - 
'all.styleSheet.css.in', - 'input_files': - 'ortools/graph/christofides.h ' + 'ortools/graph/cliques.h ' + - 'ortools/graph/connected_components.h ' + - 'ortools/graph/connectivity.h ' + - 'ortools/graph/eulerian_path.h ' + 'ortools/graph/graph.h ' + - 'ortools/graph/graphs.h ' + 'ortools/graph/hamiltonian_path.h ' + - 'ortools/graph/graph_io.h ' + 'ortools/graph/iterators.h ' + - 'ortools/graph/linear_assignment.h ' + 'ortools/graph/max_flow.h ' + - 'ortools/graph/min_cost_flow.h ' + - 'ortools/graph/minimum_spanning_tree.h ' + - 'ortools/graph/one_tree_lower_bound.h ' + - 'ortools/graph/shortestpaths.h ' + - 'ortools/graph/strongly_connected_components.h ' + - 'ortools/graph/util.h ' + - 'ortools/gen/ortools/graph/flow_problem.pb.h ' - }, { - 'output_dir': - 'cpp_linear', - 'project name': - 'Linear solver', - 'title': - 'C++ Reference: Linear solver', - 'doxyfile': - 'cpp.doxy.in', - 'headerfile': - 'cpp.header.html.in', - 'footerfile': - 'all.footer.html.in', - 'styleSheetfile': - 'all.styleSheet.css.in', - 'input_files': - 'ortools/linear_solver/linear_expr.h ' + - 'ortools/linear_solver/linear_solver.h ' + - 'ortools/linear_solver/model_exporter.h ' + - 'ortools/linear_solver/model_exporter_swig_helper.h ' + - 'ortools/linear_solver/model_validator.h ' + - 'ortools/gen/ortools/linear_solver/linear_solver.pb.h ' - }, { - 'output_dir': - 'cpp_routing', - 'project name': - 'Routing', - 'title': - 'C++ Reference: Routing', - 'doxyfile': - 'cpp.doxy.in', - 'headerfile': - 'cpp.header.html.in', - 'footerfile': - 'all.footer.html.in', - 'styleSheetfile': - 'all.styleSheet.css.in', - 'input_files': - 'ortools/constraint_solver/constraint_solver.h ' + - 'ortools/constraint_solver/constraint_solveri.h ' + - 'ortools/constraint_solver/routing.h ' + - 'ortools/constraint_solver/routing_index_manager.h ' + - 'ortools/constraint_solver/routing_lp_scheduling.h ' + - 'ortools/constraint_solver/routing_neighborhoods.h ' + - 'ortools/constraint_solver/routing_parameters.h ' + 
- 'ortools/constraint_solver/routing_types.h ' + - 'ortools/gen/ortools/constraint_solver/assignment.pb.h ' + - 'ortools/gen/ortools/constraint_solver/demon_profiler.pb.h ' + - 'ortools/gen/ortools/constraint_solver/routing_enums.pb.h ' + - 'ortools/gen/ortools/constraint_solver/routing_parameters.pb.h ' + - 'ortools/gen/ortools/constraint_solver/search_limit.pb.h ' + - 'ortools/gen/ortools/constraint_solver/solver_parameters.pb.h ' - }, { - 'output_dir': 'cpp', - 'project name': 'OR-Tools', - 'title': 'C++ Reference', - 'doxyfile': 'cpp.doxy.in', - 'headerfile': 'default.header.html.in', - 'footerfile': 'default.footer.html.in', - 'styleSheetfile': 'default.styleSheet.css.in', - 'input_files': 'ortools ' + 'tools/doc' - }, { - 'output_dir': 'dotnet', - 'project name': 'OR-Tools', - 'title': '.Net Reference', - 'doxyfile': 'dotnet.doxy.in', - 'headerfile': 'dotnet.header.html.in', - 'footerfile': 'all.footer.html.in', - 'styleSheetfile': 'all.styleSheet.css.in', - 'input_files': 'ortools ' + 'tools/doc' - }, { - 'output_dir': 'java', - 'project name': 'OR-Tools', - 'title': 'Java Reference', - 'doxyfile': 'java.doxy.in', - 'headerfile': 'java.header.html.in', - 'footerfile': 'all.footer.html.in', - 'styleSheetfile': 'all.styleSheet.css.in', - 'input_files': 'ortools ' + 'tools/doc' - }] - return sections + """Generate each section configuration.""" + sections = [ + { + "output_dir": "cpp_algorithms", + "project name": "Algorithms", + "title": "C++ Reference: Algorithms", + "doxyfile": "cpp.doxy.in", + "headerfile": "cpp.header.html.in", + "footerfile": "all.footer.html.in", + "styleSheetfile": "all.styleSheet.css.in", + "input_files": ( + "ortools/algorithms/dense_doubly_linked_list.h " + + "ortools/algorithms/dynamic_partition.h " + + "ortools/algorithms/dynamic_permutation.h " + + "ortools/algorithms/find_graph_symmetries.h " + + "ortools/algorithms/hungarian.h " + + "ortools/algorithms/knapsack_solver.h " + + "ortools/algorithms/sparse_permutation.h" + ), + }, + 
{ + "output_dir": "cpp_sat", + "project name": "CP-SAT", + "title": "C++ Reference: CP-SAT", + "doxyfile": "cpp.doxy.in", + "headerfile": "cpp.header.html.in", + "footerfile": "all.footer.html.in", + "styleSheetfile": "all.styleSheet.css.in", + "input_files": ( + "ortools/sat/cp_model.h " + + "ortools/sat/cp_model_solver.h " + + "ortools/sat/model.h " + + "ortools/util/sorted_interval_list.h " + + "ortools/util/time_limit.h " + + "ortools/gen/ortools/sat/boolean_problem.pb.h " + + "ortools/gen/ortools/sat/cp_model.pb.h " + + "ortools/gen/ortools/sat/sat_parameters.pb.h" + ), + }, + { + "output_dir": "cpp_graph", + "project name": "Graph", + "title": "C++ Reference: Graph", + "doxyfile": "cpp.doxy.in", + "headerfile": "cpp.header.html.in", + "footerfile": "all.footer.html.in", + "styleSheetfile": "all.styleSheet.css.in", + "input_files": ( + "ortools/graph/christofides.h " + + "ortools/graph/cliques.h " + + "ortools/graph/connected_components.h " + + "ortools/graph/connectivity.h " + + "ortools/graph/eulerian_path.h " + + "ortools/graph/graph.h " + + "ortools/graph/graphs.h " + + "ortools/graph/hamiltonian_path.h " + + "ortools/graph/graph_io.h " + + "ortools/graph/iterators.h " + + "ortools/graph/linear_assignment.h " + + "ortools/graph/max_flow.h " + + "ortools/graph/min_cost_flow.h " + + "ortools/graph/minimum_spanning_tree.h " + + "ortools/graph/one_tree_lower_bound.h " + + "ortools/graph/shortestpaths.h " + + "ortools/graph/strongly_connected_components.h " + + "ortools/graph/util.h " + + "ortools/gen/ortools/graph/flow_problem.pb.h " + ), + }, + { + "output_dir": "cpp_linear", + "project name": "Linear solver", + "title": "C++ Reference: Linear solver", + "doxyfile": "cpp.doxy.in", + "headerfile": "cpp.header.html.in", + "footerfile": "all.footer.html.in", + "styleSheetfile": "all.styleSheet.css.in", + "input_files": ( + "ortools/linear_solver/linear_expr.h " + + "ortools/linear_solver/linear_solver.h " + + "ortools/linear_solver/model_exporter.h " + + 
"ortools/linear_solver/model_exporter_swig_helper.h " + + "ortools/linear_solver/model_validator.h " + + "ortools/gen/ortools/linear_solver/linear_solver.pb.h " + ), + }, + { + "output_dir": "cpp_routing", + "project name": "Routing", + "title": "C++ Reference: Routing", + "doxyfile": "cpp.doxy.in", + "headerfile": "cpp.header.html.in", + "footerfile": "all.footer.html.in", + "styleSheetfile": "all.styleSheet.css.in", + "input_files": ( + "ortools/constraint_solver/constraint_solver.h " + + "ortools/constraint_solver/constraint_solveri.h " + + "ortools/constraint_solver/routing.h " + + "ortools/constraint_solver/routing_index_manager.h " + + "ortools/constraint_solver/routing_lp_scheduling.h " + + "ortools/constraint_solver/routing_neighborhoods.h " + + "ortools/constraint_solver/routing_parameters.h " + + "ortools/constraint_solver/routing_types.h " + + "ortools/gen/ortools/constraint_solver/assignment.pb.h " + + "ortools/gen/ortools/constraint_solver/demon_profiler.pb.h " + + "ortools/gen/ortools/constraint_solver/routing_enums.pb.h " + + "ortools/gen/ortools/constraint_solver/routing_parameters.pb.h " + + "ortools/gen/ortools/constraint_solver/search_limit.pb.h " + + "ortools/gen/ortools/constraint_solver/solver_parameters.pb.h " + ), + }, + { + "output_dir": "cpp", + "project name": "OR-Tools", + "title": "C++ Reference", + "doxyfile": "cpp.doxy.in", + "headerfile": "default.header.html.in", + "footerfile": "default.footer.html.in", + "styleSheetfile": "default.styleSheet.css.in", + "input_files": "ortools " + "tools/doc", + }, + { + "output_dir": "dotnet", + "project name": "OR-Tools", + "title": ".Net Reference", + "doxyfile": "dotnet.doxy.in", + "headerfile": "dotnet.header.html.in", + "footerfile": "all.footer.html.in", + "styleSheetfile": "all.styleSheet.css.in", + "input_files": "ortools " + "tools/doc", + }, + { + "output_dir": "java", + "project name": "OR-Tools", + "title": "Java Reference", + "doxyfile": "java.doxy.in", + "headerfile": 
"java.header.html.in", + "footerfile": "all.footer.html.in", + "styleSheetfile": "all.styleSheet.css.in", + "input_files": "ortools " + "tools/doc", + }, + ] + return sections -if __name__ == '__main__': - version_file = open('Version.txt', 'r') - version_file_data = version_file.read() - version_file.close() - major_pattern = re.compile(r'OR_TOOLS_MAJOR=(\d)') - minor_pattern = re.compile(r'OR_TOOLS_MINOR=(\d)') - major = major_pattern.findall(version_file_data)[0] - minor = minor_pattern.findall(version_file_data)[0] +if __name__ == "__main__": + version_file = open("Version.txt", "r") + version_file_data = version_file.read() + version_file.close() + major_pattern = re.compile(r"OR_TOOLS_MAJOR=(\d)") + minor_pattern = re.compile(r"OR_TOOLS_MINOR=(\d)") + major = major_pattern.findall(version_file_data)[0] + minor = minor_pattern.findall(version_file_data)[0] - version_number = f'{major}.{minor}' - main(version_number) + version_number = f"{major}.{minor}" + main(version_number) From 478239a7efd239f7fa50013374d8f4640fb3ac12 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 25 Jun 2025 15:46:08 +0200 Subject: [PATCH 129/509] Fix or-tools.code-workspace --- or-tools.code-workspace | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/or-tools.code-workspace b/or-tools.code-workspace index 96c6145349..abf8dd1536 100644 --- a/or-tools.code-workspace +++ b/or-tools.code-workspace @@ -113,7 +113,7 @@ "USE_SCIP" ], "C_Cpp.clang_format_style": "Google", - "python.formatting.provider": "yapf", + "python.formatting.provider": "black", "python.pythonPath": "python3", "python.autoComplete.extraPaths": [ "${workspaceRoot}", From a62d284dbde529e218bc9ce6cfd02f21e004a367 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 24 Jun 2025 18:20:08 +0200 Subject: [PATCH 130/509] routing: export from google3 --- ortools/routing/constraints.cc | 114 +++++++++++++++++++++++++++ ortools/routing/constraints.h | 9 +++ ortools/routing/decision_builders.cc | 10 ++- 
ortools/routing/filters.cc | 5 +- ortools/routing/lp_scheduling.h | 1 + ortools/routing/routing.cc | 36 ++++++++- ortools/routing/routing.h | 6 +- ortools/routing/sat.cc | 52 ++++++++---- 8 files changed, 207 insertions(+), 26 deletions(-) diff --git a/ortools/routing/constraints.cc b/ortools/routing/constraints.cc index da13afb0b4..172d7a73b6 100644 --- a/ortools/routing/constraints.cc +++ b/ortools/routing/constraints.cc @@ -22,6 +22,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/functional/any_invocable.h" #include "absl/log/check.h" @@ -1032,4 +1033,117 @@ Constraint* MakeGlobalVehicleBreaksConstraint( return solver->RevAlloc(new GlobalVehicleBreaksConstraint(dimension)); } +namespace { + +// TODO(user): Make this a real constraint with demons on transit and active +// variables. +class NumActiveVehiclesCapacityConstraint : public Constraint { + public: + NumActiveVehiclesCapacityConstraint(Solver* solver, + std::vector transit_vars, + std::vector active_vars, + std::vector vehicle_active_vars, + std::vector vehicle_capacities, + int max_active_vehicles, + bool enforce_active_vehicles) + : Constraint(solver), + transit_vars_(std::move(transit_vars)), + active_vars_(std::move(active_vars)), + vehicle_active_vars_(std::move(vehicle_active_vars)), + vehicle_capacities_(std::move(vehicle_capacities)), + max_active_vehicles_( + std::min(max_active_vehicles, + static_cast(vehicle_active_vars_.size()))), + enforce_active_vehicles_(enforce_active_vehicles) { + DCHECK_EQ(transit_vars_.size(), active_vars_.size()); + DCHECK_EQ(vehicle_capacities_.size(), vehicle_active_vars_.size()); + } + std::string DebugString() const override { + return "NumActiveVehiclesCapacityConstraint"; + } + void Post() override { + int64_t remaining_demand = 0; + for (int i = 0; i < transit_vars_.size(); ++i) { + if (active_vars_[i]->Min() == 1) { + CapAddTo(transit_vars_[i]->Min(), &remaining_demand); + } + } + 
sorted_by_capacity_vehicles_.clear(); + sorted_by_capacity_vehicles_.reserve(vehicle_capacities_.size()); + for (int v = 0; v < vehicle_active_vars_.size(); ++v) { + if (vehicle_active_vars_[v]->Max() == 0) continue; + sorted_by_capacity_vehicles_.push_back(v); + } + const int updated_max_active_vehicles = std::min( + max_active_vehicles_, sorted_by_capacity_vehicles_.size()); + absl::c_sort(sorted_by_capacity_vehicles_, [this](int a, int b) { + return vehicle_capacities_[a] > vehicle_capacities_[b]; + }); + for (int i = 0; i < updated_max_active_vehicles; ++i) { + CapSubFrom(vehicle_capacities_[sorted_by_capacity_vehicles_[i]], + &remaining_demand); + } + if (remaining_demand > 0) solver()->Fail(); + + // Check vehicles that need to be forced to be active. + if (enforce_active_vehicles_) { + int64_t extended_capacity = 0; + if (updated_max_active_vehicles < sorted_by_capacity_vehicles_.size()) { + extended_capacity = vehicle_capacities_ + [sorted_by_capacity_vehicles_[updated_max_active_vehicles]]; + } + for (int i = 0; i < updated_max_active_vehicles; ++i) { + const int vehicle = sorted_by_capacity_vehicles_[i]; + if (CapAdd(remaining_demand, vehicle_capacities_[vehicle]) > + extended_capacity) { + vehicle_active_vars_[vehicle]->SetValue(1); + } else { + break; + } + } + } + + // Check remaining vehicles and make inactive the ones which do not have + // enough capacity. 
+ if (updated_max_active_vehicles > 0 && + updated_max_active_vehicles - 1 < sorted_by_capacity_vehicles_.size()) { + CapAddTo( + vehicle_capacities_ + [sorted_by_capacity_vehicles_[updated_max_active_vehicles - 1]], + &remaining_demand); + } + for (int i = updated_max_active_vehicles; + i < sorted_by_capacity_vehicles_.size(); ++i) { + const int vehicle = sorted_by_capacity_vehicles_[i]; + if (vehicle_capacities_[vehicle] < remaining_demand || + updated_max_active_vehicles == 0) { + vehicle_active_vars_[vehicle]->SetValue(0); + } + } + } + void InitialPropagate() override {} + + private: + const std::vector transit_vars_; + const std::vector active_vars_; + const std::vector vehicle_active_vars_; + const std::vector vehicle_capacities_; + const int max_active_vehicles_; + const bool enforce_active_vehicles_; + std::vector sorted_by_capacity_vehicles_; +}; + +} // namespace + +Constraint* MakeNumActiveVehiclesCapacityConstraint( + Solver* solver, std::vector transit_vars, + std::vector active_vars, std::vector vehicle_active_vars, + std::vector vehicle_capacities, int max_active_vehicles, + bool enforce_active_vehicles) { + return solver->RevAlloc(new NumActiveVehiclesCapacityConstraint( + solver, std::move(transit_vars), std::move(active_vars), + std::move(vehicle_active_vars), std::move(vehicle_capacities), + max_active_vehicles, enforce_active_vehicles)); +} + } // namespace operations_research::routing diff --git a/ortools/routing/constraints.h b/ortools/routing/constraints.h index 4d7e1759b8..4d7ee86187 100644 --- a/ortools/routing/constraints.h +++ b/ortools/routing/constraints.h @@ -52,6 +52,15 @@ Constraint* MakeRouteConstraint( Constraint* MakeGlobalVehicleBreaksConstraint( Solver* solver, const RoutingDimension* dimension); +/// Makes inactive the vehicles which cannot cover the demand resulting from +/// the transit variables of the active nodes given the maximum number of +/// vehicles which can be active. 
+Constraint* MakeNumActiveVehiclesCapacityConstraint( + Solver* solver, std::vector transit_vars, + std::vector active_vars, std::vector vehicle_active_vars, + std::vector vehicle_capacities, int max_active_vehicles, + bool enforce_active_vehicles = false); + } // namespace operations_research::routing #endif // OR_TOOLS_ROUTING_CONSTRAINTS_H_ diff --git a/ortools/routing/decision_builders.cc b/ortools/routing/decision_builders.cc index 65d2a17013..6ccb8ccc05 100644 --- a/ortools/routing/decision_builders.cc +++ b/ortools/routing/decision_builders.cc @@ -255,11 +255,10 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { vehicles_without_resource_assignment_.clear(); vehicles_with_resource_assignment_.clear(); - util_intops::StrongVector> - used_resources_per_class; + used_resources_per_class_.clear(); DetermineVehiclesRequiringResourceAssignment( &vehicles_without_resource_assignment_, - &vehicles_with_resource_assignment_, &used_resources_per_class); + &vehicles_with_resource_assignment_, &used_resources_per_class_); const auto next = [&model = model_](int64_t n) { return model.NextVar(n)->Value(); @@ -272,6 +271,7 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { int solve_duration_shares = vehicles_without_resource_assignment_.size() + vehicles_with_resource_assignment_.size(); for (int vehicle : vehicles_without_resource_assignment_) { + // This can trigger a fail if the time limit is reached. solver->TopPeriodicCheck(); cumul_values_.clear(); break_start_end_values_.clear(); @@ -298,7 +298,7 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { // corresponding var and values. 
resource_indices_.clear(); if (!ComputeVehicleResourceClassValuesAndIndices( - vehicles_with_resource_assignment_, used_resources_per_class, next, + vehicles_with_resource_assignment_, used_resources_per_class_, next, &resource_indices_)) { return false; } @@ -532,6 +532,8 @@ class SetCumulsFromLocalDimensionCosts : public DecisionBuilder { // limit is reached. std::vector vehicles_without_resource_assignment_; std::vector vehicles_with_resource_assignment_; + util_intops::StrongVector> + used_resources_per_class_; std::vector cumul_values_; std::vector break_start_end_values_; std::vector resource_indices_; diff --git a/ortools/routing/filters.cc b/ortools/routing/filters.cc index 90e9be3f43..5581a4d44f 100644 --- a/ortools/routing/filters.cc +++ b/ortools/routing/filters.cc @@ -941,8 +941,9 @@ class SameVehicleCostFilter : public BasePathFilter { ¤t_cost_); } } - int64_t GetCost(int index, const std::vector>& - nodes_per_vehicle) const { + int64_t GetCost( + int index, + absl::Span> nodes_per_vehicle) const { const int num_vehicles_used = nodes_per_vehicle[index].size(); if (num_vehicles_used <= 1) return 0; return CapProd(num_vehicles_used - 1, model_.GetSoftSameVehicleCost(index)); diff --git a/ortools/routing/lp_scheduling.h b/ortools/routing/lp_scheduling.h index ca526f41f8..5190bb350e 100644 --- a/ortools/routing/lp_scheduling.h +++ b/ortools/routing/lp_scheduling.h @@ -494,6 +494,7 @@ class RoutingCPSatWrapper : public RoutingLinearSolverWrapper { parameters_.set_add_lp_constraints_lazily(false); parameters_.set_use_absl_random(false); parameters_.set_alternative_pool_size(0); + parameters_.set_transitive_precedences_work_limit(0); } ~RoutingCPSatWrapper() override {} void Clear() override { diff --git a/ortools/routing/routing.cc b/ortools/routing/routing.cc index a7dc30470a..2056bdc3d4 100644 --- a/ortools/routing/routing.cc +++ b/ortools/routing/routing.cc @@ -2599,6 +2599,11 @@ void RoutingModel::CloseModelWithParameters( if (vehicles_ > 
max_active_vehicles_) { solver_->AddConstraint( solver_->MakeSumLessOrEqual(vehicle_active_, max_active_vehicles_)); + for (const RoutingDimension* dimension : dimensions_) { + solver_->AddConstraint(MakeNumActiveVehiclesCapacityConstraint( + solver_.get(), dimension->fixed_transits_, active_, vehicle_active_, + dimension->vehicle_capacities_, max_active_vehicles_)); + } } // If there is only one vehicle in the model the vehicle variables will have @@ -2615,7 +2620,17 @@ void RoutingModel::CloseModelWithParameters( // Nodes which are not in a disjunction are mandatory, and those with a // trivially infeasible type are necessarily unperformed for (int i = 0; i < size; ++i) { - if (GetDisjunctionIndices(i).empty() && active_[i]->Max() != 0) { + const std::vector& disjunctions = + GetDisjunctionIndices(i); + bool is_mandatory = disjunctions.empty(); + for (const DisjunctionIndex& disjunction : disjunctions) { + if (GetDisjunctionNodeIndices(disjunction).size() == 1 && + GetDisjunctionPenalty(disjunction) == kNoPenalty) { + is_mandatory = true; + break; + } + } + if (is_mandatory && active_[i]->Max() != 0) { active_[i]->SetValue(1); } const int type = GetVisitType(i); @@ -3374,7 +3389,8 @@ const Assignment* RoutingModel::SolveFromAssignmentsWithParameters( VLOG(1) << "Solving with CP-SAT"; Assignment* const cp_solution = solution_collector->last_solution_or_null(); Assignment sat_solution(solver_.get()); - if (SolveModelWithSat(this, parameters, cp_solution, &sat_solution) && + if (SolveModelWithSat(this, &search_stats_, parameters, cp_solution, + &sat_solution) && update_time_limits(/*leave_secondary_solve_buffer=*/false) && AppendAssignmentIfFeasible(sat_solution, &solution_pool)) { if (parameters.log_search()) { @@ -6688,10 +6704,22 @@ void RoutingDimension::InitializeTransitVariables(int64_t slack_max) { break; } } + const bool is_unary = IsUnary(); for (int64_t i = 0; i < size; ++i) { + int64_t min_fixed_transit = std::numeric_limits::max(); + if (is_unary) { + 
for (int evaluator_index : class_evaluators_) { + const auto& unary_transit_callback = + model_->UnaryTransitCallbackOrNull(evaluator_index); + DCHECK(unary_transit_callback != nullptr); + min_fixed_transit = + std::min(min_fixed_transit, unary_transit_callback(i)); + } + } fixed_transits_[i] = solver->MakeIntVar( - are_all_evaluators_positive ? int64_t{0} - : std::numeric_limits::min(), + is_unary ? min_fixed_transit + : are_all_evaluators_positive ? int64_t{0} + : std::numeric_limits::min(), std::numeric_limits::max(), absl::StrCat(transit_name, i)); // Setting dependent_transits_[i]. if (base_dimension_ != nullptr) { diff --git a/ortools/routing/routing.h b/ortools/routing/routing.h index c52403b301..ba7a761766 100644 --- a/ortools/routing/routing.h +++ b/ortools/routing/routing.h @@ -255,6 +255,8 @@ struct RoutingSearchStats { int64_t num_cp_sat_calls_in_lp_scheduling = 0; int64_t num_glop_calls_in_lp_scheduling = 0; int64_t num_min_cost_flow_calls = 0; + int64_t num_cp_sat_calls_in_routing = 0; + int64_t num_generalized_cp_sat_calls_in_routing = 0; }; class OR_DLL RoutingModel { @@ -1469,6 +1471,8 @@ class OR_DLL RoutingModel { int64_t objective_lower_bound() const { return objective_lower_bound_; } /// Returns the current status of the routing model. RoutingSearchStatus::Value status() const { return status_; } + /// Returns search statistics. + const RoutingSearchStats& search_stats() const { return search_stats_; } /// Returns the value of the internal enable_deep_serialization_ parameter. bool enable_deep_serialization() const { return enable_deep_serialization_; } /// Applies a lock chain to the next search. 'locks' represents an ordered @@ -3644,7 +3648,7 @@ class RoutingDimension { /// solve the TSP corresponding to the model if it has a single vehicle. /// Therefore the resulting solution might not actually be feasible. Will return /// false if a solution could not be found. 
-bool SolveModelWithSat(RoutingModel* model, +bool SolveModelWithSat(RoutingModel* model, RoutingSearchStats* search_stats, const RoutingSearchParameters& search_parameters, const Assignment* initial_solution, Assignment* solution); diff --git a/ortools/routing/sat.cc b/ortools/routing/sat.cc index 9395d86c36..6c3792506b 100644 --- a/ortools/routing/sat.cc +++ b/ortools/routing/sat.cc @@ -176,21 +176,22 @@ void AddDimensions(const RoutingModel& model, const ArcVarMap& arc_vars, // Only a single vehicle class. const RoutingModel::TransitCallback2& transit = dimension->transit_evaluator(0); - std::vector cumuls(dimension->cumuls().size(), -1); + const int num_cumuls = dimension->cumuls().size(); + std::vector cumuls(num_cumuls, -1); const int64_t min_start = dimension->cumuls()[model.Start(0)]->Min(); const int64_t max_end = std::min(dimension->cumuls()[model.End(0)]->Max(), dimension->vehicle_capacities()[0]); for (int i = 0; i < cumuls.size(); ++i) { if (model.IsStart(i) || model.IsEnd(i)) continue; // Reducing bounds supposing the triangular inequality. 
- const int64_t cumul_min = - std::max(sat::kMinIntegerValue.value(), - std::max(dimension->cumuls()[i]->Min(), - CapAdd(transit(model.Start(0), i), min_start))); - const int64_t cumul_max = - std::min(sat::kMaxIntegerValue.value(), - std::min(dimension->cumuls()[i]->Max(), - CapSub(max_end, transit(i, model.End(0))))); + const int64_t cumul_min = std::max( + std::numeric_limits::min() / num_cumuls, + std::max(dimension->cumuls()[i]->Min(), + CapAdd(transit(model.Start(0), i), min_start))); + const int64_t cumul_max = std::min( + std::numeric_limits::max() / num_cumuls, + std::min(dimension->cumuls()[i]->Max(), + CapSub(max_end, transit(i, model.End(0))))); cumuls[i] = AddVariable(cp_model, cumul_min, cumul_max); AddSoftCumulBounds(dimension, i, cumuls[i], cumul_min, cumul_max, cp_model); @@ -505,7 +506,9 @@ void AddGeneralizedDimensions( for (int cp_node = 1; cp_node < num_cp_nodes; ++cp_node) { const int node = cp_node - 1; int64_t cumul_min = dimension->cumuls()[node]->Min(); - int64_t cumul_max = dimension->cumuls()[node]->Max(); + int64_t cumul_max = + std::min(dimension->cumuls()[node]->Max(), + std::numeric_limits::max() / (2 * num_cp_nodes)); if (model.IsStart(node) || model.IsEnd(node)) { const int vehicle = model.VehicleIndex(node); cumul_max = @@ -547,7 +550,10 @@ void AddGeneralizedDimensions( cp_tail - 1 < dimension->slacks().size() ? dimension->slacks()[cp_tail - 1]->Max() : 0; - slack[cp_tail] = AddVariable(cp_model, 0, slack_max); + slack[cp_tail] = AddVariable( + cp_model, 0, + std::min(slack_max, std::numeric_limits::max() / + (2 * num_cp_nodes))); if (slack_max > 0 && slack_cost > 0) { cp_model->mutable_objective()->add_vars(slack[cp_tail]); cp_model->mutable_objective()->add_coeffs(slack_cost); @@ -815,6 +821,7 @@ ArcVarMap PopulateGeneralizedRouteModelFromRoutingModel( {{num_performed, 1}, {num_violated, 1}}); } // Create "arc" variables. 
+ std::vector> first_to_end_arcs; for (int tail = 0; tail < num_nodes; ++tail) { const int cp_tail = tail + 1; std::unique_ptr iter( @@ -845,8 +852,19 @@ ArcVarMap PopulateGeneralizedRouteModelFromRoutingModel( DCHECK(!arc_vars.contains(arc)); const int arc_var = AddVariable(cp_model, 0, 1); arc_vars.insert({arc, arc_var}); + if (model.IsStart(tail) && model.IsEnd(head)) { + first_to_end_arcs.push_back({arc_var, 1}); + } } } + // Limit the number of routes to the maximum number of vehicles. + { + AddLinearConstraint( + cp_model, + std::max(model.vehicles() - model.GetMaximumNumberOfActiveVehicles(), + 0), + model.vehicles(), first_to_end_arcs); + } // Set literals for vehicle performing node. for (int cp_node = 1; cp_node < num_cp_nodes; cp_node++) { @@ -1154,11 +1172,12 @@ bool IsFeasibleArcVarMap(const ArcVarMap& arc_vars, int max_node_index) { // Solves a RoutingModel using the CP-SAT solver. Returns false if no solution // was found. -bool SolveModelWithSat(RoutingModel* model, +bool SolveModelWithSat(RoutingModel* model, RoutingSearchStats* search_stats, const RoutingSearchParameters& search_parameters, const Assignment* initial_solution, Assignment* solution) { - const absl::Duration remaining_time = model->RemainingTime(); + // Adding a bit of slack to the time limit for the CP-SAT solver. 
+ const absl::Duration remaining_time = model->RemainingTime() * 0.95; const absl::Time deadline = model->solver()->Now() + remaining_time; sat::CpModelProto cp_model; cp_model.mutable_objective()->set_scaling_factor( @@ -1167,7 +1186,8 @@ bool SolveModelWithSat(RoutingModel* model, const sat::CpObjectiveProto& objective = cp_model.objective(); const std::function null_observer; - if (search_parameters.use_generalized_cp_sat() == BOOL_TRUE) { + if (search_parameters.use_generalized_cp_sat() == BOOL_TRUE || + !sat::RoutingModelCanBeSolvedBySat(*model)) { const sat::ArcVarMap arc_vars = sat::PopulateGeneralizedRouteModelFromRoutingModel(*model, &cp_model); const int max_node_index = model->Nexts().size() + model->vehicles(); @@ -1189,13 +1209,14 @@ bool SolveModelWithSat(RoutingModel* model, *solution, /*call_at_solution_monitors=*/true); } : null_observer; + if (search_stats) search_stats->num_generalized_cp_sat_calls_in_routing++; return sat::ConvertGeneralizedResponseToSolution( sat::SolveRoutingModel(cp_model, remaining_time, model->GetMutableCPSatInterrupt(), search_parameters, observer), objective, *model, arc_vars, solution); } - if (!sat::RoutingModelCanBeSolvedBySat(*model)) return false; + DCHECK(sat::RoutingModelCanBeSolvedBySat(*model)); const sat::ArcVarMap arc_vars = sat::PopulateModelFromRoutingModel(*model, &cp_model); sat::AddSolutionAsHintToModel(initial_solution, *model, arc_vars, &cp_model); @@ -1212,6 +1233,7 @@ bool SolveModelWithSat(RoutingModel* model, model->CheckIfAssignmentIsFeasible(*solution, /*call_at_solution_monitors=*/true); } : null_observer; + if (search_stats) search_stats->num_cp_sat_calls_in_routing++; return sat::ConvertToSolution( sat::SolveRoutingModel(cp_model, remaining_time, model->GetMutableCPSatInterrupt(), From b7f7e9b1ec24e1c9c99a4c12371a4187660cc6ec Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 26 Jun 2025 13:24:10 +0200 Subject: [PATCH 131/509] cmake: update doxygen-awesome-css from v2.1.0 to v2.3.4 --- 
cmake/cpp.cmake | 2 +- cmake/dotnet.cmake | 2 +- cmake/java.cmake | 2 +- cmake/python.cmake | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 269275b0af..d3322843bf 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -601,7 +601,7 @@ if(BUILD_CXX_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/cpp/Doxyfile.in ${PROJECT_BINARY_DIR}/cpp/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/cpp/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/dotnet.cmake b/cmake/dotnet.cmake index c6fca998bf..5dc4642e86 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -507,7 +507,7 @@ if(BUILD_DOTNET_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/dotnet/Doxyfile.in ${PROJECT_BINARY_DIR}/dotnet/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/dotnet/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/java.cmake b/cmake/java.cmake index c515c0c17b..f72b0ab6e0 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -569,7 +569,7 @@ if(BUILD_JAVA_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/java/Doxyfile.in ${PROJECT_BINARY_DIR}/java/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/java/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/python.cmake b/cmake/python.cmake index 0a4641faa3..2b241548fa 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -867,7 +867,7 @@ if(BUILD_PYTHON_DOC) 
if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/python/Doxyfile.in ${PROJECT_BINARY_DIR}/python/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/python/doxygen-awesome.css SHOW_PROGRESS ) From d263eea61930d0ccc2cb098590e5d913aba0ddb9 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 25 Jun 2025 15:46:08 +0200 Subject: [PATCH 132/509] Fix or-tools.code-workspace --- or-tools.code-workspace | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/or-tools.code-workspace b/or-tools.code-workspace index 96c6145349..abf8dd1536 100644 --- a/or-tools.code-workspace +++ b/or-tools.code-workspace @@ -113,7 +113,7 @@ "USE_SCIP" ], "C_Cpp.clang_format_style": "Google", - "python.formatting.provider": "yapf", + "python.formatting.provider": "black", "python.pythonPath": "python3", "python.autoComplete.extraPaths": [ "${workspaceRoot}", From 52d44af1dd46e99e72664d01e5f72a5325e24955 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 26 Jun 2025 13:24:10 +0200 Subject: [PATCH 133/509] cmake: update doxygen-awesome-css from v2.1.0 to v2.3.4 --- cmake/cpp.cmake | 2 +- cmake/dotnet.cmake | 2 +- cmake/java.cmake | 2 +- cmake/python.cmake | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 9cdc27ec1e..09f7417613 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -581,7 +581,7 @@ if(BUILD_CXX_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/cpp/Doxyfile.in ${PROJECT_BINARY_DIR}/cpp/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/cpp/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/dotnet.cmake 
b/cmake/dotnet.cmake index c74042c04c..52f77b77e3 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -505,7 +505,7 @@ if(BUILD_DOTNET_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/dotnet/Doxyfile.in ${PROJECT_BINARY_DIR}/dotnet/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/dotnet/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/java.cmake b/cmake/java.cmake index 74184c1f4f..9783f2e1dd 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -567,7 +567,7 @@ if(BUILD_JAVA_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/java/Doxyfile.in ${PROJECT_BINARY_DIR}/java/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/java/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/python.cmake b/cmake/python.cmake index 2112ceeb66..70a8151a89 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -849,7 +849,7 @@ if(BUILD_PYTHON_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/python/Doxyfile.in ${PROJECT_BINARY_DIR}/python/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/python/doxygen-awesome.css SHOW_PROGRESS ) From 94308a9fe68c7d41cd3775b74fddd89a5dbe6ec5 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 26 Jun 2025 13:32:30 +0200 Subject: [PATCH 134/509] doxygen: force light-mode for doc --- cmake/cpp.cmake | 1 + cmake/dotnet.cmake | 1 + cmake/java.cmake | 1 + cmake/python.cmake | 1 + ortools/cpp/Doxyfile.in | 4 +- 
ortools/dotnet/Doxyfile.in | 4 +- ortools/doxygen/header.html | 76 +++++++++++++++++++++++++++++++++++++ ortools/java/Doxyfile.in | 4 +- ortools/python/Doxyfile.in | 4 +- 9 files changed, 88 insertions(+), 8 deletions(-) create mode 100644 ortools/doxygen/header.html diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index d3322843bf..587d159d52 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -612,6 +612,7 @@ if(BUILD_CXX_DOC) DEPENDS ${PROJECT_BINARY_DIR}/cpp/Doxyfile ${PROJECT_BINARY_DIR}/cpp/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/cpp/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating C++ API documentation with Doxygen" diff --git a/cmake/dotnet.cmake b/cmake/dotnet.cmake index 5dc4642e86..12ead2117a 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -519,6 +519,7 @@ if(BUILD_DOTNET_DOC) dotnet_package ${PROJECT_BINARY_DIR}/dotnet/Doxyfile ${PROJECT_BINARY_DIR}/dotnet/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/dotnet/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating .Net API documentation with Doxygen" diff --git a/cmake/java.cmake b/cmake/java.cmake index f72b0ab6e0..ae7f023c9d 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -581,6 +581,7 @@ if(BUILD_JAVA_DOC) java_package ${PROJECT_BINARY_DIR}/java/Doxyfile ${PROJECT_BINARY_DIR}/java/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/java/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating Java API documentation with Doxygen" diff --git a/cmake/python.cmake b/cmake/python.cmake index 2b241548fa..6d4fbde01e 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -879,6 +879,7 @@ if(BUILD_PYTHON_DOC) python_package ${PROJECT_BINARY_DIR}/python/Doxyfile ${PROJECT_BINARY_DIR}/python/doxygen-awesome.css + 
${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/python/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating Python API documentation with Doxygen" diff --git a/ortools/cpp/Doxyfile.in b/ortools/cpp/Doxyfile.in index c73af82a1c..3b22a9d3b4 100644 --- a/ortools/cpp/Doxyfile.in +++ b/ortools/cpp/Doxyfile.in @@ -1267,7 +1267,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1331,7 +1331,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/dotnet/Doxyfile.in b/ortools/dotnet/Doxyfile.in index 705bb2febb..a137f56eef 100644 --- a/ortools/dotnet/Doxyfile.in +++ b/ortools/dotnet/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. 
-HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/doxygen/header.html b/ortools/doxygen/header.html new file mode 100644 index 0000000000..105640b7d3 --- /dev/null +++ b/ortools/doxygen/header.html @@ -0,0 +1,76 @@ + + + + + + + + +$projectname: $title +$title + + + + + + + + + + + + +$treeview +$search +$mathjax +$darkmode + +$extrastylesheet + + + +
+ + +
+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
$projectname $projectnumber +
+
$projectbrief
+
+
$projectbrief
+
$searchbox
$searchbox
+
+ + diff --git a/ortools/java/Doxyfile.in b/ortools/java/Doxyfile.in index 5e35f02281..ff8544e977 100644 --- a/ortools/java/Doxyfile.in +++ b/ortools/java/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/python/Doxyfile.in b/ortools/python/Doxyfile.in index 4e773821e7..48b3347927 100644 --- a/ortools/python/Doxyfile.in +++ b/ortools/python/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. 
Doxygen # will adjust the colors in the style sheet and background images according to From b644c5fb1e82ac1251a4df330db46f2bd3045af0 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Jun 2025 09:59:26 +0200 Subject: [PATCH 135/509] cmake(ci): Rework Vagrantfile ci: Rework BSD workflows ci: Bump freebsd Vagrantfile from python 3.9 to 3.11 --- .github/workflows/amd64_freebsd_cmake.yml | 54 ++++++++++++++--------- cmake/Makefile | 2 +- cmake/vagrant/freebsd/cpp/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/dotnet/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/java/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/python/Vagrantfile | 14 ++++-- cmake/vagrant/netbsd/cpp/Vagrantfile | 4 +- cmake/vagrant/netbsd/dotnet/Vagrantfile | 6 +-- cmake/vagrant/netbsd/java/Vagrantfile | 6 +-- cmake/vagrant/netbsd/python/Vagrantfile | 8 ++-- 10 files changed, 81 insertions(+), 43 deletions(-) diff --git a/.github/workflows/amd64_freebsd_cmake.yml b/.github/workflows/amd64_freebsd_cmake.yml index 9cf860332a..81fa90da0b 100644 --- a/.github/workflows/amd64_freebsd_cmake.yml +++ b/.github/workflows/amd64_freebsd_cmake.yml @@ -1,3 +1,4 @@ +# ref: https://github.com/actions/runner-images name: amd64 FreeBSD CMake on: [push, pull_request, workflow_dispatch] @@ -6,31 +7,44 @@ concurrency: group: ${{github.workflow}}-${{github.ref}} cancel-in-progress: true -# Only macos-12 runner provide virtualisation with vagrant/virtualbox installed. -# ref: https://github.com/actions/runner-images/tree/main/images/macos -# ref: https://app.vagrantup.com/generic/boxes/freebsd13 +# Building using the github runner environement directly. 
jobs: vagrant: strategy: - fail-fast: false matrix: - distro: [freebsd] - lang: [cpp, python] - allow_failure: [false] - include: - - distro: freebsd - lang: dotnet - allow_failure: true - - distro: freebsd - lang: java - allow_failure: true - name: amd64•FreeBSD•CMake•${{matrix.lang}} - runs-on: macos-12 + distro: [ + freebsd, + #netbsd, + #openbsd, + ] + lang: [ + cpp, + dotnet, + java, + python, + ] + allow_failure: [true] + fail-fast: false + name: amd64•${{matrix.distro}}•CMake•${{matrix.lang}} + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: vagrant version - run: Vagrant --version - - name: VirtualBox version - run: virtualbox -h + - name: Virtualbox install + run: | + sudo apt update -q + sudo apt install -yq virtualbox + virtualbox --help + - name: Vagrant install + run: | + sudo apt update -q + wget https://releases.hashicorp.com/vagrant/2.4.7/vagrant_2.4.7-1_amd64.deb + sudo apt install -y ./vagrant_2.4.7-1_amd64.deb + vagrant --version - name: Build run: make --directory=cmake ${{matrix.distro}}_${{matrix.lang}} + + amd64_bsd_cmake: + runs-on: ubuntu-latest + needs: vagrant + steps: + - uses: actions/checkout@v4 diff --git a/cmake/Makefile b/cmake/Makefile index f63de4d762..021c17fa42 100644 --- a/cmake/Makefile +++ b/cmake/Makefile @@ -803,4 +803,4 @@ clean: clean_all clean_platforms clean_toolchains clean_web clean_vagrant clean_ distclean: clean -docker container rm -f $$(docker container ls -aq) -docker image rm -f $$(docker image ls -aq) - -vagrant box remove -f generic/freebsd12 + -vagrant box remove -f generic/freebsd14 diff --git a/cmake/vagrant/freebsd/cpp/Vagrantfile b/cmake/vagrant/freebsd/cpp/Vagrantfile index 31e311c074..5440fd949e 100644 --- a/cmake/vagrant/freebsd/cpp/Vagrantfile +++ b/cmake/vagrant/freebsd/cpp/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. 
config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_cpp" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/dotnet/Vagrantfile b/cmake/vagrant/freebsd/dotnet/Vagrantfile index 567bd27889..7648c0ae11 100644 --- a/cmake/vagrant/freebsd/dotnet/Vagrantfile +++ b/cmake/vagrant/freebsd/dotnet/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_dotnet" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. 
- #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/java/Vagrantfile b/cmake/vagrant/freebsd/java/Vagrantfile index c6584eb767..fff3664342 100644 --- a/cmake/vagrant/freebsd/java/Vagrantfile +++ b/cmake/vagrant/freebsd/java/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_java" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. 
+ config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/python/Vagrantfile b/cmake/vagrant/freebsd/python/Vagrantfile index 73a6ba7b2a..7cd93bd1d5 100644 --- a/cmake/vagrant/freebsd/python/Vagrantfile +++ b/cmake/vagrant/freebsd/python/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_python" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. 
@@ -75,8 +81,8 @@ Vagrant.configure("2") do |config| set -x pkg update -f pkg install -y git cmake - pkg install -y swig python39 py39-wheel py39-pip py39-pytest-virtualenv - pkg install -y py39-numpy py39-pandas py39-matplotlib + pkg install -y swig python311 py311-wheel py311-pip py311-pytest-virtualenv + pkg install -y py311-numpy py311-pandas py311-matplotlib SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/cpp/Vagrantfile b/cmake/vagrant/netbsd/cpp/Vagrantfile index 0378b3d5a8..db188d48cc 100644 --- a/cmake/vagrant/netbsd/cpp/Vagrantfile +++ b/cmake/vagrant/netbsd/cpp/Vagrantfile @@ -73,8 +73,8 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake + pkgin update + pkgin -y install git cmake SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/dotnet/Vagrantfile b/cmake/vagrant/netbsd/dotnet/Vagrantfile index bceb231d8b..09ac57e594 100644 --- a/cmake/vagrant/netbsd/dotnet/Vagrantfile +++ b/cmake/vagrant/netbsd/dotnet/Vagrantfile @@ -73,10 +73,10 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake + pkgin update + pkgin -y install git cmake kldload linux64 - pkg install -y swig linux-dotnet-sdk + pkgin -y install swig linux-dotnet-sdk SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/java/Vagrantfile b/cmake/vagrant/netbsd/java/Vagrantfile index 050e73496e..7c44664b8e 100644 --- a/cmake/vagrant/netbsd/java/Vagrantfile +++ b/cmake/vagrant/netbsd/java/Vagrantfile @@ -73,9 +73,9 @@ Vagrant.configure("2") do |config| # note: clang installed by default 
config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake - pkg install -y swig openjdk11 maven + pkgin update + pkgin -y install git cmake + pkgin -y install swig openjdk11 maven mount -t fdescfs fdesc /dev/fd mount -t procfs proc /proc SHELL diff --git a/cmake/vagrant/netbsd/python/Vagrantfile b/cmake/vagrant/netbsd/python/Vagrantfile index 6cfb5783f6..86340ab3d0 100644 --- a/cmake/vagrant/netbsd/python/Vagrantfile +++ b/cmake/vagrant/netbsd/python/Vagrantfile @@ -73,10 +73,10 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake - pkg install -y swig python39 py39-wheel py39-pip py39-pytest-virtualenv - pkg install -y py39-numpy py39-pandas py39-matplotlib + pkgin update + pkgin -y install git cmake + pkgin -y install swig python311 py311-wheel py311-pip py311-pytest-virtualenv + pkgin -y install py311-numpy py311-pandas py311-matplotlib SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" From 835b3b3b93fc5fdc9db69aa1f6704f393cf2959d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 26 Jun 2025 13:32:30 +0200 Subject: [PATCH 136/509] doxygen: force light-mode for doc --- cmake/cpp.cmake | 1 + cmake/dotnet.cmake | 1 + cmake/java.cmake | 1 + cmake/python.cmake | 1 + ortools/cpp/Doxyfile.in | 4 +- ortools/dotnet/Doxyfile.in | 4 +- ortools/doxygen/header.html | 76 +++++++++++++++++++++++++++++++++++++ ortools/java/Doxyfile.in | 4 +- ortools/python/Doxyfile.in | 4 +- 9 files changed, 88 insertions(+), 8 deletions(-) create mode 100644 ortools/doxygen/header.html diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 09f7417613..551bbd255d 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -592,6 +592,7 @@ if(BUILD_CXX_DOC) DEPENDS ${PROJECT_BINARY_DIR}/cpp/Doxyfile ${PROJECT_BINARY_DIR}/cpp/doxygen-awesome.css + 
${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/cpp/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating C++ API documentation with Doxygen" diff --git a/cmake/dotnet.cmake b/cmake/dotnet.cmake index 52f77b77e3..731dd4d8ff 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -517,6 +517,7 @@ if(BUILD_DOTNET_DOC) dotnet_package ${PROJECT_BINARY_DIR}/dotnet/Doxyfile ${PROJECT_BINARY_DIR}/dotnet/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/dotnet/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating .Net API documentation with Doxygen" diff --git a/cmake/java.cmake b/cmake/java.cmake index 9783f2e1dd..eb465a106e 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -579,6 +579,7 @@ if(BUILD_JAVA_DOC) java_package ${PROJECT_BINARY_DIR}/java/Doxyfile ${PROJECT_BINARY_DIR}/java/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/java/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating Java API documentation with Doxygen" diff --git a/cmake/python.cmake b/cmake/python.cmake index 70a8151a89..c5daeee8a3 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -861,6 +861,7 @@ if(BUILD_PYTHON_DOC) python_package ${PROJECT_BINARY_DIR}/python/Doxyfile ${PROJECT_BINARY_DIR}/python/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/python/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating Python API documentation with Doxygen" diff --git a/ortools/cpp/Doxyfile.in b/ortools/cpp/Doxyfile.in index c73af82a1c..3b22a9d3b4 100644 --- a/ortools/cpp/Doxyfile.in +++ b/ortools/cpp/Doxyfile.in @@ -1267,7 +1267,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. 
-HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1331,7 +1331,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/dotnet/Doxyfile.in b/ortools/dotnet/Doxyfile.in index 705bb2febb..a137f56eef 100644 --- a/ortools/dotnet/Doxyfile.in +++ b/ortools/dotnet/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/doxygen/header.html b/ortools/doxygen/header.html new file mode 100644 index 0000000000..105640b7d3 --- /dev/null +++ b/ortools/doxygen/header.html @@ -0,0 +1,76 @@ + + + + + + + + +$projectname: $title +$title + + + + + + + + + + + + +$treeview +$search +$mathjax +$darkmode + +$extrastylesheet + + + +
+ + +
+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
$projectname $projectnumber +
+
$projectbrief
+
+
$projectbrief
+
$searchbox
$searchbox
+
+ + diff --git a/ortools/java/Doxyfile.in b/ortools/java/Doxyfile.in index 5e35f02281..ff8544e977 100644 --- a/ortools/java/Doxyfile.in +++ b/ortools/java/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/python/Doxyfile.in b/ortools/python/Doxyfile.in index 4e773821e7..48b3347927 100644 --- a/ortools/python/Doxyfile.in +++ b/ortools/python/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. 
Doxygen # will adjust the colors in the style sheet and background images according to From 09e7e951a25653b19f0b8fcdaa4565435a54c405 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Jun 2025 09:59:26 +0200 Subject: [PATCH 137/509] cmake(ci): Rework Vagrantfile ci: Rework BSD workflows ci: Bump freebsd Vagrantfile from python 3.9 to 3.11 --- .github/workflows/amd64_freebsd_cmake.yml | 54 ++++++++++++++--------- cmake/Makefile | 2 +- cmake/vagrant/freebsd/cpp/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/dotnet/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/java/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/python/Vagrantfile | 14 ++++-- cmake/vagrant/netbsd/cpp/Vagrantfile | 4 +- cmake/vagrant/netbsd/dotnet/Vagrantfile | 6 +-- cmake/vagrant/netbsd/java/Vagrantfile | 6 +-- cmake/vagrant/netbsd/python/Vagrantfile | 8 ++-- 10 files changed, 81 insertions(+), 43 deletions(-) diff --git a/.github/workflows/amd64_freebsd_cmake.yml b/.github/workflows/amd64_freebsd_cmake.yml index 9cf860332a..81fa90da0b 100644 --- a/.github/workflows/amd64_freebsd_cmake.yml +++ b/.github/workflows/amd64_freebsd_cmake.yml @@ -1,3 +1,4 @@ +# ref: https://github.com/actions/runner-images name: amd64 FreeBSD CMake on: [push, pull_request, workflow_dispatch] @@ -6,31 +7,44 @@ concurrency: group: ${{github.workflow}}-${{github.ref}} cancel-in-progress: true -# Only macos-12 runner provide virtualisation with vagrant/virtualbox installed. -# ref: https://github.com/actions/runner-images/tree/main/images/macos -# ref: https://app.vagrantup.com/generic/boxes/freebsd13 +# Building using the github runner environment directly. 
jobs: vagrant: strategy: - fail-fast: false matrix: - distro: [freebsd] - lang: [cpp, python] - allow_failure: [false] - include: - - distro: freebsd - lang: dotnet - allow_failure: true - - distro: freebsd - lang: java - allow_failure: true - name: amd64•FreeBSD•CMake•${{matrix.lang}} - runs-on: macos-12 + distro: [ + freebsd, + #netbsd, + #openbsd, + ] + lang: [ + cpp, + dotnet, + java, + python, + ] + allow_failure: [true] + fail-fast: false + name: amd64•${{matrix.distro}}•CMake•${{matrix.lang}} + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: vagrant version - run: Vagrant --version - - name: VirtualBox version - run: virtualbox -h + - name: Virtualbox install + run: | + sudo apt update -q + sudo apt install -yq virtualbox + virtualbox --help + - name: Vagrant install + run: | + sudo apt update -q + wget https://releases.hashicorp.com/vagrant/2.4.7/vagrant_2.4.7-1_amd64.deb + sudo apt install -y ./vagrant_2.4.7-1_amd64.deb + vagrant --version - name: Build run: make --directory=cmake ${{matrix.distro}}_${{matrix.lang}} + + amd64_bsd_cmake: + runs-on: ubuntu-latest + needs: vagrant + steps: + - uses: actions/checkout@v4 diff --git a/cmake/Makefile b/cmake/Makefile index f63de4d762..021c17fa42 100644 --- a/cmake/Makefile +++ b/cmake/Makefile @@ -803,4 +803,4 @@ clean: clean_all clean_platforms clean_toolchains clean_web clean_vagrant clean_ distclean: clean -docker container rm -f $$(docker container ls -aq) -docker image rm -f $$(docker image ls -aq) - -vagrant box remove -f generic/freebsd12 + -vagrant box remove -f generic/freebsd14 diff --git a/cmake/vagrant/freebsd/cpp/Vagrantfile b/cmake/vagrant/freebsd/cpp/Vagrantfile index 31e311c074..5440fd949e 100644 --- a/cmake/vagrant/freebsd/cpp/Vagrantfile +++ b/cmake/vagrant/freebsd/cpp/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. 
config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_cpp" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/dotnet/Vagrantfile b/cmake/vagrant/freebsd/dotnet/Vagrantfile index 567bd27889..7648c0ae11 100644 --- a/cmake/vagrant/freebsd/dotnet/Vagrantfile +++ b/cmake/vagrant/freebsd/dotnet/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_dotnet" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. 
- #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/java/Vagrantfile b/cmake/vagrant/freebsd/java/Vagrantfile index c6584eb767..fff3664342 100644 --- a/cmake/vagrant/freebsd/java/Vagrantfile +++ b/cmake/vagrant/freebsd/java/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_java" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. 
+ config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/python/Vagrantfile b/cmake/vagrant/freebsd/python/Vagrantfile index 73a6ba7b2a..7cd93bd1d5 100644 --- a/cmake/vagrant/freebsd/python/Vagrantfile +++ b/cmake/vagrant/freebsd/python/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_python" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. 
@@ -75,8 +81,8 @@ Vagrant.configure("2") do |config| set -x pkg update -f pkg install -y git cmake - pkg install -y swig python39 py39-wheel py39-pip py39-pytest-virtualenv - pkg install -y py39-numpy py39-pandas py39-matplotlib + pkg install -y swig python311 py311-wheel py311-pip py311-pytest-virtualenv + pkg install -y py311-numpy py311-pandas py311-matplotlib SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/cpp/Vagrantfile b/cmake/vagrant/netbsd/cpp/Vagrantfile index 0378b3d5a8..db188d48cc 100644 --- a/cmake/vagrant/netbsd/cpp/Vagrantfile +++ b/cmake/vagrant/netbsd/cpp/Vagrantfile @@ -73,8 +73,8 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake + pkgin update + pkgin -y install git cmake SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/dotnet/Vagrantfile b/cmake/vagrant/netbsd/dotnet/Vagrantfile index bceb231d8b..09ac57e594 100644 --- a/cmake/vagrant/netbsd/dotnet/Vagrantfile +++ b/cmake/vagrant/netbsd/dotnet/Vagrantfile @@ -73,10 +73,10 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake + pkgin update + pkgin -y install git cmake kldload linux64 - pkg install -y swig linux-dotnet-sdk + pkgin -y install swig linux-dotnet-sdk SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/java/Vagrantfile b/cmake/vagrant/netbsd/java/Vagrantfile index 050e73496e..7c44664b8e 100644 --- a/cmake/vagrant/netbsd/java/Vagrantfile +++ b/cmake/vagrant/netbsd/java/Vagrantfile @@ -73,9 +73,9 @@ Vagrant.configure("2") do |config| # note: clang installed by default 
config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake - pkg install -y swig openjdk11 maven + pkgin update + pkgin -y install git cmake + pkgin -y install swig openjdk11 maven mount -t fdescfs fdesc /dev/fd mount -t procfs proc /proc SHELL diff --git a/cmake/vagrant/netbsd/python/Vagrantfile b/cmake/vagrant/netbsd/python/Vagrantfile index 6cfb5783f6..86340ab3d0 100644 --- a/cmake/vagrant/netbsd/python/Vagrantfile +++ b/cmake/vagrant/netbsd/python/Vagrantfile @@ -73,10 +73,10 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake - pkg install -y swig python39 py39-wheel py39-pip py39-pytest-virtualenv - pkg install -y py39-numpy py39-pandas py39-matplotlib + pkgin update + pkgin -y install git cmake + pkgin -y install swig python311 py311-wheel py311-pip py311-pytest-virtualenv + pkgin -y install py311-numpy py311-pandas py311-matplotlib SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" From 5b1376b9dffcee3d3c3956bc1f70902b01b5841e Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 1 Jul 2025 16:51:11 +0200 Subject: [PATCH 138/509] small cleaning --- cmake/python.cmake | 2 - ortools/algorithms/hungarian.cc | 1 - ortools/algorithms/hungarian.h | 16 - ortools/bop/bop_portfolio.h | 1 - ortools/constraint_solver/alldiff_cst.cc | 2 - .../constraint_solver/constraint_solver.cc | 1 - ortools/constraint_solver/count_cst.cc | 1 - ortools/constraint_solver/expr_array.cc | 1 - ortools/constraint_solver/expr_cst.cc | 1 - ortools/constraint_solver/range_cst.cc | 1 - ortools/constraint_solver/search_limit.proto | 1 - ortools/constraint_solver/table.cc | 1 - ortools/glop/preprocessor.h | 1 - ortools/glop/revised_simplex.cc | 1 - ortools/graph/cliques.h | 1 - ortools/graph/linear_assignment.h | 1 - ortools/graph/solve_flow_model.cc | 1 - 
ortools/linear_solver/cbc_interface.cc | 1 - ortools/linear_solver/clp_interface.cc | 1 - .../linear_solver/proto_solver/BUILD.bazel | 20 + .../proto_solver/preprocessor.cc | 492 +++++++ .../linear_solver/proto_solver/preprocessor.h | 198 +++ .../proto_solver/sat_solver_utils.cc | 51 +- .../python/model_builder_helper.cc | 3 +- ortools/lp_data/BUILD.bazel | 11 - ortools/lp_data/README.md | 86 ++ ortools/lp_data/lp_data.h | 1 - ortools/lp_data/sparse.h | 1 - ortools/math_opt/cpp/solver_resources.cc | 6 + ortools/math_opt/cpp/solver_resources.h | 3 + ortools/math_opt/labs/BUILD.bazel | 14 + ortools/math_opt/python/BUILD.bazel | 60 - ortools/math_opt/python/hash_model_storage.py | 843 ------------ .../python/hash_model_storage_test.py | 31 - ortools/math_opt/python/model_storage.py | 441 ------- ortools/math_opt/python/model_storage_test.py | 941 ------------- .../python/model_storage_update_test.py | 1175 ----------------- .../routing/parsers/solomon_parser_test.cc | 2 +- ortools/routing/samples/cvrp_disjoint_tw.cc | 1 - ortools/routing/samples/cvrptw.cc | 1 - ortools/routing/samples/cvrptw_with_breaks.cc | 1 - .../samples/cvrptw_with_precedences.cc | 1 - ortools/sat/BUILD.bazel | 2 + ortools/sat/cp_model_solver_helpers.cc | 4 + ortools/sat/sat_solver.cc | 2 +- ortools/sat/synchronization.cc | 2 +- ortools/sat/synchronization.h | 8 +- 47 files changed, 868 insertions(+), 3569 deletions(-) create mode 100644 ortools/linear_solver/proto_solver/preprocessor.cc create mode 100644 ortools/linear_solver/proto_solver/preprocessor.h delete mode 100644 ortools/math_opt/python/hash_model_storage.py delete mode 100644 ortools/math_opt/python/hash_model_storage_test.py delete mode 100644 ortools/math_opt/python/model_storage.py delete mode 100644 ortools/math_opt/python/model_storage_test.py delete mode 100644 ortools/math_opt/python/model_storage_update_test.py diff --git a/cmake/python.cmake b/cmake/python.cmake index 6d4fbde01e..01d0e0b8f2 100644 --- a/cmake/python.cmake +++ 
b/cmake/python.cmake @@ -384,7 +384,6 @@ if(BUILD_MATH_OPT) ortools/math_opt/python/errors.py ortools/math_opt/python/expressions.py ortools/math_opt/python/from_model.py - ortools/math_opt/python/hash_model_storage.py ortools/math_opt/python/indicator_constraints.py ortools/math_opt/python/init_arguments.py ortools/math_opt/python/linear_constraints.py @@ -392,7 +391,6 @@ if(BUILD_MATH_OPT) ortools/math_opt/python/message_callback.py ortools/math_opt/python/model.py ortools/math_opt/python/model_parameters.py - ortools/math_opt/python/model_storage.py ortools/math_opt/python/normalized_inequality.py ortools/math_opt/python/normalize.py ortools/math_opt/python/objectives.py diff --git a/ortools/algorithms/hungarian.cc b/ortools/algorithms/hungarian.cc index f39d420cc3..b0b2fa109a 100644 --- a/ortools/algorithms/hungarian.cc +++ b/ortools/algorithms/hungarian.cc @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff --git a/ortools/algorithms/hungarian.h b/ortools/algorithms/hungarian.h index 538027b0c9..5ae49fbbae 100644 --- a/ortools/algorithms/hungarian.h +++ b/ortools/algorithms/hungarian.h @@ -11,18 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// -// IMPORTANT NOTE: we advise using the code in -// graph/linear_assignment.h whose complexity is -// usually much smaller. -// TODO(user): base this code on LinearSumAssignment. -// -// For each of the four functions declared in this file, in case the input -// parameter 'cost' contains NaN, the function will return without invoking the -// Hungarian algorithm, and the output parameters 'direct_assignment' and -// 'reverse_assignment' will be left unchanged. -// - // An O(n^4) implementation of the Kuhn-Munkres algorithm (a.k.a. the // Hungarian algorithm) for solving the assignment problem. 
// The assignment problem takes a set of agents, a set of tasks and a @@ -30,10 +18,6 @@ // an optimal (i.e., least cost) assignment of agents to tasks. // The code also enables computing a maximum assignment by changing the // input matrix. -// -// This code is based on (read: translated from) the Java version -// (read: translated from) the Python version at -// http://www.clapper.org/software/python/munkres/. #ifndef OR_TOOLS_ALGORITHMS_HUNGARIAN_H_ #define OR_TOOLS_ALGORITHMS_HUNGARIAN_H_ diff --git a/ortools/bop/bop_portfolio.h b/ortools/bop/bop_portfolio.h index 16f78a616f..085ddaaf9b 100644 --- a/ortools/bop/bop_portfolio.h +++ b/ortools/bop/bop_portfolio.h @@ -56,7 +56,6 @@ class OptimizerSelector; // - LP_FIRST_SOLUTION // - OBJECTIVE_FIRST_SOLUTION // - USER_GUIDED_FIRST_SOLUTION -// - FEASIBILITY_PUMP_FIRST_SOLUTION // - RANDOM_CONSTRAINT_LNS_GUIDED_BY_LP // - RANDOM_VARIABLE_LNS_GUIDED_BY_LP // - RELATION_GRAPH_LNS diff --git a/ortools/constraint_solver/alldiff_cst.cc b/ortools/constraint_solver/alldiff_cst.cc index 3a3c2aff9a..328d756c3b 100644 --- a/ortools/constraint_solver/alldiff_cst.cc +++ b/ortools/constraint_solver/alldiff_cst.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// // AllDifferent constraints #include @@ -24,7 +23,6 @@ #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "ortools/base/logging.h" -#include "ortools/base/types.h" #include "ortools/constraint_solver/constraint_solver.h" #include "ortools/constraint_solver/constraint_solveri.h" #include "ortools/util/string_array.h" diff --git a/ortools/constraint_solver/constraint_solver.cc b/ortools/constraint_solver/constraint_solver.cc index de6ebe007f..0f4f56dfc0 100644 --- a/ortools/constraint_solver/constraint_solver.cc +++ b/ortools/constraint_solver/constraint_solver.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // This file implements the core objects of the constraint solver: // Solver, Search, Queue, ... along with the main resolution loop. diff --git a/ortools/constraint_solver/count_cst.cc b/ortools/constraint_solver/count_cst.cc index edab301b68..80ac0f9617 100644 --- a/ortools/constraint_solver/count_cst.cc +++ b/ortools/constraint_solver/count_cst.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Count constraints #include diff --git a/ortools/constraint_solver/expr_array.cc b/ortools/constraint_solver/expr_array.cc index 914c7e5e88..714909fd63 100644 --- a/ortools/constraint_solver/expr_array.cc +++ b/ortools/constraint_solver/expr_array.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Array Expression constraints #include diff --git a/ortools/constraint_solver/expr_cst.cc b/ortools/constraint_solver/expr_cst.cc index fb1b45bb97..c3d50056c7 100644 --- a/ortools/constraint_solver/expr_cst.cc +++ b/ortools/constraint_solver/expr_cst.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// // Expression constraints #include diff --git a/ortools/constraint_solver/range_cst.cc b/ortools/constraint_solver/range_cst.cc index 6637eeb40e..fa4b24ca66 100644 --- a/ortools/constraint_solver/range_cst.cc +++ b/ortools/constraint_solver/range_cst.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Range constraints #include diff --git a/ortools/constraint_solver/search_limit.proto b/ortools/constraint_solver/search_limit.proto index fc78a2e35b..43c3dd3be4 100644 --- a/ortools/constraint_solver/search_limit.proto +++ b/ortools/constraint_solver/search_limit.proto @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // This file contains a protocol buffer definition for search limits. syntax = "proto3"; diff --git a/ortools/constraint_solver/table.cc b/ortools/constraint_solver/table.cc index 40db3285f6..60003d8c2d 100644 --- a/ortools/constraint_solver/table.cc +++ b/ortools/constraint_solver/table.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // This file implements the table constraints. #include diff --git a/ortools/glop/preprocessor.h b/ortools/glop/preprocessor.h index dc171cb91d..4e77c5cdfe 100644 --- a/ortools/glop/preprocessor.h +++ b/ortools/glop/preprocessor.h @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // This file contains the presolving code for a LinearProgram. 
// // A classical reference is: diff --git a/ortools/glop/revised_simplex.cc b/ortools/glop/revised_simplex.cc index fcfe6ef92e..f1aeae58aa 100644 --- a/ortools/glop/revised_simplex.cc +++ b/ortools/glop/revised_simplex.cc @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/ortools/graph/cliques.h b/ortools/graph/cliques.h index 387c49a42c..6afe4a9f8a 100644 --- a/ortools/graph/cliques.h +++ b/ortools/graph/cliques.h @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Maximal clique algorithms, based on the Bron-Kerbosch algorithm. // See http://en.wikipedia.org/wiki/Bron-Kerbosch_algorithm // and diff --git a/ortools/graph/linear_assignment.h b/ortools/graph/linear_assignment.h index 188a46ddab..c77ad79b1f 100644 --- a/ortools/graph/linear_assignment.h +++ b/ortools/graph/linear_assignment.h @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // An implementation of a cost-scaling push-relabel algorithm for the // assignment problem (minimum-cost perfect bipartite matching), from // the paper of Goldberg and Kennedy (1995). diff --git a/ortools/graph/solve_flow_model.cc b/ortools/graph/solve_flow_model.cc index 31e98c162f..fd42f9fc6e 100644 --- a/ortools/graph/solve_flow_model.cc +++ b/ortools/graph/solve_flow_model.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // This code loads flow-graph models (as Dimacs file or binary FlowModel proto) // and solves them with the OR-tools flow algorithms. 
// diff --git a/ortools/linear_solver/cbc_interface.cc b/ortools/linear_solver/cbc_interface.cc index c3c8d45e4d..dc97d229eb 100644 --- a/ortools/linear_solver/cbc_interface.cc +++ b/ortools/linear_solver/cbc_interface.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// #if defined(USE_CBC) #include diff --git a/ortools/linear_solver/clp_interface.cc b/ortools/linear_solver/clp_interface.cc index 31220cefeb..d23df0eec5 100644 --- a/ortools/linear_solver/clp_interface.cc +++ b/ortools/linear_solver/clp_interface.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// #if defined(USE_CLP) || defined(USE_CBC) #include diff --git a/ortools/linear_solver/proto_solver/BUILD.bazel b/ortools/linear_solver/proto_solver/BUILD.bazel index eea9aa0f12..e7047ebc6f 100644 --- a/ortools/linear_solver/proto_solver/BUILD.bazel +++ b/ortools/linear_solver/proto_solver/BUILD.bazel @@ -72,6 +72,7 @@ cc_library( srcs = ["sat_solver_utils.cc"], hdrs = ["sat_solver_utils.h"], deps = [ + ":preprocessor", "//ortools/glop:parameters_cc_proto", "//ortools/glop:preprocessor", "//ortools/linear_solver:linear_solver_cc_proto", @@ -195,3 +196,22 @@ cc_library( "@highs", ], ) + +cc_library( + name = "preprocessor", + srcs = ["preprocessor.cc"], + hdrs = ["preprocessor.h"], + deps = [ + "//ortools/glop:preprocessor", + "//ortools/lp_data", + "//ortools/lp_data:base", + "//ortools/lp_data:lp_utils", + "//ortools/lp_data:sparse", + "//ortools/lp_data:sparse_column", + "//ortools/util:fp_utils", + "//ortools/util:return_macros", + "//ortools/util:stats", + "@abseil-cpp//absl/log", + "@abseil-cpp//absl/log:check", + ], +) diff --git a/ortools/linear_solver/proto_solver/preprocessor.cc b/ortools/linear_solver/proto_solver/preprocessor.cc new file mode 100644 index 0000000000..fa62477f66 --- /dev/null +++ 
b/ortools/linear_solver/proto_solver/preprocessor.cc @@ -0,0 +1,492 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/linear_solver/proto_solver/preprocessor.h" + +#include +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/log/log.h" +#include "ortools/lp_data/lp_data.h" +#include "ortools/lp_data/lp_types.h" +#include "ortools/lp_data/lp_utils.h" +#include "ortools/lp_data/sparse.h" +#include "ortools/lp_data/sparse_column.h" +#include "ortools/util/fp_utils.h" +#include "ortools/util/return_macros.h" +#include "ortools/util/stats.h" + +using ::operations_research::glop::ColIndex; +using ::operations_research::glop::ColToRowIndex; +using ::operations_research::glop::Fractional; +using ::operations_research::glop::kInfinity; +using ::operations_research::glop::LinearProgram; +using ::operations_research::glop::ProblemStatus; +using ::operations_research::glop::RowIndex; +using ::operations_research::glop::SparseColumn; +using ::operations_research::glop::SparseMatrix; +using ::operations_research::glop::StrictITIVector; +using ::operations_research::glop::SumWithNegativeInfiniteAndOneMissing; +using ::operations_research::glop::SumWithPositiveInfiniteAndOneMissing; + +namespace operations_research { + +// Helper function to check the bounds of the SetVariableBounds() and +// SetConstraintBounds() functions. 
+inline bool AreBoundsValid(Fractional lower_bound, Fractional upper_bound) { + if (std::isnan(lower_bound)) return false; + if (std::isnan(upper_bound)) return false; + if (lower_bound == kInfinity && upper_bound == kInfinity) return false; + if (lower_bound == -kInfinity && upper_bound == -kInfinity) return false; + if (lower_bound > upper_bound) return false; + return true; +} + +// -------------------------------------------------------- +// IntegerBoundsPreprocessor +// -------------------------------------------------------- + +bool IntegerBoundsPreprocessor::Run(LinearProgram* linear_program) { + SCOPED_INSTRUCTION_COUNT(time_limit_); + RETURN_VALUE_IF_NULL(linear_program, false); + const Fractional tolerance = integer_solution_tolerance_; + + // Make integer the bounds of integer variables. + // NOTE(user): it may happen that the new bound will be less strict (but at + // most it will be off by integer_solution_tolerance). + int64_t num_changed_bounds = 0; + for (ColIndex col : linear_program->IntegerVariablesList()) { + const Fractional lb = + ceil(linear_program->variable_lower_bounds()[col] - tolerance); + const Fractional ub = + floor(linear_program->variable_upper_bounds()[col] + tolerance); + if (!AreBoundsValid(lb, ub)) { + status_ = glop::ProblemStatus::PRIMAL_INFEASIBLE; + return false; + } + if (lb != linear_program->variable_lower_bounds()[col] || + ub != linear_program->variable_upper_bounds()[col]) { + num_changed_bounds++; + } + linear_program->SetVariableBounds(col, lb, ub); + } + VLOG(2) << "IntegerBoundsPreprocessor changed " << num_changed_bounds + << " variable bounds."; + + // Make integer the bounds of integer constraints, if it makes them stricter. 
+ const SparseMatrix& transpose = linear_program->GetTransposeSparseMatrix(); + num_changed_bounds = 0; + for (RowIndex row = RowIndex(0); row < linear_program->num_constraints(); + ++row) { + bool integer_constraint = true; + for (const SparseColumn::Entry var : transpose.column(RowToColIndex(row))) { + // Don't affect the constraint if it has a non-integer variable. + if (!linear_program->IsVariableInteger(RowToColIndex(var.row()))) { + integer_constraint = false; + break; + } + // Don't affect the constraint if it has a non-integer coefficient. Note + // that we require each coefficient to be precisely an integer in order to + // avoid floating point errors. + // + // TODO(user): checking integer constraints can go further, e.g., + // x + 2 * y = 4 for binary x and y can never be satisfied. But this + // perhaps starts to resemble bound propagation, which might be too much + // for a lightweighted preprocessor like this one. + if (round(var.coefficient()) != var.coefficient()) { + integer_constraint = false; + break; + } + } + if (integer_constraint) { + const Fractional lb = + std::ceil(linear_program->constraint_lower_bounds()[row] - tolerance); + const Fractional ub = std::floor( + linear_program->constraint_upper_bounds()[row] + tolerance); + if (!AreBoundsValid(lb, ub)) { + status_ = ProblemStatus::PRIMAL_INFEASIBLE; + return false; + } + if (lb != linear_program->constraint_lower_bounds()[row] || + ub != linear_program->constraint_upper_bounds()[row]) { + num_changed_bounds++; + } + linear_program->SetConstraintBounds(row, lb, ub); + } + } + VLOG(2) << "IntegerBoundsPreprocessor changed " << num_changed_bounds + << " constraint bounds."; + DCHECK(linear_program->BoundsOfIntegerVariablesAreInteger(tolerance)); + DCHECK(linear_program->BoundsOfIntegerConstraintsAreInteger(tolerance)); + return false; +} + +// -------------------------------------------------------- +// BoundPropagationPreprocessor +// -------------------------------------------------------- 
+// TODO(user): This preprocessor is not as efficient as it could be because each
+// time we process a constraint, we rescan all the involved variables. Make it
+// more efficient if it becomes needed. Note that this kind of propagation is
+// probably something we want to do each time we take a branch in the mip
+// search, so probably an efficient class for this will be created at some
+// point.
+bool BoundPropagationPreprocessor::Run(LinearProgram* linear_program) {
+  SCOPED_INSTRUCTION_COUNT(time_limit_);
+  RETURN_VALUE_IF_NULL(linear_program, false);
+  const Fractional tolerance = integer_solution_tolerance_;
+
+  // Starts by adding all the rows in the 'to_process' queue.
+  StrictITIVector in_queue(linear_program->num_constraints(),
+                           false);
+  std::deque to_process;
+  for (RowIndex row(0); row < linear_program->num_constraints(); ++row) {
+    to_process.push_back(row);
+    in_queue[row] = true;
+  }
+
+  // This preprocessor will need to access the constraints row by row.
+  const SparseMatrix& transpose = linear_program->GetTransposeSparseMatrix();
+
+  // Now process all the rows until none are left, or a limit on the number of
+  // processed rows is reached. The limit is mainly here to prevent infinite
+  // loops on corner-case problems. It should not be reached often in practice.
+  const int kMaxNumberOfProcessedRows =
+      linear_program->num_constraints().value() * 10;
+  for (int i = 0; i < kMaxNumberOfProcessedRows && !to_process.empty(); ++i) {
+    const RowIndex row = to_process.front();
+    in_queue[row] = false;
+    to_process.pop_front();
+
+    // For each variable of a constraint on n variables, we want the bound
+    // implied by the (n - 1) other variables and the constraint bounds. We use
+    // two handy utility classes that allow us to do that efficiently while
+    // dealing properly with infinite bounds.
+    SumWithNegativeInfiniteAndOneMissing lb_sum;
+    SumWithPositiveInfiniteAndOneMissing ub_sum;
+
+    // Initialize the sums.
+    bool skip = false;
+    for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
+      const ColIndex col = RowToColIndex(e.row());
+      Fractional entry_lb =
+          e.coefficient() * linear_program->variable_lower_bounds()[col];
+      Fractional entry_ub =
+          e.coefficient() * linear_program->variable_upper_bounds()[col];
+      if (e.coefficient() < 0.0) std::swap(entry_lb, entry_ub);
+      if (entry_lb == kInfinity || entry_ub == -kInfinity) {
+        // TODO(user): our SumWithOneMissing does not deal well with infinity of
+        // the wrong sign. For now when this happens we skip this constraint.
+        // Note however that the other implied bounds could still be used.
+        skip = true;
+        break;
+      }
+      lb_sum.Add(entry_lb);
+      ub_sum.Add(entry_ub);
+    }
+    if (skip) continue;
+
+    // The inequality
+    // constraint_lb <= sum(entries) <= constraint_ub
+    // can be rewritten as:
+    // sum(entries) + (-activity) = 0,
+    // where (-activity) has bounds [-constraint_ub, -constraint_lb].
+    // We use this latter convention to simplify our code.
+    lb_sum.Add(-linear_program->constraint_upper_bounds()[row]);
+    ub_sum.Add(-linear_program->constraint_lower_bounds()[row]);
+
+    // Process the variables one by one and check if the implied bounds are
+    // more restrictive.
+    for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
+      const ColIndex col = RowToColIndex(e.row());
+      const Fractional coeff = e.coefficient();
+      const Fractional var_lb = linear_program->variable_lower_bounds()[col];
+      const Fractional var_ub = linear_program->variable_upper_bounds()[col];
+      Fractional entry_lb = coeff * var_lb;
+      Fractional entry_ub = coeff * var_ub;
+      if (coeff < 0.0) std::swap(entry_lb, entry_ub);
+
+      // If X is the variable with index col and Y the sum of all the other
+      // variables and of (-activity), then coeff * X + Y = 0. Since Y's bounds
+      // are [lb_sum without X, ub_sum without X], it is easy to derive the
+      // implied bounds on X.
+ Fractional implied_lb = -ub_sum.SumWithout(entry_ub) / coeff; + Fractional implied_ub = -lb_sum.SumWithout(entry_lb) / coeff; + if (coeff < 0.0) std::swap(implied_lb, implied_ub); + + // If the variable is integer, make the implied bounds integer. + if (linear_program->IsVariableInteger(col)) { + implied_lb = std::ceil(implied_lb - tolerance); + implied_ub = std::floor(implied_ub + tolerance); + } + + // more restrictive? If yes, sets the bounds, and add all the impacted + // row back into to_process if they are not already there. + if (implied_lb > var_lb || implied_ub < var_ub) { + Fractional new_lb = std::max(implied_lb, var_lb); + Fractional new_ub = std::min(implied_ub, var_ub); + if (new_lb > new_ub) { + // TODO(user): Investigate what tolerance we should use here. + if (new_lb - tolerance > new_ub) { + status_ = ProblemStatus::PRIMAL_INFEASIBLE; + return false; + } else { + // We choose the nearest integer for an integer variable, or the + // middle value for a non-integer one. + if (linear_program->IsVariableInteger(col)) { + new_lb = new_ub = round(new_lb); + } else { + new_lb = new_ub = (new_lb + new_ub) / 2.0; + } + } + } + + // This extra test avoids reprocessing many rows for no reason. + // It can be false if we run into the case new_lb > new_ub above. 
+ if (new_ub != var_ub || new_lb != var_lb) { + linear_program->SetVariableBounds(col, new_lb, new_ub); + for (SparseColumn::Entry e : linear_program->GetSparseColumn(col)) { + if (!in_queue[e.row()]) { + to_process.push_back(e.row()); + in_queue[e.row()] = true; + } + } + } + } + } + } + if (!to_process.empty()) { + LOG_FIRST_N(WARNING, 10) + << "Propagation limit reached in the BoundPropagationPreprocessor, " + << "maybe the limit should be increased."; + } + DCHECK(linear_program->BoundsOfIntegerVariablesAreInteger( + integer_solution_tolerance_)); + DCHECK(linear_program->BoundsOfIntegerConstraintsAreInteger( + integer_solution_tolerance_)); + return false; +} + +// -------------------------------------------------------- +// ImpliedIntegerPreprocessor +// -------------------------------------------------------- +bool ImpliedIntegerPreprocessor::Run(LinearProgram* linear_program) { + SCOPED_INSTRUCTION_COUNT(time_limit_); + RETURN_VALUE_IF_NULL(linear_program, false); + int64_t num_implied_integer_variables = 0; + const Fractional tolerance = integer_solution_tolerance_; + for (ColIndex col(0); col < linear_program->num_variables(); ++col) { + DCHECK_EQ(linear_program->GetFirstSlackVariable(), glop::kInvalidCol); + + // Skip the integer variables. + if (linear_program->GetVariableType(col) != + LinearProgram::VariableType::CONTINUOUS) { + continue; + } + + const bool is_implied_integer = + VariableOccursInAtLeastOneEqualityConstraint(*linear_program, col) + ? AnyEqualityConstraintImpliesIntegrality(*linear_program, col) + : AllInequalityConstraintsImplyIntegrality(*linear_program, col); + + if (is_implied_integer) { + linear_program->SetVariableType( + col, LinearProgram::VariableType::IMPLIED_INTEGER); + num_implied_integer_variables++; + VLOG(2) << "Marked col " << col << " implied integer."; + + // We need to tighten its bounds if they are not integer, otherwise + // other preprocessor complains. 
+ const Fractional lb = + std::ceil(linear_program->variable_lower_bounds()[col] - tolerance); + const Fractional ub = + std::floor(linear_program->variable_upper_bounds()[col] + tolerance); + if (!AreBoundsValid(lb, ub)) { + status_ = ProblemStatus::PRIMAL_INFEASIBLE; + return false; + } + linear_program->SetVariableBounds(col, lb, ub); + } + } + VLOG(2) << "ImpliedIntegerPreprocessor detected " + << num_implied_integer_variables << " implied integer variables."; + + DCHECK(linear_program->BoundsOfIntegerVariablesAreInteger( + integer_solution_tolerance_)); + + // TODO(user): Because this presolve step detects new integer variables and + // does not tighten the bounds of a constraint if all its variables become + // integer, this invariant is currently not enforced: + // DCHECK(linear_program->BoundsOfIntegerConstraintsAreInteger( + // integer_solution_tolerance_)); + + return false; // Does not require postsolve. +} + +bool ImpliedIntegerPreprocessor::AnyEqualityConstraintImpliesIntegrality( + const LinearProgram& linear_program, ColIndex variable) { + for (const SparseColumn::Entry entry : + linear_program.GetSparseColumn(variable)) { + // Process only equality constraints. + if (linear_program.constraint_upper_bounds()[entry.row()] == + linear_program.constraint_lower_bounds()[entry.row()]) { + if (ConstraintSupportsImpliedIntegrality(linear_program, variable, + entry.row())) { + return true; + } + } + } + return false; +} + +bool ImpliedIntegerPreprocessor::AllInequalityConstraintsImplyIntegrality( + const LinearProgram& linear_program, ColIndex variable) { + // Check variable bounds. + Fractional lower_bound = linear_program.variable_lower_bounds()[variable]; + Fractional upper_bound = linear_program.variable_upper_bounds()[variable]; + if (!IsIntegerWithinTolerance(lower_bound, integer_solution_tolerance_) || + !IsIntegerWithinTolerance(upper_bound, integer_solution_tolerance_)) { + // The bounds are not integer. 
+    // We cannot deduce anything if the variable has an objective.
+    //
+    // TODO(user): Actually we can if the bound that minimizes the cost is
+    // integer but not the other. Improve the code.
+    if (linear_program.objective_coefficients()[variable] != 0.0) return false;
+
+    // No objective. If the variable domain contains an integer point, then
+    // there is a chance for this variable to be integer. This is because if the
+    // condition on the constraints below is true, then the constraints will
+    // always imply the variable to be inside a [integer_lb, integer_ub] domain.
+    // And if the intersection of this domain with the variable domain is
+    // non-empty, then it contains one or more integer points and we can always
+    // set the variable to one of these integer values.
+    if (std::ceil(lower_bound) > std::floor(upper_bound)) return false;
+  }
+
+  // Primal detection for each constraint containing variable.
+  for (const SparseColumn::Entry entry :
+       linear_program.GetSparseColumn(variable)) {
+    if (!ConstraintSupportsImpliedIntegrality(linear_program, variable,
+                                              entry.row())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool ImpliedIntegerPreprocessor::ConstraintSupportsImpliedIntegrality(
+    const LinearProgram& linear_program, ColIndex variable, RowIndex row) {
+  const SparseMatrix& coefficients_transpose =
+      linear_program.GetTransposeSparseMatrix();
+  const Fractional variable_coefficient = coefficients_transpose.LookUpValue(
+      ColToRowIndex(variable), RowToColIndex(row));
+
+  for (const SparseColumn::Entry entry :
+       coefficients_transpose.column(RowToColIndex(row))) {
+    const ColIndex col = RowToColIndex(entry.row());
+    if (col == variable) continue;
+
+    // Check if the variables in the row are all integers.
+    if (!linear_program.IsVariableInteger(col)) {
+      return false;
+    }
+
+    // Check if the coefficient ratios are all integers.
+ const Fractional coefficient_ratio = + entry.coefficient() / variable_coefficient; + if (!IsIntegerWithinTolerance(coefficient_ratio, + integer_solution_tolerance_)) { + return false; + } + } + + // Check if the constraint bound ratios are integers. + // Note that we ignore infinities. + if (linear_program.constraint_lower_bounds()[row] != -kInfinity) { + const Fractional constraint_lower_bound_ratio = + linear_program.constraint_lower_bounds()[row] / variable_coefficient; + if (!IsIntegerWithinTolerance(constraint_lower_bound_ratio, + integer_solution_tolerance_)) { + return false; + } + } + if (linear_program.constraint_upper_bounds()[row] != kInfinity) { + const Fractional constraint_upper_bound_ratio = + linear_program.constraint_upper_bounds()[row] / variable_coefficient; + if (!IsIntegerWithinTolerance(constraint_upper_bound_ratio, + integer_solution_tolerance_)) { + return false; + } + } + return true; +} + +bool ImpliedIntegerPreprocessor::VariableOccursInAtLeastOneEqualityConstraint( + const LinearProgram& linear_program, ColIndex variable) { + for (const SparseColumn::Entry entry : + linear_program.GetSparseColumn(variable)) { + // Check if the constraint is an equality. 
+ if (linear_program.constraint_upper_bounds()[entry.row()] == + linear_program.constraint_lower_bounds()[entry.row()]) { + return true; + } + } + return false; +} + +// -------------------------------------------------------- +// ReduceCostOverExclusiveOrConstraintPreprocessor +// -------------------------------------------------------- + +bool ReduceCostOverExclusiveOrConstraintPreprocessor::Run( + LinearProgram* linear_program) { + SCOPED_INSTRUCTION_COUNT(time_limit_); + RETURN_VALUE_IF_NULL(linear_program, false); + const RowIndex num_constraints = linear_program->num_constraints(); + const SparseMatrix& transpose = linear_program->GetTransposeSparseMatrix(); + for (RowIndex row(0); row < num_constraints; ++row) { + if (linear_program->constraint_lower_bounds()[row] != 1.0) continue; + if (linear_program->constraint_upper_bounds()[row] != 1.0) continue; + Fractional min_cost = std::numeric_limits::infinity(); + bool constraint_is_exclusive_or = true; + for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) { + const ColIndex var = RowToColIndex(e.row()); + if (!linear_program->IsVariableInteger(var) || + linear_program->variable_lower_bounds()[var] != 0.0 || + linear_program->variable_upper_bounds()[var] != 1.0 || + e.coefficient() != 1.0) { + constraint_is_exclusive_or = false; + break; + } + min_cost = + std::min(min_cost, linear_program->objective_coefficients()[var]); + } + if (constraint_is_exclusive_or && min_cost > 0.0 && + glop::IsFinite(min_cost)) { + for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) { + const ColIndex var = RowToColIndex(e.row()); + const Fractional cost = linear_program->objective_coefficients()[var]; + linear_program->SetObjectiveCoefficient(var, cost - min_cost); + } + linear_program->SetObjectiveOffset(linear_program->objective_offset() + + min_cost); + } + } + return false; +} + +} // namespace operations_research diff --git a/ortools/linear_solver/proto_solver/preprocessor.h 
b/ortools/linear_solver/proto_solver/preprocessor.h new file mode 100644 index 0000000000..fcae6cc025 --- /dev/null +++ b/ortools/linear_solver/proto_solver/preprocessor.h @@ -0,0 +1,198 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_PREPROCESSOR_H_ +#define OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_PREPROCESSOR_H_ + +#include "ortools/glop/preprocessor.h" +#include "ortools/lp_data/lp_data.h" +#include "ortools/lp_data/lp_types.h" + +namespace operations_research { + +// -------------------------------------------------------- +// IntegerBoundsPreprocessor +// -------------------------------------------------------- + +// Makes the bounds of integer variables integer. Makes the bounds of +// constraints involving only integer variables with integer coefficients +// integer. 
+class IntegerBoundsPreprocessor : public glop::Preprocessor { + public: + IntegerBoundsPreprocessor(const glop::GlopParameters* parameters, + glop::Fractional integer_solution_tolerance) + : glop::Preprocessor(parameters), + integer_solution_tolerance_(integer_solution_tolerance) {} + + IntegerBoundsPreprocessor(const IntegerBoundsPreprocessor&) = delete; + IntegerBoundsPreprocessor& operator=(const IntegerBoundsPreprocessor&) = + delete; + ~IntegerBoundsPreprocessor() override = default; + + bool Run(glop::LinearProgram* linear_program) override; + void RecoverSolution(glop::ProblemSolution* /*solution*/) const override {} + + private: + const glop::Fractional integer_solution_tolerance_; +}; + +// -------------------------------------------------------- +// BoundPropagationPreprocessor +// -------------------------------------------------------- + +// It is possible to compute "implied" bounds on a variable from the bounds of +// all the other variables and the constraints in which this variable take +// place. These "implied" bounds can be used to restrict the variable bounds. +// This preprocessor just does that until no more bounds can be propagated or +// a given limit on the number of propagations is reached. +// +// Note(user): In particular, this preprocessor will remove any singleton row. +// +// Note(user): This seems like a general LP preprocessor but it is really +// difficult to postsolve it correctly in the LP context when one wants to have +// a basic optimal solution at the end. By contrast, in Mip context one is happy +// with any form of an optimal solution at the end, thus restoring the full +// solution is trivial. Consequently, bound propagation is implemented as a mip +// preprocessor. 
+class BoundPropagationPreprocessor : public glop::Preprocessor { + public: + BoundPropagationPreprocessor(const glop::GlopParameters* parameters, + glop::Fractional integer_solution_tolerance) + : glop::Preprocessor(parameters), + integer_solution_tolerance_(integer_solution_tolerance) {} + + BoundPropagationPreprocessor(const BoundPropagationPreprocessor&) = delete; + BoundPropagationPreprocessor& operator=(const BoundPropagationPreprocessor&) = + delete; + ~BoundPropagationPreprocessor() override = default; + + bool Run(glop::LinearProgram* linear_program) override; + void RecoverSolution(glop::ProblemSolution* /*solution*/) const override {} + + private: + const glop::Fractional integer_solution_tolerance_; +}; + +// -------------------------------------------------------- +// ImpliedIntegerPreprocessor +// -------------------------------------------------------- + +// In this preprocessor, we find continuous variables which can only take +// integer values and mark them as integer variables. +// +// There are two methods for detecting implied integer variables: 1) primal +// and 2) dual detection. If the variable appears in at least one equality +// constraint then we use primal detection otherwise we use dual detection. +class ImpliedIntegerPreprocessor : public glop::Preprocessor { + public: + explicit ImpliedIntegerPreprocessor( + const glop::GlopParameters* parameters, + glop::Fractional integer_solution_tolerance) + : glop::Preprocessor(parameters), + integer_solution_tolerance_(integer_solution_tolerance) {} + + ImpliedIntegerPreprocessor(const ImpliedIntegerPreprocessor&) = delete; + ImpliedIntegerPreprocessor& operator=(const ImpliedIntegerPreprocessor&) = + delete; + ~ImpliedIntegerPreprocessor() override = default; + + // TODO(user): When some variable are detected to be implied integer, other + // can in turn be detected as such. Change the code to reach a fixed point. 
+  // Calling this multiple times has a similar effect, but is a lot less
+  // efficient and can require O(num_variables) calls to reach the fixed point.
+  bool Run(glop::LinearProgram* linear_program) override;
+  void RecoverSolution(glop::ProblemSolution* /*solution*/) const override {}
+
+ private:
+  // Returns true if the given variable is implied integer. This method is used
+  // for continuous variables appearing in at least one equality constraint.
+  // This is sometimes called "primal" detection in the literature.
+  //
+  // For each equality constraint s in which the given continuous variable x_j
+  // appears, this method checks the primal detection criteria by using
+  // ConstraintSupportsImpliedIntegrality() method.
+  bool AnyEqualityConstraintImpliesIntegrality(
+      const glop::LinearProgram& linear_program, glop::ColIndex variable);
+
+  // Returns true if given variable is implied integer variable. This method is
+  // used for continuous variables for which primal detection is not applicable
+  // i.e. all constraints containing the given variable are inequalities. This
+  // is sometimes called "dual" detection in the literature.
+  //
+  // This method checks the following for the given continuous variable x_j.
+  // a) The lower and upper bound of x_j are integers or the variable has no
+  // cost and its domain contains an integer point.
+  // b) Each constraint containing x_j, when treated as equality under primal
+  // detection, implies x_j as an integer variable.
+  // If both conditions are satisfied then the variable x_j is implied integer
+  // variable.
+  bool AllInequalityConstraintsImplyIntegrality(
+      const glop::LinearProgram& linear_program, glop::ColIndex variable);
+
+  // Returns true if the following conditions are satisfied.
+  //
+  // Let the constraint be lb_s <= \sum_{i=1..n}(a_si*x_i) + a_sj*x_j <= ub_s
+  // a) lb_s / a_sj and ub_s / a_sj are integers.
+  // b) a_si / a_sj is integer for all i.
+  // c) x_i are all integer variables.
+ bool ConstraintSupportsImpliedIntegrality( + const glop::LinearProgram& linear_program, glop::ColIndex variable, + glop::RowIndex row); + + // Returns true if the variable occurs in at least one equality constraint. + bool VariableOccursInAtLeastOneEqualityConstraint( + const glop::LinearProgram& linear_program, glop::ColIndex variable); + + private: + const glop::Fractional integer_solution_tolerance_; +}; + +// -------------------------------------------------------- +// ReduceCostOverExclusiveOrConstraintPreprocessor +// -------------------------------------------------------- + +// For an "exclusive or" constraint (sum Boolean = 1), if all the costs of the +// Boolean variables are positive, then we can subtract the cost of the one +// with minimum cost from the cost of all the others. We can do that for all +// such constraints one by one. +// +// ex: if x,y,z are Boolean variables with respective cost 1,2,1 and x+y+z=1, +// then we can change their costs to 0,1,0 and add 1 to the objective offset +// without changing the cost of any feasible solution. +// +// This seems pretty trivial, but can have a big impact depending on the +// technique we use to solve the MIP. It also makes the objective sparser which +// can only be a good thing. +// +// TODO(user): In more generality, in presence of an exclusive or constraint we +// can shift the cost by any value (even negative), so it may be good to +// maximize the number of coefficients at zero. To investigate. 
+class ReduceCostOverExclusiveOrConstraintPreprocessor + : public glop::Preprocessor { + public: + explicit ReduceCostOverExclusiveOrConstraintPreprocessor( + const glop::GlopParameters* mip_parameters) + : glop::Preprocessor(mip_parameters) {} + ReduceCostOverExclusiveOrConstraintPreprocessor( + const ReduceCostOverExclusiveOrConstraintPreprocessor&) = delete; + ReduceCostOverExclusiveOrConstraintPreprocessor& operator=( + const ReduceCostOverExclusiveOrConstraintPreprocessor&) = delete; + ~ReduceCostOverExclusiveOrConstraintPreprocessor() override = default; + + bool Run(glop::LinearProgram* linear_program) override; + void RecoverSolution(glop::ProblemSolution* /*solution*/) const override {} +}; + +} // namespace operations_research + +#endif // OR_TOOLS_LINEAR_SOLVER_PROTO_SOLVER_PREPROCESSOR_H_ diff --git a/ortools/linear_solver/proto_solver/sat_solver_utils.cc b/ortools/linear_solver/proto_solver/sat_solver_utils.cc index b986539ee9..5aabb5652e 100644 --- a/ortools/linear_solver/proto_solver/sat_solver_utils.cc +++ b/ortools/linear_solver/proto_solver/sat_solver_utils.cc @@ -21,6 +21,7 @@ #include "absl/log/check.h" #include "ortools/glop/parameters.pb.h" #include "ortools/glop/preprocessor.h" +#include "ortools/linear_solver/proto_solver/preprocessor.h" #include "ortools/lp_data/lp_data.h" #include "ortools/lp_data/lp_types.h" #include "ortools/lp_data/proto_utils.h" @@ -29,9 +30,10 @@ namespace operations_research { -#define ADD_LP_PREPROCESSOR(name) \ - names.push_back(#name); \ - lp_preprocessors.push_back(std::make_unique(&glop_params)); +#define ADD_LP_PREPROCESSOR(name, ...) \ + names.push_back(#name); \ + lp_preprocessors.push_back( \ + std::make_unique(&glop_params __VA_OPT__(, ) __VA_ARGS__)); glop::ProblemStatus ApplyMipPresolveSteps( const glop::GlopParameters& glop_params, MPModelProto* model, @@ -60,13 +62,13 @@ glop::ProblemStatus ApplyMipPresolveSteps( // These presolve might change the problem size. 
// // TODO(user): transform the hint instead of disabling presolve. + std::vector names; + std::vector> lp_preprocessors; + const std::string header = + "Running basic LP presolve, initial problem dimensions: "; if (!hint_is_present) { - const std::string header = - "Running basic LP presolve, initial problem dimensions: "; SOLVER_LOG(logger, ""); SOLVER_LOG(logger, header, lp.GetDimensionString()); - std::vector names; - std::vector> lp_preprocessors; ADD_LP_PREPROCESSOR(glop::FixedVariablePreprocessor); ADD_LP_PREPROCESSOR(glop::SingletonPreprocessor); ADD_LP_PREPROCESSOR(glop::ForcingAndImpliedFreeConstraintPreprocessor); @@ -77,19 +79,30 @@ glop::ProblemStatus ApplyMipPresolveSteps( // for the conversion, it is better to have tight bounds even if the bound // propagator is supposed to undo what this presolve would have done. ADD_LP_PREPROCESSOR(glop::UnconstrainedVariablePreprocessor); + } - for (int i = 0; i < lp_preprocessors.size(); ++i) { - if (time_limit->LimitReached()) break; - auto& preprocessor = lp_preprocessors[i]; - preprocessor->SetTimeLimit(time_limit.get()); - preprocessor->UseInMipContext(); - const bool need_postsolve = preprocessor->Run(&lp); - names[i].resize(header.size(), ' '); // padding. - SOLVER_LOG(logger, names[i], lp.GetDimensionString()); - const glop::ProblemStatus status = preprocessor->status(); - if (status != glop::ProblemStatus::INIT) return status; - if (need_postsolve) for_postsolve->push_back(std::move(preprocessor)); - } + // These preprocessors do not need postsolve. + ADD_LP_PREPROCESSOR(IntegerBoundsPreprocessor, 1e-6); + ADD_LP_PREPROCESSOR(BoundPropagationPreprocessor, 1e-6); + ADD_LP_PREPROCESSOR(ImpliedIntegerPreprocessor, 1e-6); + + // We need to re-run this after the ImpliedIntegerPreprocessor because the + // latter does not round the bounds of the constraints involving only + // integer variables and coefficients. 
+ ADD_LP_PREPROCESSOR(IntegerBoundsPreprocessor, 1e-6); + ADD_LP_PREPROCESSOR(ReduceCostOverExclusiveOrConstraintPreprocessor); + + for (int i = 0; i < lp_preprocessors.size(); ++i) { + if (time_limit->LimitReached()) break; + auto& preprocessor = lp_preprocessors[i]; + preprocessor->SetTimeLimit(time_limit.get()); + preprocessor->UseInMipContext(); + const bool need_postsolve = preprocessor->Run(&lp); + names[i].resize(header.size(), ' '); // padding. + SOLVER_LOG(logger, names[i], lp.GetDimensionString()); + const glop::ProblemStatus status = preprocessor->status(); + if (status != glop::ProblemStatus::INIT) return status; + if (need_postsolve) for_postsolve->push_back(std::move(preprocessor)); } // Finally, we make sure all domains contain zero. diff --git a/ortools/linear_solver/python/model_builder_helper.cc b/ortools/linear_solver/python/model_builder_helper.cc index 8036828159..2855f6b2cb 100644 --- a/ortools/linear_solver/python/model_builder_helper.cc +++ b/ortools/linear_solver/python/model_builder_helper.cc @@ -586,8 +586,7 @@ PYBIND11_MODULE(model_builder_helper, m) { absl::StrCat("Evaluating a BoundedLinearExpression '", self.ToString(), "'instance as a Boolean is " - "not supported.") - .c_str()); + "not supported.")); return false; }) .def("__str__", &BoundedLinearExpression::ToString) diff --git a/ortools/lp_data/BUILD.bazel b/ortools/lp_data/BUILD.bazel index 6e3ad9bd97..619de54c9a 100644 --- a/ortools/lp_data/BUILD.bazel +++ b/ortools/lp_data/BUILD.bazel @@ -254,17 +254,6 @@ cc_library( ], ) -#cc_library( -# name = "lp_constraint_classifier", -# srcs = ["lp_constraint_classifier.cc"], -# hdrs = ["lp_constraint_classifier.h"], -# copts = SAFE_FP_CODE, -# deps = [ -# ":lp_data", -# "//ortools/util:fp_utils", -# ], -#) - cc_library( name = "lp_print_utils", srcs = ["lp_print_utils.cc"], diff --git a/ortools/lp_data/README.md b/ortools/lp_data/README.md index e69de29bb2..52d4764280 100644 --- a/ortools/lp_data/README.md +++ b/ortools/lp_data/README.md 
@@ -0,0 +1,86 @@ +# LP Data + +This directory contains a rich collection of C++ libraries for handling Linear +Programming (LP) data structures. + +It provides core components for representing, manipulating, and solving linear +programs, with a focus on efficient handling of sparse data and various utility +functions for pre-processing and analysis. + +## Core Data Structures + +This set of libraries provides the fundamental building blocks for representing +and working with linear programming problems. + +* [`lp_types.h`][lp_types_h]: Defines common types and constants used throughout + the linear programming solver. +* [`lp_data.h`][lp_data_h]: Provides the main `LinearProgram` class for storing + the complete data of a linear program, including the objective function, + constraint matrix, and variable bounds. +* [`lp_utils.h`][lp_utils_h]: Contains basic utility functions for operations on + fractional numbers and row/column vectors. + +## Sparse Data Representation + +Given that large-scale linear programs are often sparse, this directory offers a +suite of libraries for efficient sparse data handling. + +* [`sparse.h`][sparse_h]: Implements data structures for sparse matrices, based + on well-established references in the field of direct methods for sparse + matrices. +* [`sparse_vector.h`][sparse_vector_h]: Provides classes to represent sparse + vectors efficiently. +* [`sparse_column.h`][sparse_column_h] & [`sparse_row.h`][sparse_row_h]: + Specializations of sparse vectors for column-oriented and row-oriented matrix + storage schemes. +* [`scattered_vector.h`][scattered_vector_h]: Implements vectors that offer a + sparse interface to what is internally a dense storage, which can be useful + for certain computations. + +## LP Solvers and Utilities + +A collection of tools for preprocessing, analyzing, and manipulating linear +programs. 
+ +* [`matrix_scaler.h`][matrix_scaler_h]: Provides the `SparseMatrixScaler` class, + which scales a `SparseMatrix` to improve numerical stability during the + solving process. +* [`lp_decomposer.h`][lp_decomposer_h]: Implements a tool to decompose a large + `LinearProgram` into several smaller, independent subproblems by identifying + disconnected components in the constraint matrix. +* [`permutation.h`][permutation_h]: Contains utilities for handling row and + column permutations on LP data structures. + +## Parsers and I/O Utilities + +This group of libraries handles reading and writing LP data in various formats. + +* [`lp_parser.h`][lp_parser_h]: A simple parser for creating a linear program + from a string representation. +* [`mps_reader.h`][mps_reader_h]: A reader for the industry-standard MPS file + format for mathematical programming problems. +* [`sol_reader.h`][sol_reader_h]: A reader for .sol files, which are used to + specify solution values for a given model. +* [`proto_utils.h`][proto_utils_h]: Provides utilities to convert + `LinearProgram` objects to and from the MPModelProto protobuf format. +* [`lp_print_utils.h`][lp_print_utils_h]: Contains utilities to display linear + expressions in a human-readable way, including rational approximations. 
+ + + +[lp_types_h]: ../lp_data/lp_types.h +[lp_data_h]: ../lp_data/lp_data.h +[lp_utils_h]: ../lp_data/lp_utils.h +[sparse_h]: ../lp_data/sparse.h +[sparse_vector_h]: ../lp_data/sparse_vector.h +[sparse_column_h]: ../lp_data/sparse_column.h +[sparse_row_h]: ../lp_data/sparse_row.h +[scattered_vector_h]: ../lp_data/scattered_vector.h +[matrix_scaler_h]: ../lp_data/matrix_scaler.h +[lp_decomposer_h]: ../lp_data/lp_decomposer.h +[permutation_h]: ../lp_data/permutation.h +[lp_parser_h]: ../lp_data/lp_parser.h +[mps_reader_h]: ../lp_data/mps_reader.h +[sol_reader_h]: ../lp_data/sol_reader.h +[proto_utils_h]: ../lp_data/proto_utils.h +[lp_print_utils_h]: ../lp_data/lp_print_utils.h diff --git a/ortools/lp_data/lp_data.h b/ortools/lp_data/lp_data.h index 236423daa1..478a1a35be 100644 --- a/ortools/lp_data/lp_data.h +++ b/ortools/lp_data/lp_data.h @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Storage classes for Linear Programs. // // LinearProgram stores the complete data for a Linear Program: diff --git a/ortools/lp_data/sparse.h b/ortools/lp_data/sparse.h index fecc42a79c..cc856fc3a8 100644 --- a/ortools/lp_data/sparse.h +++ b/ortools/lp_data/sparse.h @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// // The following are very good references for terminology, data structures, // and algorithms: // diff --git a/ortools/math_opt/cpp/solver_resources.cc b/ortools/math_opt/cpp/solver_resources.cc index 70ee369993..db6dee14f1 100644 --- a/ortools/math_opt/cpp/solver_resources.cc +++ b/ortools/math_opt/cpp/solver_resources.cc @@ -13,6 +13,7 @@ #include "ortools/math_opt/cpp/solver_resources.h" +#include #include #include @@ -23,6 +24,11 @@ namespace operations_research::math_opt { +std::ostream& operator<<(std::ostream& out, const SolverResources& resources) { + out << '{' << AbslUnparseFlag(resources) << '}'; + return out; +} + SolverResourcesProto SolverResources::Proto() const { SolverResourcesProto ret; if (cpu.has_value()) { diff --git a/ortools/math_opt/cpp/solver_resources.h b/ortools/math_opt/cpp/solver_resources.h index ffccb37db6..54ca852a84 100644 --- a/ortools/math_opt/cpp/solver_resources.h +++ b/ortools/math_opt/cpp/solver_resources.h @@ -18,6 +18,7 @@ #define OR_TOOLS_MATH_OPT_CPP_SOLVER_RESOURCES_H_ #include +#include #include #include "absl/status/statusor.h" @@ -74,6 +75,8 @@ struct SolverResources { const SolverResourcesProto& proto); }; +std::ostream& operator<<(std::ostream& out, const SolverResources& resources); + bool AbslParseFlag(absl::string_view text, SolverResources* solver_resources, std::string* error); diff --git a/ortools/math_opt/labs/BUILD.bazel b/ortools/math_opt/labs/BUILD.bazel index 5fd8f93eab..a786ffdb3a 100644 --- a/ortools/math_opt/labs/BUILD.bazel +++ b/ortools/math_opt/labs/BUILD.bazel @@ -88,3 +88,17 @@ cc_library( ], alwayslink = 1, ) + +cc_library( + name = "scaler_util", + srcs = ["scaler_util.cc"], + hdrs = ["scaler_util.h"], + visibility = ["//visibility:public"], + deps = [ + "//ortools/base", + "//ortools/base:types", + "//ortools/util:fp_utils", + "@abseil-cpp//absl/log:check", + "@abseil-cpp//absl/strings:str_format", + ], +) diff --git a/ortools/math_opt/python/BUILD.bazel b/ortools/math_opt/python/BUILD.bazel 
index facc0b7e87..ce37a84b4d 100644 --- a/ortools/math_opt/python/BUILD.bazel +++ b/ortools/math_opt/python/BUILD.bazel @@ -51,66 +51,6 @@ py_library( deps = ["//ortools/math_opt/python/elemental"], ) -py_library( - name = "model_storage", - srcs = ["model_storage.py"], - visibility = ["//ortools/math_opt/python:__subpackages__"], - deps = [ - "//ortools/math_opt:model_py_pb2", - "//ortools/math_opt:model_update_py_pb2", - ], -) - -py_library( - name = "hash_model_storage", - srcs = ["hash_model_storage.py"], - deps = [ - ":model_storage", - "//ortools/math_opt:model_py_pb2", - "//ortools/math_opt:model_update_py_pb2", - "//ortools/math_opt:sparse_containers_py_pb2", - ], -) - -py_test( - name = "hash_model_storage_test", - size = "small", - srcs = ["hash_model_storage_test.py"], - deps = [ - ":hash_model_storage", - requirement("absl-py"), - ], -) - -py_test( - name = "model_storage_test", - size = "small", - srcs = ["model_storage_test.py"], - deps = [ - ":hash_model_storage", - ":model_storage", - requirement("absl-py"), - "//ortools/math_opt:model_py_pb2", - "//ortools/math_opt:sparse_containers_py_pb2", - "//ortools/math_opt/python/testing:compare_proto", - ], -) - -py_test( - name = "model_storage_update_test", - size = "small", - srcs = ["model_storage_update_test.py"], - deps = [ - ":hash_model_storage", - ":model_storage", - requirement("absl-py"), - "//ortools/math_opt:model_py_pb2", - "//ortools/math_opt:model_update_py_pb2", - "//ortools/math_opt:sparse_containers_py_pb2", - "//ortools/math_opt/python/testing:compare_proto", - ], -) - py_library( name = "model", srcs = ["model.py"], diff --git a/ortools/math_opt/python/hash_model_storage.py b/ortools/math_opt/python/hash_model_storage.py deleted file mode 100644 index 96cad3aa77..0000000000 --- a/ortools/math_opt/python/hash_model_storage.py +++ /dev/null @@ -1,843 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file 
except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""A minimal pure python implementation of model_storage.ModelStorage.""" - -from typing import Dict, Iterable, Iterator, Optional, Set, Tuple -import weakref - -from ortools.math_opt import model_pb2 -from ortools.math_opt import model_update_pb2 -from ortools.math_opt import sparse_containers_pb2 -from ortools.math_opt.python import model_storage - -_QuadraticKey = model_storage.QuadraticTermIdKey - - -class _UpdateTracker(model_storage.StorageUpdateTracker): - """Tracks model updates for HashModelStorage.""" - - def __init__(self, mod: "HashModelStorage"): - self.retired: bool = False - self.model: "HashModelStorage" = mod - # Changes for variables with id < variables_checkpoint are explicitly - # tracked. - self.variables_checkpoint: int = self.model._next_var_id - # Changes for linear constraints with id < linear_constraints_checkpoint - # are explicitly tracked. 
- self.linear_constraints_checkpoint: int = self.model._next_lin_con_id - - self.objective_direction: bool = False - self.objective_offset: bool = False - - self.variable_deletes: Set[int] = set() - self.variable_lbs: Set[int] = set() - self.variable_ubs: Set[int] = set() - self.variable_integers: Set[int] = set() - - self.linear_objective_coefficients: Set[int] = set() - self.quadratic_objective_coefficients: Set[_QuadraticKey] = set() - - self.linear_constraint_deletes: Set[int] = set() - self.linear_constraint_lbs: Set[int] = set() - self.linear_constraint_ubs: Set[int] = set() - - self.linear_constraint_matrix: Set[Tuple[int, int]] = set() - - def export_update(self) -> Optional[model_update_pb2.ModelUpdateProto]: - if self.retired: - raise model_storage.UsedUpdateTrackerAfterRemovalError() - if ( - self.variables_checkpoint == self.model.next_variable_id() - and ( - self.linear_constraints_checkpoint - == self.model.next_linear_constraint_id() - ) - and not self.objective_direction - and not self.objective_offset - and not self.variable_deletes - and not self.variable_lbs - and not self.variable_ubs - and not self.variable_integers - and not self.linear_objective_coefficients - and not self.quadratic_objective_coefficients - and not self.linear_constraint_deletes - and not self.linear_constraint_lbs - and not self.linear_constraint_ubs - and not self.linear_constraint_matrix - ): - return None - result = model_update_pb2.ModelUpdateProto() - result.deleted_variable_ids[:] = sorted(self.variable_deletes) - result.deleted_linear_constraint_ids[:] = sorted(self.linear_constraint_deletes) - # Variable updates - _set_sparse_double_vector( - sorted((vid, self.model.get_variable_lb(vid)) for vid in self.variable_lbs), - result.variable_updates.lower_bounds, - ) - _set_sparse_double_vector( - sorted((vid, self.model.get_variable_ub(vid)) for vid in self.variable_ubs), - result.variable_updates.upper_bounds, - ) - _set_sparse_bool_vector( - sorted( - (vid, 
self.model.get_variable_is_integer(vid)) - for vid in self.variable_integers - ), - result.variable_updates.integers, - ) - # Linear constraint updates - _set_sparse_double_vector( - sorted( - (cid, self.model.get_linear_constraint_lb(cid)) - for cid in self.linear_constraint_lbs - ), - result.linear_constraint_updates.lower_bounds, - ) - _set_sparse_double_vector( - sorted( - (cid, self.model.get_linear_constraint_ub(cid)) - for cid in self.linear_constraint_ubs - ), - result.linear_constraint_updates.upper_bounds, - ) - # New variables and constraints - new_vars = [] - for vid in range(self.variables_checkpoint, self.model.next_variable_id()): - var = self.model.variables.get(vid) - if var is not None: - new_vars.append((vid, var)) - _variables_to_proto(new_vars, result.new_variables) - new_lin_cons = [] - for lin_con_id in range( - self.linear_constraints_checkpoint, - self.model.next_linear_constraint_id(), - ): - lin_con = self.model.linear_constraints.get(lin_con_id) - if lin_con is not None: - new_lin_cons.append((lin_con_id, lin_con)) - _linear_constraints_to_proto(new_lin_cons, result.new_linear_constraints) - # Objective update - if self.objective_direction: - result.objective_updates.direction_update = self.model.get_is_maximize() - if self.objective_offset: - result.objective_updates.offset_update = self.model.get_objective_offset() - _set_sparse_double_vector( - sorted( - (var, self.model.get_linear_objective_coefficient(var)) - for var in self.linear_objective_coefficients - ), - result.objective_updates.linear_coefficients, - ) - for new_var in range(self.variables_checkpoint, self.model.next_variable_id()): - # NOTE: the value will be 0.0 if either the coefficient is not set or the - # variable has been deleted. Calling - # model.get_linear_objective_coefficient() throws an exception if the - # variable has been deleted. 
- obj_coef = self.model.linear_objective_coefficient.get(new_var, 0.0) - if obj_coef: - result.objective_updates.linear_coefficients.ids.append(new_var) - result.objective_updates.linear_coefficients.values.append(obj_coef) - - quadratic_objective_updates = [ - ( - key.id1, - key.id2, - self.model.get_quadratic_objective_coefficient(key.id1, key.id2), - ) - for key in self.quadratic_objective_coefficients - ] - for new_var in range(self.variables_checkpoint, self.model.next_variable_id()): - if self.model.variable_exists(new_var): - for other_var in self.model.get_quadratic_objective_adjacent_variables( - new_var - ): - key = _QuadraticKey(new_var, other_var) - if new_var >= other_var: - key = _QuadraticKey(new_var, other_var) - quadratic_objective_updates.append( - ( - key.id1, - key.id2, - self.model.get_quadratic_objective_coefficient( - key.id1, key.id2 - ), - ) - ) - quadratic_objective_updates.sort() - if quadratic_objective_updates: - first_var_ids, second_var_ids, coefficients = zip( - *quadratic_objective_updates - ) - result.objective_updates.quadratic_coefficients.row_ids[:] = first_var_ids - result.objective_updates.quadratic_coefficients.column_ids[:] = ( - second_var_ids - ) - result.objective_updates.quadratic_coefficients.coefficients[:] = ( - coefficients - ) - # Linear constraint matrix updates - matrix_updates = [ - (l, v, self.model.get_linear_constraint_coefficient(l, v)) - for (l, v) in self.linear_constraint_matrix - ] - for new_var in range(self.variables_checkpoint, self.model.next_variable_id()): - if self.model.variable_exists(new_var): - for lin_con in self.model.get_linear_constraints_with_variable(new_var): - matrix_updates.append( - ( - lin_con, - new_var, - self.model.get_linear_constraint_coefficient( - lin_con, new_var - ), - ) - ) - for new_lin_con in range( - self.linear_constraints_checkpoint, - self.model.next_linear_constraint_id(), - ): - if self.model.linear_constraint_exists(new_lin_con): - for var in 
self.model.get_variables_for_linear_constraint(new_lin_con): - # We have already gotten the new variables above. Note that we do at - # most twice as much work as we should from this. - if var < self.variables_checkpoint: - matrix_updates.append( - ( - new_lin_con, - var, - self.model.get_linear_constraint_coefficient( - new_lin_con, var - ), - ) - ) - matrix_updates.sort() - if matrix_updates: - lin_cons, variables, coefs = zip(*matrix_updates) - result.linear_constraint_matrix_updates.row_ids[:] = lin_cons - result.linear_constraint_matrix_updates.column_ids[:] = variables - result.linear_constraint_matrix_updates.coefficients[:] = coefs - return result - - def advance_checkpoint(self) -> None: - if self.retired: - raise model_storage.UsedUpdateTrackerAfterRemovalError() - self.objective_direction = False - self.objective_offset = False - self.variable_deletes = set() - self.variable_lbs = set() - self.variable_ubs = set() - self.variable_integers = set() - self.linear_objective_coefficients = set() - self.linear_constraint_deletes = set() - self.linear_constraint_lbs = set() - self.linear_constraint_ubs = set() - self.linear_constraint_matrix = set() - - self.variables_checkpoint = self.model.next_variable_id() - self.linear_constraints_checkpoint = self.model.next_linear_constraint_id() - - -class _VariableStorage: - """Data specific to each decision variable in the optimization problem.""" - - def __init__(self, lb: float, ub: float, is_integer: bool, name: str) -> None: - self.lower_bound: float = lb - self.upper_bound: float = ub - self.is_integer: bool = is_integer - self.name: str = name - self.linear_constraint_nonzeros: Set[int] = set() - - -class _LinearConstraintStorage: - """Data specific to each linear constraint in the optimization problem.""" - - def __init__(self, lb: float, ub: float, name: str) -> None: - self.lower_bound: float = lb - self.upper_bound: float = ub - self.name: str = name - self.variable_nonzeros: Set[int] = set() - - -class 
_QuadraticTermStorage: - """Data describing quadratic terms with non-zero coefficients.""" - - def __init__(self) -> None: - self._coefficients: Dict[_QuadraticKey, float] = {} - # For a variable i that does not appear in a quadratic objective term with - # a non-zero coefficient, we may have self._adjacent_variable[i] being an - # empty set or i not appearing in self._adjacent_variable.keys() (e.g. - # depeding on whether the variable previously appeared in a quadratic term). - self._adjacent_variables: Dict[int, Set[int]] = {} - - def __bool__(self) -> bool: - """Returns true if and only if there are any quadratic terms with non-zero coefficients.""" - return bool(self._coefficients) - - def get_adjacent_variables(self, variable_id: int) -> Iterator[int]: - """Yields the variables multiplying a variable in the stored quadratic terms. - - If variable_id is not in the model the function yields the empty set. - - Args: - variable_id: Function yields the variables multiplying variable_id in the - stored quadratic terms. - - Yields: - The variables multiplying variable_id in the stored quadratic terms. 
- """ - yield from self._adjacent_variables.get(variable_id, ()) - - def keys(self) -> Iterator[_QuadraticKey]: - """Yields the variable-pair keys associated to the stored quadratic terms.""" - yield from self._coefficients.keys() - - def coefficients(self) -> Iterator[model_storage.QuadraticEntry]: - """Yields the stored quadratic terms as QuadraticEntry.""" - for key, coef in self._coefficients.items(): - yield model_storage.QuadraticEntry(id_key=key, coefficient=coef) - - def delete_variable(self, variable_id: int) -> None: - """Updates the data structure to consider variable_id as deleted.""" - if variable_id not in self._adjacent_variables.keys(): - return - for adjacent_variable_id in self._adjacent_variables[variable_id]: - if variable_id != adjacent_variable_id: - self._adjacent_variables[adjacent_variable_id].remove(variable_id) - del self._coefficients[_QuadraticKey(variable_id, adjacent_variable_id)] - self._adjacent_variables[variable_id].clear() - - def clear(self) -> None: - """Clears the data structure.""" - self._coefficients.clear() - self._adjacent_variables.clear() - - def set_coefficient( - self, first_variable_id: int, second_variable_id: int, value: float - ) -> bool: - """Sets the coefficient for the quadratic term associated to the product between two variables. - - The ordering of the input variables does not matter. - - Args: - first_variable_id: The first variable in the product. - second_variable_id: The second variable in the product. - value: The value of the coefficient. - - Returns: - True if the coefficient is updated, False otherwise. - """ - key = _QuadraticKey(first_variable_id, second_variable_id) - if value == self._coefficients.get(key, 0.0): - return False - if value == 0.0: - # Assuming self._coefficients/_adjacent_variables are filled according - # to get_coefficient(key) != 0.0. 
- del self._coefficients[key] - self._adjacent_variables[first_variable_id].remove(second_variable_id) - if first_variable_id != second_variable_id: - self._adjacent_variables[second_variable_id].remove(first_variable_id) - else: - if first_variable_id not in self._adjacent_variables.keys(): - self._adjacent_variables[first_variable_id] = set() - if second_variable_id not in self._adjacent_variables.keys(): - self._adjacent_variables[second_variable_id] = set() - self._coefficients[key] = value - self._adjacent_variables[first_variable_id].add(second_variable_id) - self._adjacent_variables[second_variable_id].add(first_variable_id) - return True - - def get_coefficient(self, first_variable_id: int, second_variable_id: int) -> float: - """Gets the objective coefficient for the quadratic term associated to the product between two variables. - - The ordering of the input variables does not matter. - - Args: - first_variable_id: The first variable in the product. - second_variable_id: The second variable in the product. - - Returns: - The value of the coefficient. - """ - return self._coefficients.get( - _QuadraticKey(first_variable_id, second_variable_id), 0.0 - ) - - -class HashModelStorage(model_storage.ModelStorage): - """A simple, pure python implementation of ModelStorage. - - Attributes: - _linear_constraint_matrix: A dictionary with (linear_constraint_id, - variable_id) keys and numeric values, representing the matrix A for the - constraints lb_c <= A*x <= ub_c. Invariant: the values have no zeros. - linear_objective_coefficient: A dictionary with variable_id keys and - numeric values, representing the linear terms in the objective. - Invariant: the values have no zeros. - _quadratic_objective_coefficients: A data structure containing quadratic - terms in the objective. 
- """ - - def __init__(self, name: str = "") -> None: - super().__init__() - self._name: str = name - self.variables: Dict[int, _VariableStorage] = {} - self.linear_constraints: Dict[int, _LinearConstraintStorage] = {} - self._linear_constraint_matrix: Dict[Tuple[int, int], float] = {} # - self._is_maximize: bool = False - self._objective_offset: float = 0.0 - self.linear_objective_coefficient: Dict[int, float] = {} - self._quadratic_objective_coefficients: _QuadraticTermStorage = ( - _QuadraticTermStorage() - ) - self._next_var_id: int = 0 - self._next_lin_con_id: int = 0 - self._update_trackers: weakref.WeakSet[_UpdateTracker] = weakref.WeakSet() - - @property - def name(self) -> str: - return self._name - - def add_variable(self, lb: float, ub: float, is_integer: bool, name: str) -> int: - var_id = self._next_var_id - self._next_var_id += 1 - self.variables[var_id] = _VariableStorage(lb, ub, is_integer, name) - return var_id - - def delete_variable(self, variable_id: int) -> None: - self._check_variable_id(variable_id) - variable = self.variables[variable_id] - # First update the watchers - for watcher in self._update_trackers: - if variable_id < watcher.variables_checkpoint: - watcher.variable_deletes.add(variable_id) - watcher.variable_lbs.discard(variable_id) - watcher.variable_ubs.discard(variable_id) - watcher.variable_integers.discard(variable_id) - watcher.linear_objective_coefficients.discard(variable_id) - for ( - other_variable_id - ) in self._quadratic_objective_coefficients.get_adjacent_variables( - variable_id - ): - key = _QuadraticKey(variable_id, other_variable_id) - watcher.quadratic_objective_coefficients.discard(key) - for lin_con_id in variable.linear_constraint_nonzeros: - if lin_con_id < watcher.linear_constraints_checkpoint: - watcher.linear_constraint_matrix.discard( - (lin_con_id, variable_id) - ) - # Then update self. 
- for lin_con_id in variable.linear_constraint_nonzeros: - self.linear_constraints[lin_con_id].variable_nonzeros.remove(variable_id) - del self._linear_constraint_matrix[(lin_con_id, variable_id)] - del self.variables[variable_id] - self.linear_objective_coefficient.pop(variable_id, None) - self._quadratic_objective_coefficients.delete_variable(variable_id) - - def variable_exists(self, variable_id: int) -> bool: - return variable_id in self.variables - - def next_variable_id(self) -> int: - return self._next_var_id - - def set_variable_lb(self, variable_id: int, lb: float) -> None: - self._check_variable_id(variable_id) - if lb == self.variables[variable_id].lower_bound: - return - self.variables[variable_id].lower_bound = lb - for watcher in self._update_trackers: - if variable_id < watcher.variables_checkpoint: - watcher.variable_lbs.add(variable_id) - - def set_variable_ub(self, variable_id: int, ub: float) -> None: - self._check_variable_id(variable_id) - if ub == self.variables[variable_id].upper_bound: - return - self.variables[variable_id].upper_bound = ub - for watcher in self._update_trackers: - if variable_id < watcher.variables_checkpoint: - watcher.variable_ubs.add(variable_id) - - def set_variable_is_integer(self, variable_id: int, is_integer: bool) -> None: - self._check_variable_id(variable_id) - if is_integer == self.variables[variable_id].is_integer: - return - self.variables[variable_id].is_integer = is_integer - for watcher in self._update_trackers: - if variable_id < watcher.variables_checkpoint: - watcher.variable_integers.add(variable_id) - - def get_variable_lb(self, variable_id: int) -> float: - self._check_variable_id(variable_id) - return self.variables[variable_id].lower_bound - - def get_variable_ub(self, variable_id: int) -> float: - self._check_variable_id(variable_id) - return self.variables[variable_id].upper_bound - - def get_variable_is_integer(self, variable_id: int) -> bool: - self._check_variable_id(variable_id) - return 
self.variables[variable_id].is_integer - - def get_variable_name(self, variable_id: int) -> str: - self._check_variable_id(variable_id) - return self.variables[variable_id].name - - def get_variables(self) -> Iterator[int]: - yield from self.variables.keys() - - def add_linear_constraint(self, lb: float, ub: float, name: str) -> int: - lin_con_id = self._next_lin_con_id - self._next_lin_con_id += 1 - self.linear_constraints[lin_con_id] = _LinearConstraintStorage(lb, ub, name) - return lin_con_id - - def delete_linear_constraint(self, linear_constraint_id: int) -> None: - self._check_linear_constraint_id(linear_constraint_id) - con = self.linear_constraints[linear_constraint_id] - # First update the watchers - for watcher in self._update_trackers: - if linear_constraint_id < watcher.linear_constraints_checkpoint: - watcher.linear_constraint_deletes.add(linear_constraint_id) - watcher.linear_constraint_lbs.discard(linear_constraint_id) - watcher.linear_constraint_ubs.discard(linear_constraint_id) - for var_id in con.variable_nonzeros: - if var_id < watcher.variables_checkpoint: - watcher.linear_constraint_matrix.discard( - (linear_constraint_id, var_id) - ) - # Then update self. 
- for var_id in con.variable_nonzeros: - self.variables[var_id].linear_constraint_nonzeros.remove( - linear_constraint_id - ) - del self._linear_constraint_matrix[(linear_constraint_id, var_id)] - del self.linear_constraints[linear_constraint_id] - - def linear_constraint_exists(self, linear_constraint_id: int) -> bool: - return linear_constraint_id in self.linear_constraints - - def next_linear_constraint_id(self) -> int: - return self._next_lin_con_id - - def set_linear_constraint_lb(self, linear_constraint_id: int, lb: float) -> None: - self._check_linear_constraint_id(linear_constraint_id) - if lb == self.linear_constraints[linear_constraint_id].lower_bound: - return - self.linear_constraints[linear_constraint_id].lower_bound = lb - for watcher in self._update_trackers: - if linear_constraint_id < watcher.linear_constraints_checkpoint: - watcher.linear_constraint_lbs.add(linear_constraint_id) - - def set_linear_constraint_ub(self, linear_constraint_id: int, ub: float) -> None: - self._check_linear_constraint_id(linear_constraint_id) - if ub == self.linear_constraints[linear_constraint_id].upper_bound: - return - self.linear_constraints[linear_constraint_id].upper_bound = ub - for watcher in self._update_trackers: - if linear_constraint_id < watcher.linear_constraints_checkpoint: - watcher.linear_constraint_ubs.add(linear_constraint_id) - - def get_linear_constraint_lb(self, linear_constraint_id: int) -> float: - self._check_linear_constraint_id(linear_constraint_id) - return self.linear_constraints[linear_constraint_id].lower_bound - - def get_linear_constraint_ub(self, linear_constraint_id: int) -> float: - self._check_linear_constraint_id(linear_constraint_id) - return self.linear_constraints[linear_constraint_id].upper_bound - - def get_linear_constraint_name(self, linear_constraint_id: int) -> str: - self._check_linear_constraint_id(linear_constraint_id) - return self.linear_constraints[linear_constraint_id].name - - def get_linear_constraints(self) -> 
Iterator[int]: - yield from self.linear_constraints.keys() - - def set_linear_constraint_coefficient( - self, linear_constraint_id: int, variable_id: int, value: float - ) -> None: - self._check_linear_constraint_id(linear_constraint_id) - self._check_variable_id(variable_id) - if value == self._linear_constraint_matrix.get( - (linear_constraint_id, variable_id), 0.0 - ): - return - if value == 0.0: - self._linear_constraint_matrix.pop( - (linear_constraint_id, variable_id), None - ) - self.variables[variable_id].linear_constraint_nonzeros.discard( - linear_constraint_id - ) - self.linear_constraints[linear_constraint_id].variable_nonzeros.discard( - variable_id - ) - else: - self._linear_constraint_matrix[(linear_constraint_id, variable_id)] = value - self.variables[variable_id].linear_constraint_nonzeros.add( - linear_constraint_id - ) - self.linear_constraints[linear_constraint_id].variable_nonzeros.add( - variable_id - ) - for watcher in self._update_trackers: - if ( - variable_id < watcher.variables_checkpoint - and linear_constraint_id < watcher.linear_constraints_checkpoint - ): - watcher.linear_constraint_matrix.add( - (linear_constraint_id, variable_id) - ) - - def get_linear_constraint_coefficient( - self, linear_constraint_id: int, variable_id: int - ) -> float: - self._check_linear_constraint_id(linear_constraint_id) - self._check_variable_id(variable_id) - return self._linear_constraint_matrix.get( - (linear_constraint_id, variable_id), 0.0 - ) - - def get_linear_constraints_with_variable(self, variable_id: int) -> Iterator[int]: - self._check_variable_id(variable_id) - yield from self.variables[variable_id].linear_constraint_nonzeros - - def get_variables_for_linear_constraint( - self, linear_constraint_id: int - ) -> Iterator[int]: - self._check_linear_constraint_id(linear_constraint_id) - yield from self.linear_constraints[linear_constraint_id].variable_nonzeros - - def get_linear_constraint_matrix_entries( - self, - ) -> 
Iterator[model_storage.LinearConstraintMatrixIdEntry]: - for (constraint, variable), coef in self._linear_constraint_matrix.items(): - yield model_storage.LinearConstraintMatrixIdEntry( - linear_constraint_id=constraint, - variable_id=variable, - coefficient=coef, - ) - - def clear_objective(self) -> None: - for variable_id in self.linear_objective_coefficient: - for watcher in self._update_trackers: - if variable_id < watcher.variables_checkpoint: - watcher.linear_objective_coefficients.add(variable_id) - self.linear_objective_coefficient.clear() - for key in self._quadratic_objective_coefficients.keys(): - for watcher in self._update_trackers: - if key.id2 < watcher.variables_checkpoint: - watcher.quadratic_objective_coefficients.add(key) - self._quadratic_objective_coefficients.clear() - self.set_objective_offset(0.0) - - def set_linear_objective_coefficient(self, variable_id: int, value: float) -> None: - self._check_variable_id(variable_id) - if value == self.linear_objective_coefficient.get(variable_id, 0.0): - return - if value == 0.0: - self.linear_objective_coefficient.pop(variable_id, None) - else: - self.linear_objective_coefficient[variable_id] = value - for watcher in self._update_trackers: - if variable_id < watcher.variables_checkpoint: - watcher.linear_objective_coefficients.add(variable_id) - - def get_linear_objective_coefficient(self, variable_id: int) -> float: - self._check_variable_id(variable_id) - return self.linear_objective_coefficient.get(variable_id, 0.0) - - def get_linear_objective_coefficients( - self, - ) -> Iterator[model_storage.LinearObjectiveEntry]: - for var_id, coef in self.linear_objective_coefficient.items(): - yield model_storage.LinearObjectiveEntry( - variable_id=var_id, coefficient=coef - ) - - def set_quadratic_objective_coefficient( - self, first_variable_id: int, second_variable_id: int, value: float - ) -> None: - self._check_variable_id(first_variable_id) - self._check_variable_id(second_variable_id) - updated = 
self._quadratic_objective_coefficients.set_coefficient( - first_variable_id, second_variable_id, value - ) - if updated: - for watcher in self._update_trackers: - if ( - max(first_variable_id, second_variable_id) - < watcher.variables_checkpoint - ): - watcher.quadratic_objective_coefficients.add( - _QuadraticKey(first_variable_id, second_variable_id) - ) - - def get_quadratic_objective_coefficient( - self, first_variable_id: int, second_variable_id: int - ) -> float: - self._check_variable_id(first_variable_id) - self._check_variable_id(second_variable_id) - return self._quadratic_objective_coefficients.get_coefficient( - first_variable_id, second_variable_id - ) - - def get_quadratic_objective_coefficients( - self, - ) -> Iterator[model_storage.QuadraticEntry]: - yield from self._quadratic_objective_coefficients.coefficients() - - def get_quadratic_objective_adjacent_variables( - self, variable_id: int - ) -> Iterator[int]: - self._check_variable_id(variable_id) - yield from self._quadratic_objective_coefficients.get_adjacent_variables( - variable_id - ) - - def set_is_maximize(self, is_maximize: bool) -> None: - if self._is_maximize == is_maximize: - return - self._is_maximize = is_maximize - for watcher in self._update_trackers: - watcher.objective_direction = True - - def get_is_maximize(self) -> bool: - return self._is_maximize - - def set_objective_offset(self, offset: float) -> None: - if self._objective_offset == offset: - return - self._objective_offset = offset - for watcher in self._update_trackers: - watcher.objective_offset = True - - def get_objective_offset(self) -> float: - return self._objective_offset - - def export_model(self) -> model_pb2.ModelProto: - m: model_pb2.ModelProto = model_pb2.ModelProto() - m.name = self._name - _variables_to_proto(self.variables.items(), m.variables) - _linear_constraints_to_proto( - self.linear_constraints.items(), m.linear_constraints - ) - m.objective.maximize = self._is_maximize - m.objective.offset = 
self._objective_offset - if self.linear_objective_coefficient: - obj_ids, obj_coefs = zip(*sorted(self.linear_objective_coefficient.items())) - m.objective.linear_coefficients.ids.extend(obj_ids) - m.objective.linear_coefficients.values.extend(obj_coefs) - if self._quadratic_objective_coefficients: - first_var_ids, second_var_ids, coefficients = zip( - *sorted( - [ - (entry.id_key.id1, entry.id_key.id2, entry.coefficient) - for entry in self._quadratic_objective_coefficients.coefficients() - ] - ) - ) - m.objective.quadratic_coefficients.row_ids.extend(first_var_ids) - m.objective.quadratic_coefficients.column_ids.extend(second_var_ids) - m.objective.quadratic_coefficients.coefficients.extend(coefficients) - if self._linear_constraint_matrix: - flat_matrix_items = [ - (con_id, var_id, coef) - for ((con_id, var_id), coef) in self._linear_constraint_matrix.items() - ] - lin_con_ids, var_ids, lin_con_coefs = zip(*sorted(flat_matrix_items)) - m.linear_constraint_matrix.row_ids.extend(lin_con_ids) - m.linear_constraint_matrix.column_ids.extend(var_ids) - m.linear_constraint_matrix.coefficients.extend(lin_con_coefs) - return m - - def add_update_tracker(self) -> model_storage.StorageUpdateTracker: - tracker = _UpdateTracker(self) - self._update_trackers.add(tracker) - return tracker - - def remove_update_tracker( - self, tracker: model_storage.StorageUpdateTracker - ) -> None: - self._update_trackers.remove(tracker) - tracker.retired = True - - def _check_variable_id(self, variable_id: int) -> None: - if variable_id not in self.variables: - raise model_storage.BadVariableIdError(variable_id) - - def _check_linear_constraint_id(self, linear_constraint_id: int) -> None: - if linear_constraint_id not in self.linear_constraints: - raise model_storage.BadLinearConstraintIdError(linear_constraint_id) - - -def _set_sparse_double_vector( - id_value_pairs: Iterable[Tuple[int, float]], - proto: sparse_containers_pb2.SparseDoubleVectorProto, -) -> None: - """id_value_pairs must be 
sorted, proto is filled.""" - if not id_value_pairs: - return - ids, values = zip(*id_value_pairs) - proto.ids[:] = ids - proto.values[:] = values - - -def _set_sparse_bool_vector( - id_value_pairs: Iterable[Tuple[int, bool]], - proto: sparse_containers_pb2.SparseBoolVectorProto, -) -> None: - """id_value_pairs must be sorted, proto is filled.""" - if not id_value_pairs: - return - ids, values = zip(*id_value_pairs) - proto.ids[:] = ids - proto.values[:] = values - - -def _variables_to_proto( - variables: Iterable[Tuple[int, _VariableStorage]], - proto: model_pb2.VariablesProto, -) -> None: - """Exports variables to proto.""" - has_named_var = False - for _, var_storage in variables: - if var_storage.name: - has_named_var = True - break - for var_id, var_storage in variables: - proto.ids.append(var_id) - proto.lower_bounds.append(var_storage.lower_bound) - proto.upper_bounds.append(var_storage.upper_bound) - proto.integers.append(var_storage.is_integer) - if has_named_var: - proto.names.append(var_storage.name) - - -def _linear_constraints_to_proto( - linear_constraints: Iterable[Tuple[int, _LinearConstraintStorage]], - proto: model_pb2.LinearConstraintsProto, -) -> None: - """Exports variables to proto.""" - has_named_lin_con = False - for _, lin_con_storage in linear_constraints: - if lin_con_storage.name: - has_named_lin_con = True - break - for lin_con_id, lin_con_storage in linear_constraints: - proto.ids.append(lin_con_id) - proto.lower_bounds.append(lin_con_storage.lower_bound) - proto.upper_bounds.append(lin_con_storage.upper_bound) - if has_named_lin_con: - proto.names.append(lin_con_storage.name) diff --git a/ortools/math_opt/python/hash_model_storage_test.py b/ortools/math_opt/python/hash_model_storage_test.py deleted file mode 100644 index 7c5feb37b3..0000000000 --- a/ortools/math_opt/python/hash_model_storage_test.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 
2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for hash_model_storage that cannot be covered by model_storage_(update)_test.""" - -from absl.testing import absltest -from ortools.math_opt.python import hash_model_storage - - -class HashModelStorageTest(absltest.TestCase): - - def test_quadratic_term_storage(self): - storage = hash_model_storage._QuadraticTermStorage() - storage.set_coefficient(0, 1, 1.0) - storage.delete_variable(0) - self.assertEmpty(list(storage.get_adjacent_variables(0))) - - -if __name__ == "__main__": - absltest.main() diff --git a/ortools/math_opt/python/model_storage.py b/ortools/math_opt/python/model_storage.py deleted file mode 100644 index cf67f6a87d..0000000000 --- a/ortools/math_opt/python/model_storage.py +++ /dev/null @@ -1,441 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""An interface for in memory storage of optimization problems.""" - -import abc -import dataclasses -from typing import Iterator, Optional, Type, TypeVar - -from ortools.math_opt import model_pb2 -from ortools.math_opt import model_update_pb2 - - -# TODO(b/231426528): remove __slots__ and set slots=True when Python 3.10 is -# available. -@dataclasses.dataclass(frozen=True) -class LinearConstraintMatrixIdEntry: - __slots__ = "linear_constraint_id", "variable_id", "coefficient" - linear_constraint_id: int - variable_id: int - coefficient: float - - -# TODO(b/231426528): remove __slots__ and set slots=True when Python 3.10 is -# available. -@dataclasses.dataclass(frozen=True) -class LinearObjectiveEntry: - __slots__ = "variable_id", "coefficient" - variable_id: int - coefficient: float - - -# TODO(b/231426528): remove __slots__ and set slots=True when Python 3.10 is -# available. -@dataclasses.dataclass(frozen=True) -class QuadraticTermIdKey: - """An ordered pair of ints used as a key for quadratic terms. - - QuadraticTermIdKey.id1 <= QuadraticTermIdKey.id2. - """ - - __slots__ = "id1", "id2" - id1: int - id2: int - - def __init__(self, a: int, b: int): - """Ints a and b will be ordered internally.""" - id1 = a - id2 = b - if id1 > id2: - id1, id2 = id2, id1 - object.__setattr__(self, "id1", id1) - object.__setattr__(self, "id2", id2) - - -# TODO(b/231426528): remove __slots__ and set slots=True when Python 3.10 is -# available. -@dataclasses.dataclass(frozen=True) -class QuadraticEntry: - """Represents an id-indexed quadratic term.""" - - __slots__ = "id_key", "coefficient" - id_key: QuadraticTermIdKey - coefficient: float - - -class StorageUpdateTracker(abc.ABC): - """Tracks updates to an optimization model from a ModelStorage. - - Do not instantiate directly, instead create through - ModelStorage.add_update_tracker(). - - Interacting with an update tracker after it has been removed from the model - will result in an UsedUpdateTrackerAfterRemovalError error. 
- - Example: - mod = model_storage.ModelStorage() - x = mod.add_variable(0.0, 1.0, True, 'x') - y = mod.add_variable(0.0, 1.0, True, 'y') - tracker = mod.add_update_tracker() - mod.set_variable_ub(x, 3.0) - tracker.export_update() - => "variable_updates: {upper_bounds: {ids: [0], values[3.0] }" - mod.set_variable_ub(y, 2.0) - tracker.export_update() - => "variable_updates: {upper_bounds: {ids: [0, 1], values[3.0, 2.0] }" - tracker.advance_checkpoint() - tracker.export_update() - => "" - mod.set_variable_ub(y, 4.0) - tracker.export_update() - => "variable_updates: {upper_bounds: {ids: [1], values[4.0] }" - tracker.advance_checkpoint() - mod.remove_update_tracker(tracker) - => "" - """ - - @abc.abstractmethod - def export_update(self) -> Optional[model_update_pb2.ModelUpdateProto]: - """Returns changes to the model since last call to checkpoint/creation, or None if no changes occurred.""" - pass - - @abc.abstractmethod - def advance_checkpoint(self) -> None: - """Track changes to the model only after this function call.""" - pass - - -class UsedUpdateTrackerAfterRemovalError(RuntimeError): - - def __init__(self): - super().__init__( - "Attempted to use update tracker after removing it from model storage." - ) - - -class BadVariableIdError(LookupError): - """Raised by ModelStorage when a bad variable id is given.""" - - def __init__(self, variable_id): - super().__init__(f"Unexpected variable id: {variable_id}") - self.id = variable_id - - -class BadLinearConstraintIdError(LookupError): - """Raised by ModelStorage when a bad linear constraint id is given.""" - - def __init__(self, linear_constraint_id): - super().__init__(f"Unexpected linear constraint id: {linear_constraint_id}") - self.id = linear_constraint_id - - -class ModelStorage(abc.ABC): - """An interface for in memory storage of an optimization model. - - Most users should not use this class directly and use Model defined in - model.py. 
- - Stores an mixed integer programming problem of the form: - - {max/min} c*x + d - s.t. lb_c <= A * x <= ub_c - lb_v <= x <= ub_v - x_i integer for i in I - - where x is a vector of n decision variables, d is a number, lb_v, ub_v, and c - are vectors of n numbers, lb_c and ub_c are vectors of m numbers, A is a - m by n matrix, and I is a subset of {1,..., n}. - - Each of the n variables and m constraints have an integer id that you use to - get/set the problem data (c, A, lb_c etc.). Ids begin at zero and increase - sequentially. They are not reused after deletion. Note that if a variable is - deleted, your model has nonconsecutive variable ids. - - For all methods taking an id (e.g. set_variable_lb), providing a bad id - (including the id of a deleted variable) will raise a BadVariableIdError or - BadLinearConstraintIdError. Further, the ModelStorage instance is assumed to - be in a bad state after any such error and there are no guarantees on further - interactions. - - All implementations must have a constructor taking a str argument for the - model name with a default value of the empty string. - - Any ModelStorage can be exported to model_pb2.ModelProto, the format consumed - by MathOpt solvers. Changes to a model can be exported to a - model_update_pb2.ModelUpdateProto with an UpdateTracker, see the UpdateTracker - documentation for details. - - When solving this optimization problem we will additionally require that: - * No numbers are NaN, - * c, d, and A are all finite, - * lb_c and lb_v are not +inf, - * ub_c and ub_v are not -inf, - but those assumptions are not checked or enforced here (NaNs and infinite - values can be used anywhere). 
- """ - - @property - @abc.abstractmethod - def name(self) -> str: - pass - - @abc.abstractmethod - def add_variable(self, lb: float, ub: float, is_integer: bool, name: str) -> int: - pass - - @abc.abstractmethod - def delete_variable(self, variable_id: int) -> None: - pass - - @abc.abstractmethod - def variable_exists(self, variable_id: int) -> bool: - pass - - @abc.abstractmethod - def next_variable_id(self) -> int: - pass - - @abc.abstractmethod - def set_variable_lb(self, variable_id: int, lb: float) -> None: - pass - - @abc.abstractmethod - def set_variable_ub(self, variable_id: int, ub: float) -> None: - pass - - @abc.abstractmethod - def set_variable_is_integer(self, variable_id: int, is_integer: bool) -> None: - pass - - @abc.abstractmethod - def get_variable_lb(self, variable_id: int) -> float: - pass - - @abc.abstractmethod - def get_variable_ub(self, variable_id: int) -> float: - pass - - @abc.abstractmethod - def get_variable_is_integer(self, variable_id: int) -> bool: - pass - - @abc.abstractmethod - def get_variable_name(self, variable_id: int) -> str: - pass - - @abc.abstractmethod - def get_variables(self) -> Iterator[int]: - """Yields the variable ids in order of creation.""" - pass - - @abc.abstractmethod - def add_linear_constraint(self, lb: float, ub: float, name: str) -> int: - pass - - @abc.abstractmethod - def delete_linear_constraint(self, linear_constraint_id: int) -> None: - pass - - @abc.abstractmethod - def linear_constraint_exists(self, linear_constraint_id: int) -> bool: - pass - - @abc.abstractmethod - def next_linear_constraint_id(self) -> int: - pass - - @abc.abstractmethod - def set_linear_constraint_lb(self, linear_constraint_id: int, lb: float) -> None: - pass - - @abc.abstractmethod - def set_linear_constraint_ub(self, linear_constraint_id: int, ub: float) -> None: - pass - - @abc.abstractmethod - def get_linear_constraint_lb(self, linear_constraint_id: int) -> float: - pass - - @abc.abstractmethod - def 
get_linear_constraint_ub(self, linear_constraint_id: int) -> float: - pass - - @abc.abstractmethod - def get_linear_constraint_name(self, linear_constraint_id: int) -> str: - pass - - @abc.abstractmethod - def get_linear_constraints(self) -> Iterator[int]: - """Yields the linear constraint ids in order of creation.""" - pass - - @abc.abstractmethod - def set_linear_constraint_coefficient( - self, linear_constraint_id: int, variable_id: int, lb: float - ) -> None: - pass - - @abc.abstractmethod - def get_linear_constraint_coefficient( - self, linear_constraint_id: int, variable_id: int - ) -> float: - pass - - @abc.abstractmethod - def get_linear_constraints_with_variable(self, variable_id: int) -> Iterator[int]: - """Yields the linear constraints with nonzero coefficient for a variable in undefined order.""" - pass - - @abc.abstractmethod - def get_variables_for_linear_constraint( - self, linear_constraint_id: int - ) -> Iterator[int]: - """Yields the variables with nonzero coefficient in a linear constraint in undefined order.""" - pass - - @abc.abstractmethod - def get_linear_constraint_matrix_entries( - self, - ) -> Iterator[LinearConstraintMatrixIdEntry]: - """Yields the nonzero elements of the linear constraint matrix in undefined order.""" - pass - - @abc.abstractmethod - def clear_objective(self) -> None: - """Clears objective coefficients and offset. 
Does not change direction.""" - - @abc.abstractmethod - def set_linear_objective_coefficient(self, variable_id: int, value: float) -> None: - pass - - @abc.abstractmethod - def get_linear_objective_coefficient(self, variable_id: int) -> float: - pass - - @abc.abstractmethod - def get_linear_objective_coefficients(self) -> Iterator[LinearObjectiveEntry]: - """Yields the nonzero linear objective terms in undefined order.""" - pass - - @abc.abstractmethod - def set_quadratic_objective_coefficient( - self, first_variable_id: int, second_variable_id: int, value: float - ) -> None: - """Sets the objective coefficient for the product of two variables. - - The ordering of the input variables does not matter. - - Args: - first_variable_id: The first variable in the product. - second_variable_id: The second variable in the product. - value: The value of the coefficient. - - Raises: - BadVariableIdError if first_variable_id or second_variable_id are not in - the model. - """ - - @abc.abstractmethod - def get_quadratic_objective_coefficient( - self, first_variable_id: int, second_variable_id: int - ) -> float: - """Gets the objective coefficient for the product of two variables. - - The ordering of the input variables does not matter. - - Args: - first_variable_id: The first variable in the product. - second_variable_id: The second variable in the product. - - Raises: - BadVariableIdError if first_variable_id or second_variable_id are not in - the model. - - Returns: - The value of the coefficient. - """ - - @abc.abstractmethod - def get_quadratic_objective_coefficients(self) -> Iterator[QuadraticEntry]: - """Yields the nonzero quadratic objective terms in undefined order.""" - - @abc.abstractmethod - def get_quadratic_objective_adjacent_variables( - self, variable_id: int - ) -> Iterator[int]: - """Yields the variables multiplying a variable in the objective function. - - Variables are returned in an unspecified order. 
- - For example, if variables x and y have ids 0 and 1 respectively, and the - quadratic portion of the objective is x^2 + 2 x*y, then - get_quadratic_objective_adjacent_variables(0) = (0, 1). - - Args: - variable_id: Function yields the variables multiplying variable_id in the - objective function. - - Yields: - The variables multiplying variable_id in the objective function. - - Raises: - BadVariableIdError if variable_id is not in the model. - """ - - @abc.abstractmethod - def set_is_maximize(self, is_maximize: bool) -> None: - pass - - @abc.abstractmethod - def get_is_maximize(self) -> bool: - pass - - @abc.abstractmethod - def set_objective_offset(self, offset: float) -> None: - pass - - @abc.abstractmethod - def get_objective_offset(self) -> float: - pass - - @abc.abstractmethod - def export_model(self) -> model_pb2.ModelProto: - pass - - @abc.abstractmethod - def add_update_tracker(self) -> StorageUpdateTracker: - """Creates a StorageUpdateTracker registered with self to view model changes.""" - pass - - @abc.abstractmethod - def remove_update_tracker(self, tracker: StorageUpdateTracker): - """Stops tracker from getting updates on model changes in self. - - An error will be raised if tracker is not a StorageUpdateTracker created by - this Model that has not previously been removed. - - Using an UpdateTracker (via checkpoint or export_update) after it has been - removed will result in an error. - - Args: - tracker: The StorageUpdateTracker to unregister. - - Raises: - KeyError: The tracker was created by another model or was already removed. 
- """ - pass - - -ModelStorageImpl = TypeVar("ModelStorageImpl", bound=ModelStorage) -ModelStorageImplClass = Type[ModelStorageImpl] diff --git a/ortools/math_opt/python/model_storage_test.py b/ortools/math_opt/python/model_storage_test.py deleted file mode 100644 index 64590af629..0000000000 --- a/ortools/math_opt/python/model_storage_test.py +++ /dev/null @@ -1,941 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -from typing import Any, Callable - -from absl.testing import absltest -from absl.testing import parameterized -from ortools.math_opt import model_pb2 -from ortools.math_opt import sparse_containers_pb2 -from ortools.math_opt.python import hash_model_storage -from ortools.math_opt.python import model_storage -from ortools.math_opt.python.testing import compare_proto - -_StorageClass = model_storage.ModelStorageImplClass -_MatEntry = model_storage.LinearConstraintMatrixIdEntry -_ObjEntry = model_storage.LinearObjectiveEntry - - -@parameterized.parameters((hash_model_storage.HashModelStorage,)) -class ModelStorageTest(compare_proto.MathOptProtoAssertions, parameterized.TestCase): - - def test_add_and_read_variables(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - self.assertEqual(0, storage.next_variable_id()) - v1 = storage.add_variable(-1.0, 2.5, True, "x") - v2 = storage.add_variable(-math.inf, math.inf, False, "") - 
self.assertEqual("test_model", storage.name) - - self.assertEqual(-1.0, storage.get_variable_lb(v1)) - self.assertEqual(2.5, storage.get_variable_ub(v1)) - self.assertTrue(storage.get_variable_is_integer(v1)) - self.assertEqual("x", storage.get_variable_name(v1)) - self.assertEqual(0, v1) - self.assertTrue(storage.variable_exists(v1)) - - self.assertEqual(-math.inf, storage.get_variable_lb(v2)) - self.assertEqual(math.inf, storage.get_variable_ub(v2)) - self.assertFalse(storage.get_variable_is_integer(v2)) - self.assertEqual("", storage.get_variable_name(v2)) - self.assertEqual(1, v2) - self.assertTrue(storage.variable_exists(v2)) - - self.assertFalse(storage.variable_exists(max(v1, v2) + 1)) - self.assertListEqual([v1, v2], list(storage.get_variables())) - self.assertEqual(2, storage.next_variable_id()) - - def test_set_variable_lb(self, storage_class: _StorageClass) -> None: - storage = storage_class() - v1 = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_variable_lb(v1, -5.5) - self.assertEqual(-5.5, storage.get_variable_lb(v1)) - - def test_set_variable_ub(self, storage_class: _StorageClass) -> None: - storage = storage_class() - v1 = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_variable_ub(v1, 1.2) - self.assertEqual(1.2, storage.get_variable_ub(v1)) - - def test_set_variable_is_integer(self, storage_class: _StorageClass) -> None: - storage = storage_class() - v1 = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_variable_is_integer(v1, False) - self.assertFalse(storage.get_variable_is_integer(v1)) - - def test_add_and_read_linear_constraints( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - self.assertEqual(0, storage.next_linear_constraint_id()) - c1 = storage.add_linear_constraint(-1.0, 2.5, "c") - c2 = storage.add_linear_constraint(-math.inf, math.inf, "") - - self.assertEqual(-1.0, storage.get_linear_constraint_lb(c1)) - self.assertEqual(2.5, storage.get_linear_constraint_ub(c1)) - 
self.assertEqual("c", storage.get_linear_constraint_name(c1)) - self.assertEqual(0, c1) - self.assertTrue(storage.linear_constraint_exists(c1)) - - self.assertEqual(-math.inf, storage.get_linear_constraint_lb(c2)) - self.assertEqual(math.inf, storage.get_linear_constraint_ub(c2)) - self.assertEqual("", storage.get_linear_constraint_name(c2)) - self.assertEqual(1, c2) - self.assertTrue(storage.linear_constraint_exists(c2)) - - self.assertListEqual([c1, c2], list(storage.get_linear_constraints())) - self.assertFalse(storage.linear_constraint_exists(1 + max(c1, c2))) - self.assertEqual(2, storage.next_linear_constraint_id()) - - def test_set_linear_constraint_lb(self, storage_class: _StorageClass) -> None: - storage = storage_class() - c1 = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_lb(c1, -5.5) - self.assertEqual(-5.5, storage.get_linear_constraint_lb(c1)) - - def test_set_linear_constraint_ub(self, storage_class: _StorageClass) -> None: - storage = storage_class() - c1 = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_ub(c1, 1.2) - self.assertEqual(1.2, storage.get_linear_constraint_ub(c1)) - - def test_delete_variable_get_other(self, storage_class: _StorageClass) -> None: - storage = storage_class() - v1 = storage.add_variable(-1.0, 2.5, True, "x") - v2 = storage.add_variable(-3.0, 4.5, False, "y") - storage.delete_variable(v1) - self.assertEqual(-3.0, storage.get_variable_lb(v2)) - self.assertEqual(4.5, storage.get_variable_ub(v2)) - self.assertFalse(storage.get_variable_is_integer(v2)) - self.assertEqual("y", storage.get_variable_name(v2)) - self.assertEqual(1, v2) - self.assertFalse(storage.variable_exists(v1)) - self.assertTrue(storage.variable_exists(v2)) - - self.assertListEqual([v2], list(storage.get_variables())) - - def test_double_variable_delete(self, storage_class: _StorageClass) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - 
storage.delete_variable(x) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.delete_variable(x) - self.assertEqual(x, cm.exception.id) - - def _deleted_variable_invoke_lookup( - self, - storage_class: _StorageClass, - getter: Callable[[model_storage.ModelStorage, int], Any], - ) -> None: - storage = storage_class() - v1 = storage.add_variable(-1.0, 2.5, True, "x") - storage.delete_variable(v1) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - getter(storage, v1) - self.assertEqual(v1, cm.exception.id) - - def test_delete_variable_lb_error(self, storage_class: _StorageClass) -> None: - self._deleted_variable_invoke_lookup( - storage_class, storage_class.get_variable_lb - ) - - def test_delete_variable_ub_error(self, storage_class: _StorageClass) -> None: - self._deleted_variable_invoke_lookup( - storage_class, storage_class.get_variable_ub - ) - - def test_delete_variable_is_integer_error( - self, storage_class: _StorageClass - ) -> None: - self._deleted_variable_invoke_lookup( - storage_class, storage_class.get_variable_is_integer - ) - - def test_delete_variable_name_error(self, storage_class: _StorageClass) -> None: - self._deleted_variable_invoke_lookup( - storage_class, storage_class.get_variable_name - ) - - def test_delete_variable_set_lb_error(self, storage_class: _StorageClass) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.delete_variable(x) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_variable_lb(x, -2.0) - self.assertEqual(x, cm.exception.id) - - def test_delete_variable_set_ub_error(self, storage_class: _StorageClass) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.delete_variable(x) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_variable_ub(x, 12.0) - self.assertEqual(x, cm.exception.id) - - def test_delete_variable_set_integer_error( - self, 
storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.delete_variable(x) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_variable_is_integer(x, False) - self.assertEqual(x, cm.exception.id) - - def test_delete_linear_constraint_get_other( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - c1 = storage.add_linear_constraint(-1.0, 2.5, "c1") - c2 = storage.add_linear_constraint(-math.inf, 5.0, "c2") - storage.delete_linear_constraint(c1) - self.assertEqual(-math.inf, storage.get_linear_constraint_lb(c2)) - self.assertEqual(5.0, storage.get_linear_constraint_ub(c2)) - self.assertEqual("c2", storage.get_linear_constraint_name(c2)) - self.assertEqual(1, c2) - - self.assertListEqual([c2], list(storage.get_linear_constraints())) - - def test_double_linear_constraint_delete( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.delete_linear_constraint(c) - with self.assertRaises(model_storage.BadLinearConstraintIdError) as cm: - storage.delete_linear_constraint(c) - self.assertEqual(c, cm.exception.id) - - def _deleted_linear_constraint_invoke_lookup( - self, - storage_class: _StorageClass, - getter: Callable[[model_storage.ModelStorage, int], Any], - ) -> None: - storage = storage_class() - c1 = storage.add_linear_constraint(-1.0, 2.5, "c1") - storage.delete_linear_constraint(c1) - with self.assertRaises(model_storage.BadLinearConstraintIdError) as cm: - getter(storage, c1) - self.assertEqual(c1, cm.exception.id) - - def test_delete_linear_constraint_lb_error( - self, storage_class: _StorageClass - ) -> None: - self._deleted_linear_constraint_invoke_lookup( - storage_class, storage_class.get_linear_constraint_lb - ) - - def test_delete_linear_constraint_ub_error( - self, storage_class: _StorageClass - ) -> None: - 
self._deleted_linear_constraint_invoke_lookup( - storage_class, storage_class.get_linear_constraint_ub - ) - - def test_delete_linear_constraint_name_error( - self, storage_class: _StorageClass - ) -> None: - self._deleted_linear_constraint_invoke_lookup( - storage_class, storage_class.get_linear_constraint_name - ) - - def test_delete_linear_constraint_set_lb_error( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.delete_linear_constraint(c) - with self.assertRaises(model_storage.BadLinearConstraintIdError) as cm: - storage.set_linear_constraint_lb(c, -2.0) - self.assertEqual(c, cm.exception.id) - - def test_delete_linear_constraint_set_ub_error( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.delete_linear_constraint(c) - with self.assertRaises(model_storage.BadLinearConstraintIdError) as cm: - storage.set_linear_constraint_ub(c, 12.0) - self.assertEqual(c, cm.exception.id) - - def test_objective_offset(self, storage_class: _StorageClass) -> None: - storage = storage_class() - self.assertEqual(0.0, storage.get_objective_offset()) - storage.set_objective_offset(1.5) - self.assertEqual(1.5, storage.get_objective_offset()) - - def test_objective_direction(self, storage_class: _StorageClass) -> None: - storage = storage_class() - self.assertFalse(storage.get_is_maximize()) - storage.set_is_maximize(True) - self.assertTrue(storage.get_is_maximize()) - storage.set_is_maximize(False) - self.assertFalse(storage.get_is_maximize()) - - def test_set_linear_objective_coefficient( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(0.0, 1.0, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - storage.set_linear_objective_coefficient(x, 2.0) - storage.set_linear_objective_coefficient(z, 
-5.5) - self.assertEqual(2.0, storage.get_linear_objective_coefficient(x)) - self.assertEqual(0.0, storage.get_linear_objective_coefficient(y)) - self.assertEqual(-5.5, storage.get_linear_objective_coefficient(z)) - - self.assertCountEqual( - [ - _ObjEntry(variable_id=x, coefficient=2.0), - _ObjEntry(variable_id=z, coefficient=-5.5), - ], - storage.get_linear_objective_coefficients(), - ) - - def test_clear_linear_objective_coefficient( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(0.0, 1.0, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - storage.set_linear_objective_coefficient(x, 2.0) - storage.set_linear_objective_coefficient(z, -5.5) - storage.set_objective_offset(1.0) - self.assertEqual(2.0, storage.get_linear_objective_coefficient(x)) - self.assertEqual(0.0, storage.get_linear_objective_coefficient(y)) - self.assertEqual(-5.5, storage.get_linear_objective_coefficient(z)) - self.assertEqual(1.0, storage.get_objective_offset()) - storage.clear_objective() - self.assertEqual(0.0, storage.get_linear_objective_coefficient(x)) - self.assertEqual(0.0, storage.get_linear_objective_coefficient(y)) - self.assertEqual(0.0, storage.get_linear_objective_coefficient(z)) - self.assertEqual(0.0, storage.get_objective_offset()) - - def test_set_linear_objective_coefficient_bad_id( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_linear_objective_coefficient(x + 1, 2.0) - self.assertEqual(x + 1, cm.exception.id) - - def test_set_linear_objective_coefficient_deleted_id( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_linear_objective_coefficient(y, 3.0) - 
storage.delete_variable(x) - self.assertEqual(3.0, storage.get_linear_objective_coefficient(y)) - self.assertCountEqual( - [model_storage.LinearObjectiveEntry(variable_id=y, coefficient=3.0)], - storage.get_linear_objective_coefficients(), - ) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_linear_objective_coefficient(x, 2.0) - self.assertEqual(x, cm.exception.id) - - def test_get_linear_objective_coefficient_deleted_nonzero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_linear_objective_coefficient(x, 1.0) - storage.set_linear_objective_coefficient(y, 3.0) - storage.delete_variable(x) - self.assertEqual(3.0, storage.get_linear_objective_coefficient(y)) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.get_linear_objective_coefficient(x) - self.assertEqual(x, cm.exception.id) - - def test_set_quadratic_objective_coefficient( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(0.0, 1.0, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - storage.set_quadratic_objective_coefficient(x, y, 2.0) - storage.set_quadratic_objective_coefficient(z, z, -5.5) - storage.set_quadratic_objective_coefficient(z, y, 1.5) - self.assertEqual(2.0, storage.get_quadratic_objective_coefficient(x, y)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(y, y)) - self.assertEqual(-5.5, storage.get_quadratic_objective_coefficient(z, z)) - self.assertEqual(1.5, storage.get_quadratic_objective_coefficient(y, z)) - - self.assertCountEqual( - [ - model_storage.QuadraticEntry( - id_key=model_storage.QuadraticTermIdKey(x, y), coefficient=2.0 - ), - model_storage.QuadraticEntry( - id_key=model_storage.QuadraticTermIdKey(z, z), coefficient=-5.5 - ), - 
model_storage.QuadraticEntry( - id_key=model_storage.QuadraticTermIdKey(y, z), coefficient=1.5 - ), - ], - storage.get_quadratic_objective_coefficients(), - ) - - self.assertCountEqual( - [y], storage.get_quadratic_objective_adjacent_variables(x) - ) - self.assertCountEqual( - [x, z], storage.get_quadratic_objective_adjacent_variables(y) - ) - self.assertCountEqual( - [y, z], storage.get_quadratic_objective_adjacent_variables(z) - ) - - def test_clear_quadratic_objective_coefficient( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(0.0, 1.0, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - storage.set_linear_objective_coefficient(x, 2.0) - storage.set_linear_objective_coefficient(z, -5.5) - storage.set_quadratic_objective_coefficient(x, y, 2.0) - storage.set_quadratic_objective_coefficient(z, z, -5.5) - storage.set_quadratic_objective_coefficient(z, y, 1.5) - storage.set_objective_offset(1.0) - storage.clear_objective() - self.assertEqual(0.0, storage.get_linear_objective_coefficient(x)) - self.assertEqual(0.0, storage.get_linear_objective_coefficient(y)) - self.assertEqual(0.0, storage.get_linear_objective_coefficient(z)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(x, y)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(y, y)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(z, z)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(y, z)) - self.assertEqual(0.0, storage.get_objective_offset()) - self.assertEmpty(list(storage.get_quadratic_objective_adjacent_variables(x))) - self.assertEmpty(list(storage.get_quadratic_objective_adjacent_variables(y))) - self.assertEmpty(list(storage.get_quadratic_objective_adjacent_variables(z))) - - def test_set_quadratic_objective_coefficient_bad_id( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = 
storage.add_variable(-1.0, 2.5, True, "x") - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_quadratic_objective_coefficient(x, x + 1, 2.0) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_quadratic_objective_coefficient(x + 1, x, 2.0) - self.assertEqual(x + 1, cm.exception.id) - - def test_get_quadratic_objective_coefficient_bad_id( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.get_quadratic_objective_coefficient(x, x + 1) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.get_quadratic_objective_coefficient(x + 1, x) - self.assertEqual(x + 1, cm.exception.id) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - list(storage.get_quadratic_objective_adjacent_variables(x + 1)) - self.assertEqual(x + 1, cm.exception.id) - - def test_set_quadratic_objective_coefficient_existing_to_zero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, x, -1.0) - storage.set_quadratic_objective_coefficient(x, y, 1.0) - storage.set_quadratic_objective_coefficient(y, y, 3.0) - - storage.set_quadratic_objective_coefficient(x, x, 0.0) - storage.set_quadratic_objective_coefficient(x, y, 0.0) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(x, x)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(x, y)) - self.assertEqual(3.0, storage.get_quadratic_objective_coefficient(y, y)) - self.assertCountEqual( - [y], storage.get_quadratic_objective_adjacent_variables(y) - ) - self.assertEmpty(list(storage.get_quadratic_objective_adjacent_variables(x))) - - def test_set_quadratic_objective_coefficient_deleted_id( - self, storage_class: 
_StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 1.0) - storage.set_quadratic_objective_coefficient(y, y, 3.0) - storage.delete_variable(x) - self.assertEqual(3.0, storage.get_quadratic_objective_coefficient(y, y)) - self.assertCountEqual( - [y], storage.get_quadratic_objective_adjacent_variables(y) - ) - - def test_set_quadratic_objective_coefficient_deleted_id_get_coeff_error( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 1.0) - storage.set_quadratic_objective_coefficient(y, y, 3.0) - storage.delete_variable(x) - - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.get_quadratic_objective_coefficient(x, y) - self.assertEqual(x, cm.exception.id) - - def test_set_quadratic_objective_coefficient_deleted_id_set_coeff_error( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 1.0) - storage.set_quadratic_objective_coefficient(y, y, 3.0) - storage.delete_variable(x) - - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_quadratic_objective_coefficient(x, y, 1.0) - self.assertEqual(x, cm.exception.id) - - def test_set_quadratic_objective_coefficient_deleted_id_adjacent_error( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 1.0) - storage.set_quadratic_objective_coefficient(y, y, 3.0) - storage.delete_variable(x) - - with 
self.assertRaises(model_storage.BadVariableIdError) as cm: - list(storage.get_quadratic_objective_adjacent_variables(x)) - self.assertEqual(x, cm.exception.id) - - def test_constraint_matrix(self, storage_class: _StorageClass) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - d = storage.add_linear_constraint(-math.inf, 1.0, "d") - storage.set_linear_constraint_coefficient(c, y, 1.0) - storage.set_linear_constraint_coefficient(d, x, 2.0) - storage.set_linear_constraint_coefficient(d, y, -1.0) - storage.set_linear_constraint_coefficient(d, z, 1.0) - storage.set_linear_constraint_coefficient(d, z, 0.0) - - self.assertEqual(0.0, storage.get_linear_constraint_coefficient(c, x)) - self.assertEqual(1.0, storage.get_linear_constraint_coefficient(c, y)) - self.assertEqual(0.0, storage.get_linear_constraint_coefficient(c, z)) - - self.assertEqual(2.0, storage.get_linear_constraint_coefficient(d, x)) - self.assertEqual(-1.0, storage.get_linear_constraint_coefficient(d, y)) - self.assertEqual(0.0, storage.get_linear_constraint_coefficient(d, z)) - - self.assertCountEqual([y], storage.get_variables_for_linear_constraint(c)) - self.assertCountEqual([x, y], storage.get_variables_for_linear_constraint(d)) - - self.assertCountEqual([d], storage.get_linear_constraints_with_variable(x)) - self.assertCountEqual([c, d], storage.get_linear_constraints_with_variable(y)) - self.assertCountEqual([], storage.get_linear_constraints_with_variable(z)) - - self.assertCountEqual( - [ - _MatEntry(linear_constraint_id=c, variable_id=y, coefficient=1.0), - _MatEntry(linear_constraint_id=d, variable_id=x, coefficient=2.0), - _MatEntry(linear_constraint_id=d, variable_id=y, coefficient=-1.0), - ], - storage.get_linear_constraint_matrix_entries(), - ) - - def test_constraint_matrix_zero_unset_entry( - self, 
storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - storage.set_linear_constraint_coefficient(c, x, 0.0) - self.assertEmpty(list(storage.get_linear_objective_coefficients())) - self.assertEmpty(list(storage.get_variables_for_linear_constraint(c))) - self.assertEmpty(list(storage.get_linear_constraints_with_variable(x))) - self.assertEqual(0.0, storage.get_linear_constraint_coefficient(c, x)) - - def test_constraint_matrix_with_deletion( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - d = storage.add_linear_constraint(-math.inf, 1.0, "d") - storage.set_linear_constraint_coefficient(c, y, 1.0) - storage.set_linear_constraint_coefficient(d, x, 2.0) - storage.set_linear_constraint_coefficient(d, y, -1.0) - storage.set_linear_constraint_coefficient(c, z, 1.0) - - storage.delete_variable(y) - storage.delete_linear_constraint(c) - - self.assertEqual(2.0, storage.get_linear_constraint_coefficient(d, x)) - self.assertEqual(0.0, storage.get_linear_constraint_coefficient(d, z)) - - self.assertCountEqual([x], storage.get_variables_for_linear_constraint(d)) - - self.assertCountEqual([d], storage.get_linear_constraints_with_variable(x)) - self.assertCountEqual([], storage.get_linear_constraints_with_variable(z)) - - self.assertCountEqual( - [_MatEntry(linear_constraint_id=d, variable_id=x, coefficient=2.0)], - storage.get_linear_constraint_matrix_entries(), - ) - - def test_variables_for_linear_constraint_deleted_error( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - 
storage.set_linear_constraint_coefficient(c, x, 1.0) - storage.delete_linear_constraint(c) - with self.assertRaises(model_storage.BadLinearConstraintIdError) as cm: - list(storage.get_variables_for_linear_constraint(c)) - self.assertEqual(c, cm.exception.id) - - def test_linear_constraints_with_variable_deleted_error( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - storage.set_linear_constraint_coefficient(c, x, 1.0) - storage.delete_variable(x) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - list(storage.get_linear_constraints_with_variable(x)) - self.assertEqual(x, cm.exception.id) - - def test_constraint_matrix_set_deleted_var( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - storage.delete_variable(x) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.set_linear_constraint_coefficient(c, x, 2.0) - self.assertEqual(x, cm.exception.id) - - def test_constraint_matrix_get_deleted_var( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - storage.delete_variable(x) - with self.assertRaises(model_storage.BadVariableIdError) as cm: - storage.get_linear_constraint_coefficient(c, x) - self.assertEqual(x, cm.exception.id) - - def test_constraint_matrix_set_deleted_constraint( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - storage.delete_linear_constraint(c) - with self.assertRaises(model_storage.BadLinearConstraintIdError) as cm: - storage.set_linear_constraint_coefficient(c, x, 2.0) - 
self.assertEqual(c, cm.exception.id) - - def test_constraint_matrix_get_deleted_constraint( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-math.inf, 3.0, "c") - storage.delete_linear_constraint(c) - with self.assertRaises(model_storage.BadLinearConstraintIdError) as cm: - storage.get_linear_constraint_coefficient(c, x) - self.assertEqual(c, cm.exception.id) - - def test_proto_export(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, False, "") - z = storage.add_variable(0.0, 1.0, True, "z") - c = storage.add_linear_constraint(-math.inf, 3.0, "") - d = storage.add_linear_constraint(0.0, 1.0, "d") - storage.set_linear_constraint_coefficient(c, y, 1.0) - storage.set_linear_constraint_coefficient(d, x, 2.0) - storage.set_linear_constraint_coefficient(d, y, -1.0) - storage.set_linear_constraint_coefficient(d, z, 1.0) - storage.set_linear_constraint_coefficient(d, z, 0.0) - storage.set_linear_objective_coefficient(x, 2.5) - storage.set_linear_objective_coefficient(z, -1.0) - storage.set_quadratic_objective_coefficient(x, x, 3.0) - storage.set_quadratic_objective_coefficient(x, y, 4.0) - storage.set_quadratic_objective_coefficient(x, z, 5.0) - storage.set_is_maximize(True) - storage.set_objective_offset(7.0) - - expected = model_pb2.ModelProto( - name="test_model", - variables=model_pb2.VariablesProto( - ids=[0, 1, 2], - lower_bounds=[-1.0, -1.0, 0.0], - upper_bounds=[2.5, 2.5, 1.0], - integers=[True, False, True], - names=["x", "", "z"], - ), - linear_constraints=model_pb2.LinearConstraintsProto( - ids=[0, 1], - lower_bounds=[-math.inf, 0.0], - upper_bounds=[3.0, 1.0], - names=["", "d"], - ), - objective=model_pb2.ObjectiveProto( - maximize=True, - offset=7.0, - linear_coefficients=sparse_containers_pb2.SparseDoubleVectorProto( - ids=[0, 
2], values=[2.5, -1.0] - ), - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[0, 0, 0], - column_ids=[0, 1, 2], - coefficients=[3.0, 4.0, 5.0], - ), - ), - linear_constraint_matrix=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[0, 1, 1], - column_ids=[1, 0, 1], - coefficients=[1.0, 2.0, -1.0], - ), - ) - self.assert_protos_equiv(expected, storage.export_model()) - - def test_proto_export_with_deletes(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, False, "") - z = storage.add_variable(0.0, 1.0, True, "z") - c = storage.add_linear_constraint(-math.inf, 3.0, "") - d = storage.add_linear_constraint(0.0, 1.0, "d") - storage.set_linear_constraint_coefficient(c, y, 1.0) - storage.set_linear_constraint_coefficient(d, x, 2.0) - storage.set_linear_constraint_coefficient(d, y, -1.0) - storage.set_linear_constraint_coefficient(d, z, 1.0) - storage.set_linear_constraint_coefficient(d, z, 0.0) - storage.set_linear_objective_coefficient(x, 2.5) - storage.set_quadratic_objective_coefficient(x, x, 3.0) - storage.set_quadratic_objective_coefficient(x, y, 4.0) - storage.set_quadratic_objective_coefficient(x, z, 5.0) - storage.set_is_maximize(False) - storage.delete_variable(y) - storage.delete_linear_constraint(c) - - expected = model_pb2.ModelProto( - name="test_model", - variables=model_pb2.VariablesProto( - ids=[0, 2], - lower_bounds=[-1.0, 0.0], - upper_bounds=[2.5, 1.0], - integers=[True, True], - names=["x", "z"], - ), - linear_constraints=model_pb2.LinearConstraintsProto( - ids=[1], lower_bounds=[0.0], upper_bounds=[1.0], names=["d"] - ), - objective=model_pb2.ObjectiveProto( - maximize=False, - offset=0.0, - linear_coefficients=sparse_containers_pb2.SparseDoubleVectorProto( - ids=[0], values=[2.5] - ), - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[0, 0], column_ids=[0, 2], 
coefficients=[3.0, 5.0] - ), - ), - linear_constraint_matrix=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[1], column_ids=[0], coefficients=[2.0] - ), - ) - self.assert_protos_equiv(expected, storage.export_model()) - - def test_proto_export_empty(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - expected = model_pb2.ModelProto(name="test_model") - self.assert_protos_equiv(expected, storage.export_model()) - - def test_proto_export_feasibility(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - storage.add_variable(-1.0, 2.5, True, "x") - expected = model_pb2.ModelProto( - name="test_model", - variables=model_pb2.VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["x"], - ), - ) - self.assert_protos_equiv(expected, storage.export_model()) - - def test_proto_export_empty_names(self, storage_class: _StorageClass) -> None: - storage = storage_class("") - storage.add_variable(-1.0, 2.5, True, "") - storage.add_linear_constraint(0.0, 1.0, "") - expected = model_pb2.ModelProto( - variables=model_pb2.VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - # NOTE: names is the empty list not a list with an empty string. - names=[], - ), - linear_constraints=model_pb2.LinearConstraintsProto( - ids=[0], - lower_bounds=[0.0], - upper_bounds=[1.0], - # NOTE: names is the empty list not a list with an empty string. - names=[], - ), - ) - self.assert_protos_equiv(expected, storage.export_model()) - - def _assert_nan(self, x): - self.assertTrue(math.isnan(x), f"Expected nan, found {x}") - - # Ensure that we don't silently drop NaNs. 
- def test_nans_pass_through(self, storage_class: _StorageClass) -> None: - storage = storage_class("nan_model") - nan = math.nan - x = storage.add_variable(nan, 2.5, True, "x") - y = storage.add_variable(-1.0, nan, True, "y") - c = storage.add_linear_constraint(nan, math.inf, "c") - d = storage.add_linear_constraint(0.0, nan, "d") - storage.set_objective_offset(nan) - storage.set_linear_objective_coefficient(x, 1.0) - storage.set_linear_objective_coefficient(y, nan) - storage.set_quadratic_objective_coefficient(x, x, 3.0) - storage.set_quadratic_objective_coefficient(x, y, nan) - storage.set_linear_constraint_coefficient(c, x, nan) - storage.set_linear_constraint_coefficient(c, y, 1.0) - storage.set_linear_constraint_coefficient(d, y, nan) - - # Test the getters. - self.assertEqual("nan_model", storage.name) - self._assert_nan(storage.get_objective_offset()) - self._assert_nan(storage.get_variable_lb(x)) - self.assertEqual(2.5, storage.get_variable_ub(x)) - self.assertEqual(-1.0, storage.get_variable_lb(y)) - self._assert_nan(storage.get_variable_ub(y)) - self.assertEqual(1.0, storage.get_linear_objective_coefficient(x)) - self._assert_nan(storage.get_linear_objective_coefficient(y)) - self._assert_nan(storage.get_linear_constraint_lb(c)) - self.assertEqual(math.inf, storage.get_linear_constraint_ub(c)) - self.assertEqual(0.0, storage.get_linear_constraint_lb(d)) - self._assert_nan(storage.get_linear_constraint_ub(d)) - self._assert_nan(storage.get_linear_constraint_coefficient(c, x)) - self.assertEqual(1.0, storage.get_linear_constraint_coefficient(c, y)) - self.assertEqual(0.0, storage.get_linear_constraint_coefficient(d, x)) - self.assertEqual(3.0, storage.get_quadratic_objective_coefficient(x, x)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(y, y)) - self._assert_nan(storage.get_quadratic_objective_coefficient(x, y)) - self._assert_nan(storage.get_linear_constraint_coefficient(d, y)) - - # Test the iterators that interact with the NaN 
values. - self.assertCountEqual([x, y], storage.get_variables_for_linear_constraint(c)) - self.assertCountEqual([y], storage.get_variables_for_linear_constraint(d)) - - self.assertCountEqual([c], storage.get_linear_constraints_with_variable(x)) - self.assertCountEqual([c, d], storage.get_linear_constraints_with_variable(y)) - - mat_entries = {} - for e in storage.get_linear_constraint_matrix_entries(): - key = (e.linear_constraint_id, e.variable_id) - self.assertNotIn( - key, - mat_entries, - msg=f"found key:{key} twice, e:{e} mat_entries:{mat_entries}", - ) - mat_entries[key] = e.coefficient - self.assertSetEqual(set(mat_entries.keys()), set(((c, x), (c, y), (d, y)))) - self._assert_nan(mat_entries[(c, x)]) - self.assertEqual(mat_entries[(c, y)], 1.0) - self._assert_nan(mat_entries[(d, y)]) - - obj_entries = {} - for e in storage.get_linear_objective_coefficients(): - self.assertNotIn( - e.variable_id, - obj_entries, - msg=( - f"found variable:{e.variable_id} twice," - f" e:{e} obj_entries:{obj_entries}" - ), - ) - obj_entries[e.variable_id] = e.coefficient - self.assertSetEqual(set(obj_entries.keys()), set((x, y))) - self.assertEqual(obj_entries[x], 1.0) - self._assert_nan(obj_entries[y]) - - # Export to proto - expected = model_pb2.ModelProto( - name="nan_model", - variables=model_pb2.VariablesProto( - ids=[0, 1], - lower_bounds=[nan, -1.0], - upper_bounds=[2.5, nan], - integers=[True, True], - names=["x", "y"], - ), - linear_constraints=model_pb2.LinearConstraintsProto( - ids=[0, 1], - lower_bounds=[nan, 0.0], - upper_bounds=[math.inf, nan], - names=["c", "d"], - ), - objective=model_pb2.ObjectiveProto( - maximize=False, - offset=nan, - linear_coefficients=sparse_containers_pb2.SparseDoubleVectorProto( - ids=[0, 1], values=[1.0, nan] - ), - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[0, 0], column_ids=[0, 1], coefficients=[3.0, nan] - ), - ), - linear_constraint_matrix=sparse_containers_pb2.SparseDoubleMatrixProto( - 
row_ids=[0, 0, 1], - column_ids=[0, 1, 1], - coefficients=[nan, 1.0, nan], - ), - ) - self.assert_protos_equiv(expected, storage.export_model()) - - -if __name__ == "__main__": - absltest.main() diff --git a/ortools/math_opt/python/model_storage_update_test.py b/ortools/math_opt/python/model_storage_update_test.py deleted file mode 100644 index 9419fc1e28..0000000000 --- a/ortools/math_opt/python/model_storage_update_test.py +++ /dev/null @@ -1,1175 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from absl.testing import absltest -from absl.testing import parameterized -from ortools.math_opt import model_pb2 -from ortools.math_opt import model_update_pb2 -from ortools.math_opt import sparse_containers_pb2 -from ortools.math_opt.python import hash_model_storage -from ortools.math_opt.python import model_storage -from ortools.math_opt.python.testing import compare_proto - -_StorageClass = model_storage.ModelStorageImplClass - -_ModelUpdateProto = model_update_pb2.ModelUpdateProto -_VariableUpdatesProto = model_update_pb2.VariableUpdatesProto -_LinearConstraintUpdatesProto = model_update_pb2.LinearConstraintUpdatesProto -_SparseDoubleVectorProto = sparse_containers_pb2.SparseDoubleVectorProto -_SparseBoolVectorProto = sparse_containers_pb2.SparseBoolVectorProto -_SparseDoubleMatrixProto = sparse_containers_pb2.SparseDoubleMatrixProto -_VariablesProto = model_pb2.VariablesProto -_LinearConstraintsProto = model_pb2.LinearConstraintsProto -_ObjectiveUpdatesProto = model_update_pb2.ObjectiveUpdatesProto - - -@parameterized.parameters((hash_model_storage.HashModelStorage,)) -class ModelStorageTest(compare_proto.MathOptProtoAssertions, parameterized.TestCase): - - def test_simple_delete_var(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_variable_ids=[0]), tracker.export_update() - ) - - def test_simple_delete_lin_con(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.delete_linear_constraint(c) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_linear_constraint_ids=[0]), - tracker.export_update(), - ) - - def test_update_var_lb(self, storage_class: 
_StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_variable_lb(x, -7.0) - self.assert_protos_equiv( - _ModelUpdateProto( - variable_updates=_VariableUpdatesProto( - lower_bounds=_SparseDoubleVectorProto(ids=[0], values=[-7.0]) - ) - ), - tracker.export_update(), - ) - - def test_update_var_lb_same_value(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_variable_lb(x, -1.0) - self.assertIsNone(tracker.export_update()) - - def test_update_var_ub(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_variable_ub(x, 12.5) - self.assert_protos_equiv( - _ModelUpdateProto( - variable_updates=_VariableUpdatesProto( - upper_bounds=_SparseDoubleVectorProto(ids=[0], values=[12.5]) - ) - ), - tracker.export_update(), - ) - - def test_update_var_ub_same_value(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_variable_ub(x, 2.5) - self.assertIsNone(tracker.export_update()) - - def test_update_var_integer(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_variable_is_integer(x, False) - self.assert_protos_equiv( - _ModelUpdateProto( - variable_updates=_VariableUpdatesProto( - integers=_SparseBoolVectorProto(ids=[0], values=[False]) - ) - ), - tracker.export_update(), - ) - 
- def test_update_var_integer_same_value(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_variable_is_integer(x, True) - self.assertIsNone(tracker.export_update()) - - def test_update_var_then_delete(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_variable_lb(x, -3.0) - storage.set_variable_ub(x, 5.0) - storage.set_variable_is_integer(x, False) - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_variable_ids=[0]), tracker.export_update() - ) - - def test_update_lin_con_lb(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.set_linear_constraint_lb(c, -7.0) - self.assert_protos_equiv( - _ModelUpdateProto( - linear_constraint_updates=_LinearConstraintUpdatesProto( - lower_bounds=_SparseDoubleVectorProto(ids=[0], values=[-7.0]) - ) - ), - tracker.export_update(), - ) - - def test_update_lin_con_lb_same_value(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.set_linear_constraint_lb(c, -1.0) - self.assertIsNone(tracker.export_update()) - - def test_update_lin_con_ub(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.set_linear_constraint_ub(c, 12.5) - self.assert_protos_equiv( - _ModelUpdateProto( - 
linear_constraint_updates=_LinearConstraintUpdatesProto( - upper_bounds=_SparseDoubleVectorProto(ids=[0], values=[12.5]) - ) - ), - tracker.export_update(), - ) - - def test_update_lin_con_ub_same_value(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.set_linear_constraint_ub(c, 2.5) - self.assertIsNone(tracker.export_update()) - - def test_update_lin_con_then_delete(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.set_linear_constraint_lb(c, -3.0) - storage.set_linear_constraint_ub(c, 5.0) - storage.delete_linear_constraint(c) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_linear_constraint_ids=[0]), - tracker.export_update(), - ) - - def test_new_var(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - storage.add_variable(-1.0, 2.5, True, "x") - expected = _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["x"], - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_modify_new_var(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_variable_lb(x, -4.0) - storage.set_variable_ub(x, 5.0) - storage.set_variable_is_integer(x, False) - expected = _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0], - lower_bounds=[-4.0], - upper_bounds=[5.0], - integers=[False], - names=["x"], - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_new_var_with_deletes(self, 
storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(0.0, 1.0, False, "x") - storage.add_variable(-1.0, 2.5, True, "y") - storage.delete_variable(x) - expected = _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[1], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["y"], - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_delete_var_before_first_update(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - tracker.advance_checkpoint() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.add_variable(-2.0, 3.5, True, "y") - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[1], - lower_bounds=[-2.0], - upper_bounds=[3.5], - integers=[True], - names=["y"], - ) - ), - tracker.export_update(), - ) - - def test_new_lin_con(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - storage.add_linear_constraint(-1.0, 2.5, "c") - expected = _ModelUpdateProto( - new_linear_constraints=_LinearConstraintsProto( - ids=[0], lower_bounds=[-1.0], upper_bounds=[2.5], names=["c"] - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_modify_new_lin_con(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_lb(c, -4.0) - storage.set_linear_constraint_ub(c, 5.0) - expected = _ModelUpdateProto( - new_linear_constraints=_LinearConstraintsProto( - ids=[0], lower_bounds=[-4.0], upper_bounds=[5.0], names=["c"] - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_new_lin_con_with_deletes(self, storage_class: 
_StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(0.0, 1.0, "c") - storage.add_linear_constraint(-1.0, 2.5, "d") - storage.delete_linear_constraint(c) - expected = _ModelUpdateProto( - new_linear_constraints=_LinearConstraintsProto( - ids=[1], lower_bounds=[-1.0], upper_bounds=[2.5], names=["d"] - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_delete_lin_con_before_first_update( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - tracker.advance_checkpoint() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.add_linear_constraint(-2.0, 3.5, "d") - storage.delete_linear_constraint(c) - self.assert_protos_equiv( - _ModelUpdateProto( - new_linear_constraints=_LinearConstraintsProto( - ids=[1], lower_bounds=[-2.0], upper_bounds=[3.5], names=["d"] - ) - ), - tracker.export_update(), - ) - - def test_update_objective_direction(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - storage.set_is_maximize(True) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto(direction_update=True) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_update_objective_direction_same( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - storage.set_is_maximize(False) - self.assertIsNone(tracker.export_update()) - - def test_update_objective_offset(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - storage.set_objective_offset(5.0) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto(offset_update=5.0) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def 
test_update_objective_offset_same(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - storage.set_objective_offset(0.0) - self.assertIsNone(tracker.export_update()) - - def test_objective_update_existing_zero(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_linear_objective_coefficient(x, 3.0) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto( - linear_coefficients=_SparseDoubleVectorProto(ids=[0], values=[3.0]) - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_objective_update_existing_zero_same( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - storage.set_linear_objective_coefficient(x, 0.0) - self.assertIsNone(tracker.export_update()) - - def test_objective_update_existing_nonzero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_objective_coefficient(x, 4.0) - tracker.advance_checkpoint() - storage.set_linear_objective_coefficient(x, 3.0) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto( - linear_coefficients=_SparseDoubleVectorProto(ids=[0], values=[3.0]) - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_objective_update_existing_nonzero_same( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_objective_coefficient(x, 4.0) - 
tracker.advance_checkpoint() - storage.set_linear_objective_coefficient(x, 4.0) - self.assertIsNone(tracker.export_update()) - - def test_objective_update_clear(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(0.0, 1.0, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - - storage.set_linear_objective_coefficient(x, 2.0) - storage.set_linear_objective_coefficient(z, -5.5) - storage.set_objective_offset(1.0) - self.assertEqual(2.0, storage.get_linear_objective_coefficient(x)) - self.assertEqual(0.0, storage.get_linear_objective_coefficient(y)) - self.assertEqual(-5.5, storage.get_linear_objective_coefficient(z)) - self.assertEqual(1.0, storage.get_objective_offset()) - tracker.advance_checkpoint() - w = storage.add_variable(0.0, 1.0, True, "w") - storage.set_linear_objective_coefficient(w, 1.0) - storage.clear_objective() - expected = _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[3], - lower_bounds=[0.0], - upper_bounds=[1.0], - integers=[True], - names=["w"], - ), - objective_updates=_ObjectiveUpdatesProto( - offset_update=0.0, - linear_coefficients=_SparseDoubleVectorProto( - ids=[x, z], values=[0.0, 0.0] - ), - ), - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_objective_update_existing_to_zero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_objective_coefficient(x, 4.0) - tracker.advance_checkpoint() - storage.set_linear_objective_coefficient(x, 0.0) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto( - linear_coefficients=_SparseDoubleVectorProto(ids=[0], values=[0.0]) - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def 
test_objective_update_existing_then_delete( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_objective_coefficient(x, 4.0) - tracker.advance_checkpoint() - storage.set_linear_objective_coefficient(x, 2.0) - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_variable_ids=[0]), tracker.export_update() - ) - - def test_objective_update_new(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_objective_coefficient(x, 4.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["x"], - ), - objective_updates=_ObjectiveUpdatesProto( - linear_coefficients=_SparseDoubleVectorProto(ids=[0], values=[4.0]) - ), - ), - tracker.export_update(), - ) - - def test_objective_update_new_zero(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_objective_coefficient(x, 4.0) - storage.set_linear_objective_coefficient(x, 0.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["x"], - ) - ), - tracker.export_update(), - ) - - def test_objective_update_new_then_delete( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_objective_coefficient(x, 4.0) - storage.delete_variable(x) - self.assert_protos_equiv(_ModelUpdateProto(), tracker.export_update()) - - def 
test_objective_update_old_new_ordering( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - old_handles = [] - for i in range(4): - x = storage.add_variable(-1.0, 2.5, True, f"x_{i}") - storage.set_linear_objective_coefficient(x, i + 1.0) - old_handles.append(x) - tracker.advance_checkpoint() - for i in range(4): - x = storage.add_variable(-1.0, 2.5, True, f"x_{i+4}") - storage.set_linear_objective_coefficient(x, i + 10.0) - for i, h in enumerate(old_handles): - storage.set_linear_objective_coefficient(h, -2.0 * i) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[4, 5, 6, 7], - lower_bounds=[-1.0, -1.0, -1.0, -1.0], - upper_bounds=[2.5, 2.5, 2.5, 2.5], - integers=[True, True, True, True], - names=["x_4", "x_5", "x_6", "x_7"], - ), - objective_updates=_ObjectiveUpdatesProto( - linear_coefficients=_SparseDoubleVectorProto( - ids=[0, 1, 2, 3, 4, 5, 6, 7], - values=[0.0, -2.0, -4.0, -6.0, 10.0, 11.0, 12.0, 13.0], - ) - ), - ), - tracker.export_update(), - ) - - def test_quadratic_objective_update_existing_zero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - tracker.advance_checkpoint() - storage.set_quadratic_objective_coefficient(x, y, 3.0) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto( - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[0], column_ids=[1], coefficients=[3.0] - ) - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_quadratic_objective_update_existing_zero_same( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = 
storage.add_variable(-1.0, 2.5, True, "y") - tracker.advance_checkpoint() - storage.set_quadratic_objective_coefficient(x, y, 0.0) - self.assertIsNone(tracker.export_update()) - - def test_quadratic_objective_update_existing_nonzero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 4.0) - tracker.advance_checkpoint() - storage.set_quadratic_objective_coefficient(x, y, 3.0) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto( - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[0], column_ids=[1], coefficients=[3.0] - ) - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_quadratic_objective_update_existing_nonzero_same( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 4.0) - tracker.advance_checkpoint() - storage.set_quadratic_objective_coefficient(x, y, 4.0) - self.assertIsNone(tracker.export_update()) - - def test_quadratic_objective_update_clear( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(0.0, 1.0, False, "y") - z = storage.add_variable(0.0, 1.0, True, "z") - - storage.set_linear_objective_coefficient(x, 2.0) - storage.set_linear_objective_coefficient(z, -5.5) - storage.set_quadratic_objective_coefficient(x, y, 4.0) - storage.set_objective_offset(1.0) - self.assertEqual(2.0, storage.get_linear_objective_coefficient(x)) - self.assertEqual(0.0, 
storage.get_linear_objective_coefficient(y)) - self.assertEqual(-5.5, storage.get_linear_objective_coefficient(z)) - self.assertEqual(0.0, storage.get_quadratic_objective_coefficient(x, x)) - self.assertEqual(4.0, storage.get_quadratic_objective_coefficient(x, y)) - self.assertEqual(1.0, storage.get_objective_offset()) - tracker.advance_checkpoint() - w = storage.add_variable(0.0, 1.0, True, "w") - storage.set_linear_objective_coefficient(w, 1.0) - storage.set_quadratic_objective_coefficient(w, w, 2.0) - storage.clear_objective() - expected = _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[3], - lower_bounds=[0.0], - upper_bounds=[1.0], - integers=[True], - names=["w"], - ), - objective_updates=_ObjectiveUpdatesProto( - offset_update=0.0, - linear_coefficients=_SparseDoubleVectorProto( - ids=[x, z], values=[0.0, 0.0] - ), - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[x], column_ids=[y], coefficients=[0.0] - ), - ), - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_quadratic_objective_update_existing_to_zero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 4.0) - tracker.advance_checkpoint() - storage.set_quadratic_objective_coefficient(x, y, 0.0) - expected = _ModelUpdateProto( - objective_updates=_ObjectiveUpdatesProto( - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[x], column_ids=[y], coefficients=[0.0] - ) - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_quadratic_objective_update_existing_then_delete( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = 
storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 4.0) - tracker.advance_checkpoint() - storage.set_quadratic_objective_coefficient(x, y, 2.0) - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_variable_ids=[0]), tracker.export_update() - ) - - def test_quadratic_objective_update_new(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_quadratic_objective_coefficient(x, x, 4.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["x"], - ), - objective_updates=_ObjectiveUpdatesProto( - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[x], column_ids=[x], coefficients=[4.0] - ) - ), - ), - tracker.export_update(), - ) - - def test_quadratic_objective_update_new_old_deleted( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - old_var1 = storage.add_variable(-1.0, 2.5, True, "old1") - old_var2 = storage.add_variable(-1.0, 2.5, True, "old2") - deleted_var1 = storage.add_variable(-1.0, 2.5, True, "deleted1") - deleted_var2 = storage.add_variable(-1.0, 2.5, True, "deleted2") - tracker.advance_checkpoint() - new_var1 = storage.add_variable(0.0, 1.0, True, "new1") - new_var2 = storage.add_variable(0.0, 1.0, True, "new2") - storage.set_quadratic_objective_coefficient(old_var1, old_var1, 1.0) - storage.set_quadratic_objective_coefficient(old_var1, old_var2, 2.0) - storage.set_quadratic_objective_coefficient(old_var1, new_var1, 3.0) - storage.set_quadratic_objective_coefficient(new_var1, new_var1, 4.0) - storage.set_quadratic_objective_coefficient(new_var1, new_var2, 5.0) - storage.set_quadratic_objective_coefficient(deleted_var1, deleted_var1, 
6.0) - storage.set_quadratic_objective_coefficient(deleted_var1, deleted_var2, 7.0) - storage.set_quadratic_objective_coefficient(deleted_var1, old_var1, 8.0) - storage.set_quadratic_objective_coefficient(deleted_var1, new_var1, 9.0) - storage.delete_variable(deleted_var1) - storage.delete_variable(deleted_var2) - self.assert_protos_equiv( - _ModelUpdateProto( - deleted_variable_ids=[deleted_var1, deleted_var2], - new_variables=_VariablesProto( - ids=[new_var1, new_var2], - lower_bounds=[0.0, 0.0], - upper_bounds=[1.0, 1.0], - integers=[True, True], - names=["new1", "new2"], - ), - objective_updates=_ObjectiveUpdatesProto( - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[old_var1, old_var1, old_var1, new_var1, new_var1], - column_ids=[ - old_var1, - old_var2, - new_var1, - new_var1, - new_var2, - ], - coefficients=[1.0, 2.0, 3.0, 4.0, 5.0], - ) - ), - ), - tracker.export_update(), - ) - - def test_quadratic_objective_update_new_zero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 4.0) - storage.set_quadratic_objective_coefficient(x, y, 0.0) - storage.set_linear_objective_coefficient(x, 0.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0, 1], - lower_bounds=[-1.0, -1.0], - upper_bounds=[2.5, 2.5], - integers=[True, True], - names=["x", "y"], - ) - ), - tracker.export_update(), - ) - - def test_quadratic_objective_update_new_then_delete( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - y = storage.add_variable(-1.0, 2.5, True, "y") - storage.set_quadratic_objective_coefficient(x, y, 4.0) - storage.delete_variable(x) - 
storage.delete_variable(y) - self.assert_protos_equiv(_ModelUpdateProto(), tracker.export_update()) - - def test_quadratic_objective_update_old_new_ordering( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - old_handles = [] - for i in range(4): - x = storage.add_variable(-1.0, 2.5, True, f"x_{i}") - old_handles.append(x) - for i in range(3): - storage.set_quadratic_objective_coefficient( - old_handles[i], old_handles[i + 1], i + 1 - ) - tracker.advance_checkpoint() - new_handles = [] - for i in range(4): - x = storage.add_variable(-1.0, 2.5, True, f"x_{i+4}") - new_handles.append(x) - for i in range(3): - storage.set_quadratic_objective_coefficient( - new_handles[i], new_handles[i + 1], i + 10 - ) - for i in range(3): - storage.set_quadratic_objective_coefficient( - old_handles[i], old_handles[i + 1], -2.0 * i - ) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[4, 5, 6, 7], - lower_bounds=[-1.0, -1.0, -1.0, -1.0], - upper_bounds=[2.5, 2.5, 2.5, 2.5], - integers=[True, True, True, True], - names=["x_4", "x_5", "x_6", "x_7"], - ), - objective_updates=_ObjectiveUpdatesProto( - quadratic_coefficients=sparse_containers_pb2.SparseDoubleMatrixProto( - row_ids=[0, 1, 2, 4, 5, 6], - column_ids=[1, 2, 3, 5, 6, 7], - coefficients=[0, -2.0, -4.0, 10, 11, 12], - ) - ), - ), - tracker.export_update(), - ) - - def test_update_lin_con_mat_existing_zero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 3.0) - expected = _ModelUpdateProto( - linear_constraint_matrix_updates=_SparseDoubleMatrixProto( - row_ids=[0], column_ids=[0], coefficients=[3.0] - ) - ) - self.assert_protos_equiv(expected, 
tracker.export_update()) - - def test_update_lin_con_mat_existing_zero_same( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 0.0) - self.assertIsNone(tracker.export_update()) - - def test_lin_con_mat_update_existing_nonzero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 1.0) - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 3.0) - expected = _ModelUpdateProto( - linear_constraint_matrix_updates=_SparseDoubleMatrixProto( - row_ids=[0], column_ids=[0], coefficients=[3.0] - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_lin_con_mat_update_existing_nonzero_same( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 1.0) - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 1.0) - self.assertIsNone(tracker.export_update()) - - def test_lin_con_mat_update_existing_to_zero( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 1.0) - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 0.0) - expected = 
_ModelUpdateProto( - linear_constraint_matrix_updates=_SparseDoubleMatrixProto( - row_ids=[0], column_ids=[0], coefficients=[0.0] - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - - def test_lin_con_mat_update_existing_then_delete_var( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 1.0) - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 6.0) - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_variable_ids=[0]), tracker.export_update() - ) - - def test_lin_con_mat_update_existing_then_delete_lin_con( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 1.0) - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 6.0) - storage.delete_linear_constraint(c) - self.assert_protos_equiv( - _ModelUpdateProto(deleted_linear_constraint_ids=[0]), - tracker.export_update(), - ) - - def test_lin_con_mat_update_existing_then_delete_both( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 1.0) - tracker.advance_checkpoint() - storage.set_linear_constraint_coefficient(c, x, 6.0) - storage.delete_linear_constraint(c) - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto( - deleted_variable_ids=[0], deleted_linear_constraint_ids=[0] - ), - tracker.export_update(), - ) - 
- def test_lin_con_mat_update_new_var(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - tracker.advance_checkpoint() - x = storage.add_variable(-1.0, 2.5, True, "x") - storage.set_linear_constraint_coefficient(c, x, 4.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["x"], - ), - linear_constraint_matrix_updates=_SparseDoubleMatrixProto( - row_ids=[0], column_ids=[0], coefficients=[4.0] - ), - ), - tracker.export_update(), - ) - - def test_lin_con_mat_update_new_lin_con(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 4.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_linear_constraints=_LinearConstraintsProto( - ids=[0], lower_bounds=[-1.0], upper_bounds=[2.5], names=["c"] - ), - linear_constraint_matrix_updates=_SparseDoubleMatrixProto( - row_ids=[0], column_ids=[0], coefficients=[4.0] - ), - ), - tracker.export_update(), - ) - - def test_lin_con_mat_update_new_both(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 4.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[0], - lower_bounds=[-1.0], - upper_bounds=[2.5], - integers=[True], - names=["x"], - ), - new_linear_constraints=_LinearConstraintsProto( - ids=[0], lower_bounds=[-1.0], upper_bounds=[2.5], names=["c"] - ), - 
linear_constraint_matrix_updates=_SparseDoubleMatrixProto( - row_ids=[0], column_ids=[0], coefficients=[4.0] - ), - ), - tracker.export_update(), - ) - - def test_lin_con_mat_update_new_zero(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 4.0) - storage.set_linear_constraint_coefficient(c, x, 0.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_linear_constraints=_LinearConstraintsProto( - ids=[0], lower_bounds=[-1.0], upper_bounds=[2.5], names=["c"] - ) - ), - tracker.export_update(), - ) - - def test_lin_con_mat_update_new_then_delete( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - x = storage.add_variable(-1.0, 2.5, True, "x") - tracker.advance_checkpoint() - c = storage.add_linear_constraint(-1.0, 2.5, "c") - storage.set_linear_constraint_coefficient(c, x, 4.0) - storage.delete_variable(x) - self.assert_protos_equiv( - _ModelUpdateProto( - deleted_variable_ids=[0], - new_linear_constraints=_LinearConstraintsProto( - ids=[0], lower_bounds=[-1.0], upper_bounds=[2.5], names=["c"] - ), - ), - tracker.export_update(), - ) - - def test_lin_con_mat_update_old_new_ordering( - self, storage_class: _StorageClass - ) -> None: - storage = storage_class("test_model") - tracker = storage.add_update_tracker() - var_handles = [storage.add_variable(0.0, 1.0, True, "") for _ in range(2)] - lin_con_handles = [ - storage.add_linear_constraint(0.0, 1.0, "") for _ in range(2) - ] - for v in var_handles: - for l in lin_con_handles: - storage.set_linear_constraint_coefficient(l, v, 1.0) - tracker.advance_checkpoint() - x = storage.add_variable(0.0, 1.0, True, "x") - c = storage.add_linear_constraint(0.0, 1.0, "c") - 
storage.set_linear_constraint_coefficient( - lin_con_handles[0], var_handles[0], 5.0 - ) - storage.set_linear_constraint_coefficient(lin_con_handles[0], x, 4.0) - storage.set_linear_constraint_coefficient(c, var_handles[1], 3.0) - storage.set_linear_constraint_coefficient(c, x, 2.0) - self.assert_protos_equiv( - _ModelUpdateProto( - new_variables=_VariablesProto( - ids=[2], - lower_bounds=[0.0], - upper_bounds=[1.0], - integers=[True], - names=["x"], - ), - new_linear_constraints=_LinearConstraintsProto( - ids=[2], lower_bounds=[0.0], upper_bounds=[1.0], names=["c"] - ), - linear_constraint_matrix_updates=_SparseDoubleMatrixProto( - row_ids=[0, 0, 2, 2], - column_ids=[0, 2, 1, 2], - coefficients=[5.0, 4.0, 3.0, 2.0], - ), - ), - tracker.export_update(), - ) - - def test_remove_update_tracker(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - x = storage.add_variable(0.0, 1.0, True, "x") - tracker = storage.add_update_tracker() - storage.set_variable_ub(x, 7.0) - expected = _ModelUpdateProto( - variable_updates=_VariableUpdatesProto( - upper_bounds=_SparseDoubleVectorProto(ids=[0], values=[7.0]) - ) - ) - self.assert_protos_equiv(expected, tracker.export_update()) - storage.remove_update_tracker(tracker) - with self.assertRaises(model_storage.UsedUpdateTrackerAfterRemovalError): - tracker.export_update() - with self.assertRaises(model_storage.UsedUpdateTrackerAfterRemovalError): - tracker.advance_checkpoint() - with self.assertRaises(KeyError): - storage.remove_update_tracker(tracker) - - def test_remove_update_tracker_wrong_model( - self, storage_class: _StorageClass - ) -> None: - storage1 = storage_class("test_model1") - storage2 = storage_class("test_model2") - tracker1 = storage1.add_update_tracker() - with self.assertRaises(KeyError): - storage2.remove_update_tracker(tracker1) - - def test_multiple_update_tracker(self, storage_class: _StorageClass) -> None: - storage = storage_class("test_model") - x = 
storage.add_variable(0.0, 1.0, True, "x") - y = storage.add_variable(0.0, 1.0, True, "y") - tracker1 = storage.add_update_tracker() - storage.set_variable_ub(x, 7.0) - tracker2 = storage.add_update_tracker() - storage.set_variable_ub(y, 3.0) - self.assert_protos_equiv( - _ModelUpdateProto( - variable_updates=_VariableUpdatesProto( - upper_bounds=_SparseDoubleVectorProto(ids=[0, 1], values=[7.0, 3.0]) - ) - ), - tracker1.export_update(), - ) - self.assert_protos_equiv( - _ModelUpdateProto( - variable_updates=_VariableUpdatesProto( - upper_bounds=_SparseDoubleVectorProto(ids=[1], values=[3.0]) - ) - ), - tracker2.export_update(), - ) - - -if __name__ == "__main__": - absltest.main() diff --git a/ortools/routing/parsers/solomon_parser_test.cc b/ortools/routing/parsers/solomon_parser_test.cc index 0791d364e1..42070a6b04 100644 --- a/ortools/routing/parsers/solomon_parser_test.cc +++ b/ortools/routing/parsers/solomon_parser_test.cc @@ -24,7 +24,7 @@ #define ROOT_DIR "_main/" ABSL_FLAG(std::string, solomon_test_archive, - "ortools/bench/solomon/" + "ortools/routing/benchmarks/solomon/" "testdata/solomon.zip", "Solomon: testing archive"); ABSL_FLAG(std::string, solomon_test_instance, "google2.txt", diff --git a/ortools/routing/samples/cvrp_disjoint_tw.cc b/ortools/routing/samples/cvrp_disjoint_tw.cc index 18e51bd1ec..b23e515acc 100644 --- a/ortools/routing/samples/cvrp_disjoint_tw.cc +++ b/ortools/routing/samples/cvrp_disjoint_tw.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Capacitated Vehicle Routing Problem with Disjoint Time Windows (and optional // orders). 
// A description of the problem can be found here: diff --git a/ortools/routing/samples/cvrptw.cc b/ortools/routing/samples/cvrptw.cc index b1ab211b2e..16aa4f01c7 100644 --- a/ortools/routing/samples/cvrptw.cc +++ b/ortools/routing/samples/cvrptw.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Capacitated Vehicle Routing Problem with Time Windows (and optional orders). // A description of the problem can be found here: // http://en.wikipedia.org/wiki/Vehicle_routing_problem. diff --git a/ortools/routing/samples/cvrptw_with_breaks.cc b/ortools/routing/samples/cvrptw_with_breaks.cc index 791be7a200..5ee85a3129 100644 --- a/ortools/routing/samples/cvrptw_with_breaks.cc +++ b/ortools/routing/samples/cvrptw_with_breaks.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Capacitated Vehicle Routing Problem with Time Windows and Breaks. // A description of the Capacitated Vehicle Routing Problem with Time Windows // can be found here: diff --git a/ortools/routing/samples/cvrptw_with_precedences.cc b/ortools/routing/samples/cvrptw_with_precedences.cc index bbf3f40d56..f001b217f8 100644 --- a/ortools/routing/samples/cvrptw_with_precedences.cc +++ b/ortools/routing/samples/cvrptw_with_precedences.cc @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// // Capacitated Vehicle Routing Problem with Time Windows (and optional orders). // A description of the problem can be found here: // http://en.wikipedia.org/wiki/Vehicle_routing_problem. 
diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index a67fc0cd6f..7c4a6e67e8 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -1500,6 +1500,7 @@ cc_library( "//ortools/util:bitset", "//ortools/util:integer_pq", "//ortools/util:strong_integers", + "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/types:span", ], @@ -4279,6 +4280,7 @@ cc_library( "//ortools/util:running_stat", "//ortools/util:strong_integers", "//ortools/util:time_limit", + "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/base:core_headers", "@abseil-cpp//absl/container:flat_hash_map", "@abseil-cpp//absl/container:flat_hash_set", diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index 3e22dabbf5..e64e37ae16 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -863,6 +863,10 @@ void RegisterLinear2BoundsImport(SharedLinear2Bounds* shared_linear2_bounds, for (const auto& [proto_expr, bounds] : new_bounds) { // Lets create the corresponding LinearExpression2. 
LinearExpression2 expr; + if (!cp_model_mapping->IsInteger(proto_expr.vars[0]) || + !cp_model_mapping->IsInteger(proto_expr.vars[1])) { + continue; + } for (const int i : {0, 1}) { expr.vars[i] = cp_model_mapping->Integer(proto_expr.vars[i]); expr.coeffs[i] = proto_expr.coeffs[i]; diff --git a/ortools/sat/sat_solver.cc b/ortools/sat/sat_solver.cc index ef554832d4..c27cc650f5 100644 --- a/ortools/sat/sat_solver.cc +++ b/ortools/sat/sat_solver.cc @@ -1013,9 +1013,9 @@ SatSolver::Status SatSolver::EnqueueDecisionAndBacktrackOnConflict( bool SatSolver::EnqueueDecisionIfNotConflicting(Literal true_literal) { SCOPED_TIME_STAT(&stats_); + if (model_is_unsat_) return kUnsatTrailIndex; DCHECK(PropagationIsDone()); - if (model_is_unsat_) return kUnsatTrailIndex; const int current_level = CurrentDecisionLevel(); EnqueueNewDecision(true_literal); if (Propagate()) { diff --git a/ortools/sat/synchronization.cc b/ortools/sat/synchronization.cc index 18f37e7cfb..f09a824c4d 100644 --- a/ortools/sat/synchronization.cc +++ b/ortools/sat/synchronization.cc @@ -1559,7 +1559,7 @@ void SharedClausesManager::Synchronize() { void SharedLinear2Bounds::Add(int id, Key expr, IntegerValue lb, IntegerValue ub) { - DCHECK(expr.IsCanonicalized()); + DCHECK(expr.IsCanonicalized()) << expr; absl::MutexLock mutex_lock(&mutex_); auto [it, inserted] = shared_bounds_.insert({expr, {lb, ub}}); diff --git a/ortools/sat/synchronization.h b/ortools/sat/synchronization.h index a9cd377fdb..83085ff27e 100644 --- a/ortools/sat/synchronization.h +++ b/ortools/sat/synchronization.h @@ -913,7 +913,7 @@ class SharedLinear2Bounds { IntegerValue coeffs[2]; bool IsCanonicalized() { - return coeffs[0] > 0 && coeffs[1] != 0 && vars[0] < vars[1] && + return vars[0] >= 0 && vars[1] >= 0 && vars[0] < vars[1] && std::gcd(coeffs[0].value(), coeffs[1].value()) == 1; } @@ -927,6 +927,12 @@ class SharedLinear2Bounds { return H::combine(std::move(h), k.vars[0], k.vars[1], k.coeffs[0], k.coeffs[1]); } + + template + friend void 
AbslStringify(Sink& sink, const Key& k) { + absl::Format(&sink, "%d X%d + %d X%d", k.coeffs[0].value(), k.vars[0], + k.coeffs[1].value(), k.vars[1]); + } }; // Exports new bounds on the given expr (should be canonicalized). From 7088dabd1e274a137e6c373120ebc63ca768bea2 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 1 Jul 2025 16:52:06 +0200 Subject: [PATCH 139/509] new example --- examples/python/BUILD.bazel | 2 + .../python/car_sequencing_optimization_sat.py | 271 ++++++++++++++++++ 2 files changed, 273 insertions(+) create mode 100644 examples/python/car_sequencing_optimization_sat.py diff --git a/examples/python/BUILD.bazel b/examples/python/BUILD.bazel index b1989553a9..632e68bbb6 100644 --- a/examples/python/BUILD.bazel +++ b/examples/python/BUILD.bazel @@ -23,6 +23,8 @@ code_sample_py("balance_group_sat") code_sample_py("bus_driver_scheduling_sat") +code_sample_py("car_sequencing_optimization_sat") + code_sample_py("chemical_balance_sat") code_sample_py("clustering_sat") diff --git a/examples/python/car_sequencing_optimization_sat.py b/examples/python/car_sequencing_optimization_sat.py new file mode 100644 index 0000000000..81e030d7fc --- /dev/null +++ b/examples/python/car_sequencing_optimization_sat.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python3 +# Copyright 2010-2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Solve the car sequencing problem as an optimization problem. 
+ +Problem Description: The Car Sequencing Problem with Optimization +----------------------------------------------------------------- + +See https://en.wikipedia.org/wiki/Car_sequencing_problem for more details. + +We are tasked with determining the optimal production sequence for a set of cars +on an assembly line. This is a classic and challenging combinatorial +optimization problem with the following characteristics: + +Fixed Production Demand: There is a specific, non-negotiable number of cars of +different types (or 'classes') that must be produced. In our case, we have 6 +distinct classes of cars, and we must produce exactly 5 of each, for a total of +30 'real' cars. + +Diverse Car Configurations: Each car class is defined by a unique combination of +optional features. For example, 'Class 1' might require a sunroof (Option 1) and +a special engine (Option 4), while 'Class 3' only requires air conditioning +(Option 2). + +Specialized Assembly Stations: The assembly line is composed of a series of +specialized stations. Each station is responsible for installing one specific +option. For example, there is one station for sunroofs, one for special engines, +and so on. + +Capacity-Limited Stations: The core challenge of the problem lies here. The +stations cannot handle an unlimited, dense flow of cars requiring their specific +option. Their capacity is defined by a 'sliding window' constraint. For example, +the sunroof station might have a constraint of 'at most 1 car with a sunroof in +any sequence of 3 consecutive cars'. This means sequences like [Sunroof, No, No, +Sunroof] are valid, but [Sunroof, No, Sunroof, No] are not. + +The Need for Spacing (Optimization): The combination of high demand for certain +options and tight capacity constraints may make it impossible to produce the 30 +real cars consecutively. To create a valid sequence, we may need to insert +'dummy' or 'filler' cars into the production line. 
These dummy cars have no +options and therefore do not consume capacity at any station. They serve purely +as spacers to break up dense sequences of option-heavy cars. + +The Goal: The objective is to find a production sequence that fulfills the +demand for all 30 real cars while using the minimum number of dummy cars. This +is equivalent to finding the shortest possible total production schedule (real +cars + dummy cars). + +Modeling and Solution Approach with CP-SAT +------------------------------------------ + +To solve this problem, we use the CP-SAT solver from Google's OR-Tools library. +This is a constraint programming approach, which works by defining variables, +constraints, and an objective function. + +1. Decision Variables +The fundamental decision the solver must make is: 'Which class of car should be +placed in each production slot?' +We define a large number of boolean variables: produces[c][s]. This variable is +True if a car of class c is scheduled in slot s, and False otherwise. We create +these for all car classes (including the dummy class) and for an extended number +of slots (30 real + a buffer of 20 for dummies). +We introduce a key integer variable: makespan. This variable represents the +total length of the 'meaningful' part of our schedule. It's the slot number +where the first dummy car appears, after which all subsequent cars are also +dummies. + +2. Constraints (The Rules of the Game) +We translate the problem's rules into mathematical constraints that the solver +must obey: + +One Car Per Slot: For every production slot s, exactly one car class can be +assigned. We enforce this using an AddExactlyOne constraint over all +produces[c][s] variables for that slot. + +Fulfill Real Car Demand: The total number of times each real car class c appears +across all slots must equal its required demand (5 in our case). This is a +simple Add(sum(...) == 5) constraint. + +Station Capacity (Sliding Window): This is the most critical constraint. 
For +each option (e.g., 'sunroof') and its capacity rule (e.g., '1 in 3'), we create +constraints for every possible sliding window. For every subsequence of 3 slots, +we sum up the produces variables corresponding to car classes that require that +option and constrain this sum to be less than or equal to 1. + +Makespan Definition: This is the clever part of the model. We link our makespan +objective variable to the placement of dummy cars using logical equivalences for +each slot s: +(makespan <= s) is equivalent to (slot s contains a dummy car) +This ensures that if the solver chooses a makespan of 32, for example, it is +forced to place dummy cars in slots 32, 33, 34, and so on. Conversely, if the +solver is forced to place a dummy car in slot 32 to satisfy a capacity +constraint, the makespan must be at most 32. + +3. The Objective Function + +The objective is simple and directly tied to our goal: + +Minimize makespan: By instructing the solver to find a solution with the +smallest possible value for the makespan variable, we are asking it to find the +shortest possible production schedule that satisfies all the rules. This +inherently minimizes the number of dummy cars used. + +By defining the problem in this way, we let the CP-SAT solver explore the vast +search space of possible sequences efficiently, using its powerful constraint +propagation and search techniques to find an optimal arrangement that meets all +our complex requirements. +""" + +from collections.abc import Sequence + +from absl import app + +from ortools.sat.python import cp_model + + +def solve_car_sequencing_optimization() -> None: + """Solves the car sequencing problem with an optimization approach.""" + + # -------------------- + # 1. 
Data + # -------------------- + num_real_cars: int = 30 + max_dummy_cars: int = 20 + num_slots = num_real_cars + max_dummy_cars + all_slots = range(num_slots) + + class_options = [ + # Options: 1 2 3 4 5 + [0, 0, 0, 0, 0], # Class 0 (Dummy) + [1, 0, 0, 1, 0], # Class 1 + [0, 1, 0, 0, 1], # Class 2 + [0, 1, 0, 0, 0], # Class 3 + [0, 0, 1, 1, 0], # Class 4 + [0, 0, 1, 0, 0], # Class 5 + [0, 0, 0, 0, 1], # Class 6 + ] + num_classes = len(class_options) + all_classes = range(num_classes) + real_classes = range(1, num_classes) + dummy_class = 0 + + demands = [5, 5, 5, 5, 5, 5] + + capacity_constraints = [(1, 3), (1, 2), (1, 3), (2, 5), (1, 5)] + num_options = len(capacity_constraints) + all_options = range(num_options) + + classes_with_option = [ + [c for c in real_classes if class_options[c][o] == 1] for o in all_options + ] + + # -------------------- + # 2. Model Creation + # -------------------- + model = cp_model.CpModel() + + # -------------------- + # 3. Decision Variables + # -------------------- + produces = {} + for c in all_classes: + for s in all_slots: + produces[(c, s)] = model.new_bool_var(f"produces_c{c}_s{s}") + + makespan = model.new_int_var(num_real_cars, num_slots, "makespan") + + # -------------------- + # 4. Constraints + # -------------------- + + # Constraint 1: Only one car produced per slot. + for s in all_slots: + model.add_exactly_one([produces[(c, s)] for c in all_classes]) + + # Constraint 2: Meet the demand of real cars. + for i, c in enumerate(real_classes): + model.add(sum(produces[(c, s)] for s in all_slots) == demands[i]) + + # Constraint 3: Enforce the capacity constraints on options. 
+ for o in all_options: + max_cars, subsequence_len = capacity_constraints[o] + for start in range(num_slots - subsequence_len + 1): + window = range(start, start + subsequence_len) + cars_with_option_in_window = [] + for c in classes_with_option[o]: + for s in window: + cars_with_option_in_window.append(produces[(c, s)]) + model.add(sum(cars_with_option_in_window) <= max_cars) + + # Constraint 4 (Link objective and dummy cars at the end of the schedule) + for s in all_slots: + makespan_le_s = model.new_bool_var(f"makespan_le_{s}") + + # Enforce makespan_le_s <=> (makespan <= s) + model.add(makespan <= s).only_enforce_if(makespan_le_s) + # Use ~ for negation + model.add(makespan > s).only_enforce_if(~makespan_le_s) + + # Enforce makespan_le_s => produces[dummy_class, s] + model.add_implication(makespan_le_s, produces[dummy_class, s]) + + # -------------------- + # 5. Objective + # -------------------- + model.minimize(makespan) + + # -------------------- + # 6. Solve and Print Solution + # -------------------- + solver = cp_model.CpSolver() + solver.parameters.max_time_in_seconds = 30.0 + solver.parameters.num_search_workers = 1 # The problem is easy to solve. + # solver.parameters.log_search_progress = True # uncomment to see the log. + + status = solver.Solve(model) + + if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE: + final_makespan = int(solver.ObjectiveValue()) + num_dummies_needed = final_makespan - num_real_cars + + print( + f'\n{"Optimal" if status == cp_model.OPTIMAL else "Feasible"}' + f" solution found with a makespan of {final_makespan}." + ) + print( + f"This requires the conceptual equivalent of {num_dummies_needed} dummy" + " car(s) to be used as spacers." 
+ ) + + sequence = [-1] * num_slots + for s in all_slots: + for c in all_classes: + if solver.Value(produces[(c, s)]) == 1: + sequence[s] = c + break + + print("\nFull Production Sequence (Class 0 is dummy):") + print("Slot: | " + " | ".join(f"{i:2}" for i in range(num_slots)) + " |") + print("-------|-" + "--|-" * num_slots) + print("Class: | " + " | ".join(f"{c:2}" for c in sequence) + " |") + + elif status == cp_model.INFEASIBLE: + print("\nNo solution found.") + + else: + print(f"\nSomething went wrong. Solver status: {status}") + + print("\nSolver statistics:") + print(solver.response_stats()) + + +def main(argv: Sequence[str]) -> None: + if len(argv) > 1: + raise app.UsageError("Too many command-line arguments.") + solve_car_sequencing_optimization() + + +if __name__ == "__main__": + app.run(main) From 3b63125457261a91381e96654e4407dd251b7f17 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 1 Jul 2025 16:52:19 +0200 Subject: [PATCH 140/509] update code --- ortools/util/fp_utils.h | 5 +++++ ortools/util/permutation.h | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h index a5e6cec283..33bf542edd 100644 --- a/ortools/util/fp_utils.h +++ b/ortools/util/fp_utils.h @@ -253,6 +253,11 @@ inline FloatType Interpolate(FloatType x, FloatType y, FloatType alpha) { return alpha * x + (1 - alpha) * y; } +inline int fast_ilogb(double value) { return ilogb(value); } +inline double fast_scalbn(double value, int exponent) { + return scalbn(value, exponent); +} + } // namespace operations_research #endif // OR_TOOLS_UTIL_FP_UTILS_H_ diff --git a/ortools/util/permutation.h b/ortools/util/permutation.h index e44906b324..f8cab1b074 100644 --- a/ortools/util/permutation.h +++ b/ortools/util/permutation.h @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// // Classes for permuting indexable, ordered containers of data without // depending on that data to be accessible in any particular way. The // client needs to give us two things: From 6c09169099feecb5aa9ccc2de7dc131863ffacae Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 1 Jul 2025 17:07:29 +0200 Subject: [PATCH 141/509] missing files --- ortools/math_opt/labs/scaler_util.cc | 362 +++++++++++++++++++++++++++ ortools/math_opt/labs/scaler_util.h | 173 +++++++++++++ 2 files changed, 535 insertions(+) create mode 100644 ortools/math_opt/labs/scaler_util.cc create mode 100644 ortools/math_opt/labs/scaler_util.h diff --git a/ortools/math_opt/labs/scaler_util.cc b/ortools/math_opt/labs/scaler_util.cc new file mode 100644 index 0000000000..84c8afa264 --- /dev/null +++ b/ortools/math_opt/labs/scaler_util.cc @@ -0,0 +1,362 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/math_opt/labs/scaler_util.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "absl/strings/str_format.h" +#include "ortools/base/logging.h" +#include "ortools/base/types.h" +#include "ortools/util/fp_utils.h" + +// This file provides an implementation of the ideas exposed in +// go/mpsolver-scaling. 
The rationale of why scaling is important in the +// context of mathematical programming, the limits imposed by the common +// solver implementations, and the algorithmic ideas are explored there. + +namespace operations_research { + +std::string AutoShiftingBitmask65::DebugString() const { + return absl::StrFormat("msb: %3d mantissa: 0x%016lx", msb_, mask_); +} + +// The computation of bit_diff relies on the hardware using two's complement +// representation for negative numbers. Test that the corner case holds. +// Note that according to the C++ standard, static_cast behaves as if two's +// complement +static_assert(static_cast( + static_cast(std::numeric_limits::max()) - + static_cast(std::numeric_limits::min())) == + static_cast(std::numeric_limits::max()) - + static_cast(std::numeric_limits::min())); +void AutoShiftingBitmask65::SetBit(int bit) { + // Update mask_ if the distance between msb_ and bit is up to 65 bit + // positions. + if (bit < msb_) { + // The correctness of this operation relies on the machine using two's + // complement representation for negative numbers. We test that in the + // preceding static_assert. + const uint32_t bit_diff = + static_cast(msb_) - static_cast(bit); + if (bit_diff <= 64) mask_ |= uint64_t{1} << (64 - bit_diff); + return; + } + // Nothing to do, already set. + if (bit == msb_) return; + // Set new msb_. + const uint32_t bit_diff = + static_cast(bit) - static_cast(msb_); + msb_ = bit; + // If the bits are too far apart, it is equivalent to set the mask to zero + // and return. Also catch extreme case where resulting mask_ is 1. + if (bit_diff >= 64) { + mask_ = bit_diff == 64 ? 1 : 0; + return; + } + // Regular case: shift mask to adjust to new maximum. + mask_ = mask_ >> bit_diff; + mask_ |= uint64_t{1} << (64 - bit_diff); // New position of former msb_. 
+} + +RowScalingRange::RowScalingRange(const int min_log2_value, + const int max_log2_value) + : min_log2_value_(min_log2_value), max_log2_value_(max_log2_value) { + // 2^0=1 should always be a valid coefficient. + CHECK_LE(min_log2_value, 0); + CHECK_GE(max_log2_value, 0); +} + +std::string RowScalingRange::DebugString() const { + return absl::StrFormat("coeff: {%s} bounds: {%s}", + coefficients_.DebugString(), bounds_.DebugString()); +} + +void RowScalingRange::UpdateWithBounds(const double value, + const double lower_bound, + const double upper_bound) { + DCHECK_LT(std::abs(value), kScalerInfinity); + coefficients_.SetBit(fast_ilogb(value)); + if (std::abs(lower_bound) <= kScalerInfinity) { + // Our choice of kScalerInfinity ensures this quantity does not overflow. + bounds_.SetBit(fast_ilogb(value * lower_bound)); + } + if (std::abs(upper_bound) <= kScalerInfinity) { + bounds_.SetBit(fast_ilogb(value * upper_bound)); + } +} + +void RowScalingRange::Update(const double value) { + if (std::abs(value) >= kScalerInfinity) return; + const int value_ilogb = fast_ilogb(value); + coefficients_.SetBit(value_ilogb); + bounds_.SetBit(value_ilogb); +} + +namespace internal { + +// Note that max_exponent is stored separately in the AutoShiftingBitmask65 +// structure, so we are keeping 65 bits of information. To retain only the most +// significant kRelZeroLog2 bits, we need to set to zero all initial bits in the +// exponent mask. We do this by masking these bits. We also ensure that we don't +// consider bits under the absolute zero tolerance. +// +// We ignore all bits that are either below -kScalerInfinityLog2 (absolute zero) +// or below range.GetMsb() + kRelZeroLog2. So we precompute the number of bits +// that this will discard in our 65-bit mantissa. +int ComputeMinimumNonIgnoredBit(const AutoShiftingBitmask65& range) { + // We need some fractional bits to be considered as 'reliable'. 
+ static_assert(kRelZeroLog2 < 0); + const int num_discarded_bits = + 65 + std::max(kRelZeroLog2, -(range.GetMsb() + kScalerInfinityLog2)); + if (num_discarded_bits >= 64) { + // Two special cases (regrouped to speed up the common case): + // 1) If we need to discard the entire mantissa + the MSB, we're below the + // absolute zero: return 0, i.e. there are no bits at all. + if (num_discarded_bits >= 65) return 0; + // 2) We need to discard the 64-bit mantissa but not the MSB. + return range.GetMsb(); + } + // We remove the ignored bits from the mantissa and compute the LSB. + const int least_significant_bit = + ffsll(range.GetMask() & (kuint64max << num_discarded_bits)); + // If no other bit was set, then the only exponent seen was the + // max_exponent. + return range.GetMsb() + + (least_significant_bit == 0 ? 0 : least_significant_bit - 65); +} + +} // namespace internal + +// In this function we want to see if a sequence of numbers and products (whose +// information is already summarized in the range.coefficients and +// range.bounds) needs to be re-scaled. We do this with some caveats: +// +// First, if the maximum magnitude is under our absolute zero tolerance, +// we do not perform any scaling, as the recommended way to deal with these +// coefficients is to disregard them (i.e. treat them as true zero values). +// +// Second, given that we use the concept of relative zero magnitudes (which +// should be treated as zero), we truncate the information in the ranges +// to consider up to kRelZeroLog2 bits. +// +// With these modifications, we compute the largest and smallest exponents +// seen. +// - If both ranges are within [min_log2_value_, max_log2_value_], don't scale. +// - Otherwise, first try to maintain or shift coefficients such that +// range.coefficients is within [min_log2_value_, max_log2_value_], If the +// coefficient range is larger, we snap its upper bound to max_log2_value_ +// (and its lower bound will be below min_log2_value_). 
+// - If after the previous shift, there is still room to improve on the +// range.bounds (i.e. if range.coefficients is within [min_log2_value_, +// max_log2_value_], first try to maintain or shift coefficients such that +// range.bounds is within [min_log2_value_, max_log2_value_], If the +// bounds range is larger, we snap its upper bound to max_log2_value_ (and +// its lower bound will be below min_log2_value_), while at the same time +// ensuring that the resulting range.coefficients will still be within +// [min_log2_value_, max_log2_value_]. +int RowScalingRange::GetLog2Scale( + const OverflowHandlingMode overflow_handling_mode) const { + const int max_coefficient_bit = coefficients_.GetMsb(); + const int max_overall_bit = std::max(bounds_.GetMsb(), max_coefficient_bit); + // If max_overall_bit is under our absolute zero tolerance, do not scale. + if (max_overall_bit <= -kScalerInfinityLog2) return 0; + + const int min_coefficient_bit = + internal::ComputeMinimumNonIgnoredBit(coefficients_); + const int min_overall_bit = + std::max(-std::numeric_limits::max(), + std::min(internal::ComputeMinimumNonIgnoredBit(bounds_), + min_coefficient_bit)); + if (max_overall_bit <= max_log2_value_ && + min_overall_bit >= min_log2_value_) { + return 0; + } + return CorrectLog2Scale( + GetUncorrectedLog2Scale( + /*bit_range=*/{.min = min_coefficient_bit, + .max = max_coefficient_bit}, + overflow_handling_mode), + /*coefficient_bit_range=*/ + {.min = min_coefficient_bit, .max = max_coefficient_bit}, + /*overall_bit_range=*/{.min = min_overall_bit, .max = max_overall_bit}); +} + +int RowScalingRange::GetUncorrectedLog2Scale( + const Log2BitRange bit_range, + const OverflowHandlingMode overflow_handling_mode) const { + if (bit_range.max - bit_range.min <= max_log2_value_ - min_log2_value_) { + // If the coefficient range fits within the range of desired value, there is + // no overflow, and then there is not difference between the two modes of + // overflow handling. 
+ const int log2_scale = [&]() { + if (bit_range.max > max_log2_value_) { + return max_log2_value_ - bit_range.max; + } else if (bit_range.min < min_log2_value_) { + return min_log2_value_ - bit_range.min; + } else { + return 0; + } + }(); + DCHECK_GE(bit_range.min + log2_scale, min_log2_value_); + DCHECK_LE(bit_range.max + log2_scale, max_log2_value_); + return log2_scale; + } + // Otherwise, the scaling depend on the scaling_mode. + switch (overflow_handling_mode) { + case OverflowHandlingMode::kClampToMin: + return min_log2_value_ - bit_range.min; + case OverflowHandlingMode::kClampToMax: + return max_log2_value_ - bit_range.max; + case OverflowHandlingMode::kEvenOverflow: + // Although this formula can be simplified, in this form is easier to + // understand. + const int overflow = + (bit_range.max - bit_range.min) - (max_log2_value_ - min_log2_value_); + // We need to move the smallest coefficient to min_log2_value_ minus + // half the overflow. + return (min_log2_value_ - bit_range.min) - overflow / 2; + } +} + +int RowScalingRange::CorrectLog2Scale( + int log2_scale, const Log2BitRange coefficient_bit_range, + const Log2BitRange overall_bit_range) const { + // Compute the interval [min_delta..max_delta] of the delta (positive or + // negative) that we can add to log2_scale while keeping the coefficient + // range within bounds. + const int max_delta = + std::max(0, max_log2_value_ - coefficient_bit_range.max - log2_scale); + const int min_delta = + std::min(0, min_log2_value_ - coefficient_bit_range.min - log2_scale); + // Move to improve quality of bounds range. 
+ if (overall_bit_range.max + log2_scale > max_log2_value_) { + log2_scale += std::max( + min_delta, max_log2_value_ - overall_bit_range.max - log2_scale); + } else if (overall_bit_range.min + log2_scale < min_log2_value_) { + log2_scale += std::min( + max_delta, min_log2_value_ - overall_bit_range.min - log2_scale); + } + VLOG(4) << absl::StrFormat( + "coeff {%d,%d} bound {%d,%d} scale %d delta {%d,%d}", + coefficient_bit_range.min, coefficient_bit_range.max, + overall_bit_range.min, overall_bit_range.max, log2_scale, min_delta, + max_delta); + return log2_scale; +} + +void ColumnScalingRange::UpdateWithCoefficient(const double coefficient) { + if (std::abs(coefficient) < kScalerInfinity) { + coefficients_.SetBit(fast_ilogb(coefficient)); + } +} + +namespace { + +struct IntMinMax { + int min = kScalerInfinityLog2; + int max = -kScalerInfinityLog2; +}; + +std::optional BitRangeFromBitmask( + const AutoShiftingBitmask65& range) { + const int max_bit = range.GetMsb(); + if (max_bit <= kRelZeroLog2) { + return std::nullopt; + } + return IntMinMax{.min = internal::ComputeMinimumNonIgnoredBit(range), + .max = max_bit}; +} + +std::optional BitRangeFromBounds(const double lower_bound, + const double upper_bound) { + std::optional result; + if (std::abs(lower_bound) > kRelZero && lower_bound > -kScalerInfinity) { + const int lower_bound_ilogb = fast_ilogb(lower_bound); + result = IntMinMax{.min = lower_bound_ilogb, .max = lower_bound_ilogb}; + } + if (std::abs(upper_bound) > kRelZero && upper_bound < kScalerInfinity) { + if (!result.has_value()) { + result.emplace(); + } + const int upper_bound_ilogb = fast_ilogb(upper_bound); + result = IntMinMax{.min = std::min(result->min, upper_bound_ilogb), + .max = std::max(result->max, upper_bound_ilogb)}; + } + return result; +} + +// Returns how far the `range` is from the extrema of our acceptable interval +// [`min_log2_value`, `max_log2_value`]. 
A negative value in `.min` +// (respectively, `.max`) means that the `range` exceeds the lower (resp., +// upper) bound of the acceptable interval; a nonnegative value indicates how +// far `range` can be shifted before it hits the lower (resp., upper) bound. +IntMinMax BitRangeToBitRangeDiff(const std::optional range, + const int min_log2_value, + const int max_log2_value) { + if (!range.has_value()) { + return IntMinMax{.min = kScalerInfinityLog2, .max = kScalerInfinityLog2}; + } + return IntMinMax{.min = range->min - min_log2_value, + .max = max_log2_value - range->max}; +} + +} // namespace + +int ColumnScalingRange::GetLog2Scale(const int min_log2_value, + const int max_log2_value) const { + // Negative values mean that we would like to do some scaling to repair. + // Nonnegative values mean that we have this amount of slack to scale, for + // some other reason, without hitting the extrema in this direction. + const IntMinMax coefficient_diff = BitRangeToBitRangeDiff( + BitRangeFromBitmask(coefficients_), min_log2_value, max_log2_value); + const IntMinMax bound_diff = + BitRangeToBitRangeDiff(BitRangeFromBounds(lower_bound_, upper_bound_), + min_log2_value, max_log2_value); + + // There are 5 possible cases to consider, each with potentially conflicting + // remedies. So, we order them: prefer coefficient scaling over bound + // scaling, and prefer scaling down large values over scaling up small values. + if (coefficient_diff.max < 0) { + // The coefficients are too large. We implicitly ClampToMax, and we care + // more about matrix coefficients than about bounds, so we look here first. + return -coefficient_diff.max; + } else if (coefficient_diff.min < 0) { + // The coefficients are too small. Again, we care more about matrix + // coefficients than about bounds, so we handle this second, making sure not + // to scale so much that the coefficients become too large. 
+ return std::max(coefficient_diff.min, -coefficient_diff.max); + } else if (bound_diff.max < 0) { + // The coefficients are fine, but the upper bound is large. We scale the + // variables, but mind to make sure that we don't make the coefficients too + // large as the bounds and coefficients are scaled in different directions. + return std::max(bound_diff.max, -coefficient_diff.max); + } else if (bound_diff.min < 0) { + // Everything is OK except for the lower bound. We must watch for both the + // coefficients becoming too small and for the bound becoming too large. + return std::min(-bound_diff.min, + std::min(coefficient_diff.min, bound_diff.max)); + } else { + // Everything is within range, so don't do any scaling. + return 0; + } +} + +} // namespace operations_research diff --git a/ortools/math_opt/labs/scaler_util.h b/ortools/math_opt/labs/scaler_util.h new file mode 100644 index 0000000000..276007c878 --- /dev/null +++ b/ortools/math_opt/labs/scaler_util.h @@ -0,0 +1,173 @@ +// Copyright 2010-2025 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_MATH_OPT_LABS_SCALER_UTIL_H_ +#define OR_TOOLS_MATH_OPT_LABS_SCALER_UTIL_H_ + +#include +#include +#include + +namespace operations_research { + +// Limit on finite quantities. +// Note that most MIP solvers adhere to the rule of using absolute tolerances +// when solving problems. 
This has many implications (see go/mpsolver-scaling +// for details), but one of them is that you can not meaningfully +// operate/optimize on problems with ranges far away from 10^-6 (the usual +// primal and dual tolerances for most solvers). In fact, most solvers treat +// modest numbers as infinity: +// - Gurobi 1e100 (but bounds over 1e20 are considered infinite) +// - Cplex 1e20 +// - SCIP 1e20 +// - XPRESS 1e20 +// This has to do with the fact that when you compare floating point numbers +// that differ beyond 2^51, the smaller quantity is treated just as zero. We +// allow for far larger values to be considered as `valid` before scaling, but +// these values should be mapped to ranges that the solvers can effectively deal +// with. However, we still consider very large values just as an infinite +// quantity, this protects from overflow in double computations, and also +// signals possible user errors. We also use 2^-kScalerInfinityLog2 as absolute +// zero threshold, i.e. anything less than or equal to 2^-kScalerInfinityLog2 is +// considered as zero. +constexpr int kScalerInfinityLog2 = 332; +// This is hexadecimal notation for floating point, we expect +// ilogb(kScalerInfinity) == kScalerInfinityLog2. +// TODO(user): Change this into pow(2, kScalerInfinityLog2) when it +// supports constexpr. +// NOTE(user): I had to change 0x1p332 to the decimal expression as +// SWIG does not understand (as of 2020-11-24) hexadecimal floating point +// notation. +constexpr double kScalerInfinity = + 8.7490028991320476975e+99; // 2^332 \approx 8.749e99. + +// We use a relative tolerance of about 2e-10 to distinguish zero right hand +// sides from non-zero right-hand-side, see +// go/mpsolver-scaling#the-canned-recommendation. +// Our choice of 2^-32 can be understood as trusting the first 32 bits of +// mantissa results on computation, and treating the last 20 bits as +// `unreliable` due to possible accumulated rounding errors. 
+constexpr int kRelZeroLog2 = -32; +constexpr double kRelZero = 2.3283064365386962890625e-10; + +// A bitmask that remembers the 65 most significant bits set. +class AutoShiftingBitmask65 { + public: + // Get the most significant bit set. + inline int GetMsb() const { return msb_; } + // Get a mask of the following 64 bits sets, where bit 0 is the least + // significant bit. + inline uint64_t GetMask() const { return mask_; } + void SetBit(int bit); + std::string DebugString() const; + + private: + // Most significant bit. + int msb_ = std::numeric_limits::min(); + // Bits sets under 'msb', + // note that for k [0:63] if ((mask>>k)&1) == 1, + // then we have set bit (msb - 64 + k) + uint64_t mask_ = 0; +}; + +enum class OverflowHandlingMode { kClampToMin, kClampToMax, kEvenOverflow }; + +// Stores data associated with a single row -- coefficients and the bounds of +// variables associated with those coefficients -- and suggests how to scale +// them to bring them to a more desirable range (from the perspective of a +// mixed-integer programming solver). +class RowScalingRange { + public: + RowScalingRange() = default; + // Will CHECK-fail if `min_log2_value` is positive or if `max_log2_value` is + // negative. + RowScalingRange(int min_log2_value, int max_log2_value); + + // Computes the power-of-two scaling factor that will bring the data in this + // object to a desirable numerical range. There are a lot of nitty-gritty + // details to do this in a reasonable and general way; see the .cc file for + // details. + int GetLog2Scale(OverflowHandlingMode overflow_handling_mode) const; + + void Update(double value); + // This function keeps track of the range of double values (or values * + // bound if the bound is a finite quantity) seen in a sequence of values (for + // example, coefficients and bounds of variables in a linear constraint). 
To + // keep things simple, it relies on looking at the exponent of the double + // representation, but remembers only the 65 most significant such exponents. + // Note that there is no point in storing more than 52 significant bits as + // that is the precision limit of double numbers. + void UpdateWithBounds(double value, double lower_bound, double upper_bound); + + std::string DebugString() const; + + // The rest is exposed for testing purposes only. + struct Log2BitRange { + int min = 0; + int max = 0; + }; + int GetUncorrectedLog2Scale( + Log2BitRange bit_range, + OverflowHandlingMode overflow_handling_mode) const; + int CorrectLog2Scale(int log2_scale, Log2BitRange coefficient_bit_range, + Log2BitRange overall_bit_range) const; + + private: + // Order of magnitudes for actual coefficients. + AutoShiftingBitmask65 coefficients_; + // Order of magnitudes for products of bounds and coefficients. + AutoShiftingBitmask65 bounds_; + // Range of exponent considered as acceptable. + const int min_log2_value_ = 0; // 2^0 = 1. + const int max_log2_value_ = 12; // 2^12 = 4096. +}; + +// Stores values associated with a single variable -- its bounds, and +// coefficients from constraints and objectives, and suggests how to scale them +// to bring them to a more desirable range (from the perspective of a +// mixed-integer programming solver). +class ColumnScalingRange { + public: + ColumnScalingRange(const double lower_bound, const double upper_bound) + : lower_bound_(lower_bound), upper_bound_(upper_bound) {} + + // Computes the power-of-two scaling factor that will bring the data in this + // object to a desirable numerical range of + // [2^`min_log2_value`, 2^`max_log2_value`]. If this is not attainable, it + // will prefer to scale coefficients over bounds, and prefer to scale down + // large values over scaling up small values (i.e., it is implicitly providing + // ClampToMax behavior). 
+ int GetLog2Scale(int min_log2_value, int max_log2_value) const; + + void UpdateWithCoefficient(double coefficient); + + private: + double lower_bound_; + double upper_bound_; + + AutoShiftingBitmask65 coefficients_; +}; + +// Exposed publicly for testing purposes only. +namespace internal { + +// Compute the smallest bit in `range`, ignoring those that are either very +// small (below -kScalerInfinityLog2) or will be discarded to fit within a +// 65-bit mantissa (relative to the most significant bit in `range`). +int ComputeMinimumNonIgnoredBit(const AutoShiftingBitmask65& range); + +} // namespace internal + +} // namespace operations_research + +#endif // OR_TOOLS_MATH_OPT_LABS_SCALER_UTIL_H_ From ef0a57b51f571d77031a9ccc0154d3992df871cc Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 2 Jul 2025 11:54:13 +0200 Subject: [PATCH 142/509] cleanup from google3 --- ortools/packing/testdata/Class_01.2bp | 3324 +------------------------ ortools/util/fp_utils.h | 2 +- 2 files changed, 75 insertions(+), 3251 deletions(-) diff --git a/ortools/packing/testdata/Class_01.2bp b/ortools/packing/testdata/Class_01.2bp index fc0e3c6e40..265998df61 100644 --- a/ortools/packing/testdata/Class_01.2bp +++ b/ortools/packing/testdata/Class_01.2bp @@ -1,3250 +1,74 @@ - 1 PROBLEM CLASS - 20 N. OF ITEMS - 1 1 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 9 5 H(I),W(I),I=1,...,N - 2 4 - 6 10 - 7 5 - 3 6 - 7 10 - 5 1 - 5 3 - 9 6 - 4 2 - 7 6 - 2 7 - 3 8 - 10 4 - 5 4 - 3 10 - 3 8 - 8 7 - 3 8 - 7 8 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 2 2 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 2 2 H(I),W(I),I=1,...,N - 8 6 - 2 10 - 3 1 - 4 8 - 10 3 - 9 1 - 5 1 - 3 6 - 1 1 - 2 4 - 2 9 - 9 1 - 5 9 - 7 4 - 2 2 - 4 3 - 7 9 - 1 4 - 8 9 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 3 3 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 5 7 H(I),W(I),I=1,...,N - 6 10 - 6 5 - 2 7 - 8 4 - 10 9 - 5 8 - 6 8 - 9 4 - 3 9 - 10 3 - 5 9 - 7 1 - 9 8 - 6 4 - 6 3 - 3 4 - 2 10 - 1 6 - 4 1 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 4 4 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 5 5 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 10 H(I),W(I),I=1,...,N - 10 2 - 4 2 - 10 10 - 2 7 - 10 9 - 6 5 - 5 7 - 7 1 - 3 5 - 9 3 - 4 9 - 10 2 - 3 4 - 2 2 - 9 4 - 8 2 - 1 1 - 7 1 - 4 4 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 6 6 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 7 7 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 10 H(I),W(I),I=1,...,N - 1 10 - 7 3 - 3 4 - 9 2 - 2 10 - 1 3 - 8 10 - 8 3 - 1 7 - 7 3 - 4 1 - 10 8 - 7 2 - 1 5 - 1 4 - 8 6 - 9 5 - 2 5 - 9 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 8 8 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 9 9 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 6 H(I),W(I),I=1,...,N - 4 3 - 5 10 - 8 6 - 10 4 - 9 8 - 8 7 - 2 5 - 9 6 - 3 9 - 9 7 - 5 5 - 4 1 - 2 9 - 7 4 - 1 4 - 10 2 - 3 4 - 3 8 - 4 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 10 10 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 1 11 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 2 2 H(I),W(I),I=1,...,N - 8 6 - 2 10 - 3 1 - 4 8 - 10 3 - 9 1 - 5 1 - 3 6 - 1 1 - 2 4 - 2 9 - 9 1 - 5 9 - 7 4 - 2 2 - 4 3 - 7 9 - 1 4 - 8 9 - 3 4 - 5 6 - 7 4 - 4 10 - 5 9 - 2 1 - 1 7 - 1 3 - 3 8 - 4 4 - 2 7 - 9 6 - 2 2 - 8 2 - 1 4 - 6 10 - 1 7 - 9 3 - 5 9 - 8 3 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 2 12 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - 5 2 - 1 5 - 4 8 - 3 6 - 10 7 - 10 5 - 2 5 - 9 5 - 6 9 - 2 10 - 9 9 - 2 7 - 2 1 - 9 8 - 10 2 - 2 7 - 10 3 - 7 2 - 4 9 - 1 3 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 3 13 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - 9 6 - 6 3 - 10 7 - 4 7 - 2 4 - 5 5 - 4 8 - 4 6 - 5 4 - 9 10 - 2 8 - 1 3 - 1 3 - 5 1 - 3 2 - 7 6 - 4 4 - 10 3 - 10 10 - 8 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 4 14 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - 5 7 - 2 5 - 10 10 - 5 4 - 5 4 - 4 7 - 5 7 - 9 8 - 8 10 - 6 5 - 4 8 - 2 9 - 8 9 - 9 4 - 8 3 - 10 8 - 5 5 - 4 3 - 3 3 - 1 2 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 5 15 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - 10 5 - 10 3 - 9 2 - 5 8 - 10 9 - 10 8 - 5 1 - 5 5 - 7 5 - 10 4 - 6 2 - 10 2 - 1 8 - 2 10 - 8 10 - 8 3 - 4 8 - 2 8 - 10 8 - 1 7 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 6 16 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 7 17 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 3 1 H(I),W(I),I=1,...,N - 6 6 - 10 4 - 10 1 - 9 2 - 7 1 - 7 7 - 3 3 - 5 4 - 4 9 - 7 9 - 3 2 - 2 6 - 2 2 - 1 6 - 1 8 - 8 9 - 10 6 - 10 6 - 4 3 - 10 1 - 10 10 - 1 2 - 10 2 - 1 9 - 8 6 - 1 8 - 2 4 - 6 6 - 3 10 - 4 5 - 5 2 - 5 8 - 6 1 - 4 3 - 7 1 - 9 3 - 3 10 - 10 10 - 2 10 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 8 18 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 10 H(I),W(I),I=1,...,N - 1 3 - 7 9 - 5 9 - 4 9 - 5 8 - 5 9 - 2 6 - 9 8 - 10 9 - 6 3 - 8 9 - 1 9 - 5 10 - 2 10 - 8 1 - 10 8 - 3 1 - 10 2 - 1 7 - 9 4 - 8 5 - 7 6 - 10 6 - 1 8 - 9 6 - 7 10 - 10 1 - 7 6 - 10 4 - 4 6 - 10 7 - 8 8 - 4 10 - 8 9 - 2 3 - 10 6 - 2 1 - 1 9 - 1 2 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 9 19 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 5 H(I),W(I),I=1,...,N - 2 6 - 6 3 - 3 2 - 4 1 - 5 3 - 4 4 - 6 2 - 3 9 - 7 2 - 8 8 - 3 1 - 10 5 - 1 10 - 3 8 - 3 4 - 5 8 - 3 1 - 7 9 - 9 1 - 5 9 - 2 9 - 4 10 - 6 3 - 7 5 - 10 4 - 9 3 - 3 6 - 6 4 - 2 8 - 10 3 - 2 10 - 5 9 - 7 6 - 10 1 - 9 5 - 5 4 - 8 7 - 4 7 - 3 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 10 20 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 1 21 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 5 7 H(I),W(I),I=1,...,N - 6 10 - 6 5 - 2 7 - 8 4 - 10 9 - 5 8 - 6 8 - 9 4 - 3 9 - 10 3 - 5 9 - 7 1 - 9 8 - 6 4 - 6 3 - 3 4 - 2 10 - 1 6 - 4 1 - 4 7 - 2 10 - 9 8 - 2 4 - 8 4 - 1 6 - 5 2 - 10 9 - 8 4 - 4 1 - 7 4 - 10 3 - 8 10 - 10 10 - 8 2 - 8 9 - 9 7 - 2 3 - 4 10 - 3 4 - 1 3 - 5 1 - 2 1 - 6 4 - 2 2 - 8 10 - 7 6 - 4 4 - 9 8 - 3 9 - 2 7 - 5 8 - 4 2 - 5 10 - 7 8 - 1 5 - 4 3 - 5 8 - 4 9 - 9 8 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 2 22 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - 9 6 - 6 3 - 10 7 - 4 7 - 2 4 - 5 5 - 4 8 - 4 6 - 5 4 - 9 10 - 2 8 - 1 3 - 1 3 - 5 1 - 3 2 - 7 6 - 4 4 - 10 3 - 10 10 - 8 8 - 3 3 - 8 4 - 8 3 - 2 5 - 2 9 - 3 8 - 6 7 - 1 5 - 4 4 - 5 9 - 6 1 - 1 10 - 10 3 - 6 8 - 3 7 - 1 6 - 2 5 - 1 4 - 7 1 - 3 3 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 3 23 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 6 H(I),W(I),I=1,...,N - 4 3 - 5 10 - 8 6 - 10 4 - 9 8 - 8 7 - 2 5 - 9 6 - 3 9 - 9 7 - 5 5 - 4 1 - 2 9 - 7 4 - 1 4 - 10 2 - 3 4 - 3 8 - 4 6 - 6 5 - 1 2 - 4 10 - 4 6 - 4 6 - 9 5 - 8 7 - 3 2 - 10 1 - 7 10 - 7 7 - 6 7 - 2 2 - 7 4 - 8 3 - 2 6 - 3 1 - 5 10 - 8 10 - 4 8 - 8 8 - 10 4 - 1 7 - 7 6 - 7 10 - 3 5 - 1 5 - 10 5 - 1 5 - 1 5 - 8 2 - 5 4 - 6 5 - 2 9 - 4 5 - 7 6 - 2 6 - 9 6 - 4 7 - 7 4 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 4 24 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - 5 8 - 5 9 - 3 7 - 10 10 - 2 1 - 9 6 - 1 3 - 5 4 - 10 5 - 8 6 - 4 7 - 7 5 - 10 9 - 8 1 - 10 6 - 8 3 - 8 5 - 8 7 - 1 8 - 3 10 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 5 25 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 1 3 H(I),W(I),I=1,...,N - 4 4 - 9 6 - 9 9 - 8 5 - 7 10 - 7 1 - 4 3 - 6 9 - 6 9 - 9 5 - 5 4 - 5 2 - 6 4 - 5 4 - 5 1 - 2 8 - 5 10 - 6 6 - 10 8 - 6 1 - 7 6 - 8 3 - 8 8 - 9 1 - 6 4 - 10 1 - 1 2 - 6 10 - 3 9 - 4 9 - 5 2 - 6 3 - 7 1 - 6 9 - 10 2 - 2 9 - 7 3 - 4 7 - 6 3 - 8 1 - 9 8 - 1 2 - 10 5 - 5 4 - 7 7 - 3 5 - 1 6 - 4 7 - 6 1 - 8 3 - 1 3 - 10 2 - 5 6 - 8 4 - 5 8 - 3 5 - 6 1 - 10 1 - 2 9 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 6 26 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 5 H(I),W(I),I=1,...,N - 2 6 - 6 3 - 3 2 - 4 1 - 5 3 - 4 4 - 6 2 - 3 9 - 7 2 - 8 8 - 3 1 - 10 5 - 1 10 - 3 8 - 3 4 - 5 8 - 3 1 - 7 9 - 9 1 - 5 9 - 2 9 - 4 10 - 6 3 - 7 5 - 10 4 - 9 3 - 3 6 - 6 4 - 2 8 - 10 3 - 2 10 - 5 9 - 7 6 - 10 1 - 9 5 - 5 4 - 8 7 - 4 7 - 3 8 - 6 10 - 9 4 - 2 7 - 5 7 - 5 7 - 1 2 - 5 1 - 2 6 - 1 2 - 5 10 - 9 10 - 1 9 - 5 4 - 6 10 - 3 10 - 7 5 - 4 5 - 4 7 - 9 10 - 3 6 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 7 27 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 5 H(I),W(I),I=1,...,N - 3 8 - 4 5 - 10 8 - 7 3 - 10 9 - 2 7 - 3 4 - 5 9 - 4 2 - 9 1 - 2 4 - 8 3 - 6 3 - 9 4 - 1 9 - 3 1 - 6 1 - 6 10 - 1 4 - 4 3 - 1 7 - 10 3 - 3 10 - 6 10 - 9 8 - 2 3 - 2 8 - 8 8 - 2 2 - 9 2 - 8 4 - 2 6 - 6 1 - 9 5 - 3 6 - 4 7 - 1 2 - 5 1 - 3 7 - 3 4 - 7 2 - 2 3 - 5 3 - 10 9 - 1 5 - 8 6 - 3 1 - 1 2 - 2 5 - 7 9 - 2 5 - 6 10 - 1 2 - 3 4 - 1 1 - 7 1 - 5 8 - 7 7 - 2 6 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 8 28 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 3 H(I),W(I),I=1,...,N - 9 8 - 5 7 - 8 10 - 5 10 - 4 1 - 7 3 - 10 8 - 3 1 - 6 1 - 7 1 - 3 6 - 6 5 - 4 1 - 3 7 - 7 5 - 1 5 - 4 4 - 8 10 - 9 5 - 6 6 - 10 1 - 8 1 - 8 4 - 3 7 - 7 3 - 1 7 - 6 9 - 7 10 - 4 6 - 9 6 - 8 2 - 8 10 - 3 9 - 9 1 - 3 1 - 8 8 - 10 4 - 8 6 - 3 2 - 2 4 - 4 7 - 9 2 - 5 9 - 10 4 - 6 7 - 8 9 - 7 7 - 3 8 - 2 5 - 4 5 - 1 4 - 7 7 - 2 8 - 9 7 - 2 4 - 1 7 - 8 4 - 8 7 - 10 2 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 9 29 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 5 H(I),W(I),I=1,...,N - 1 6 - 2 7 - 8 2 - 3 2 - 1 8 - 1 5 - 5 4 - 10 5 - 4 4 - 9 1 - 10 7 - 8 8 - 3 3 - 1 5 - 2 8 - 7 2 - 2 8 - 7 3 - 2 5 - 4 7 - 2 4 - 8 5 - 7 2 - 8 10 - 4 10 - 10 2 - 4 3 - 10 10 - 3 8 - 2 10 - 8 7 - 7 9 - 9 3 - 6 8 - 2 6 - 8 2 - 4 6 - 7 9 - 6 7 - 3 9 - 3 7 - 6 7 - 3 7 - 10 10 - 6 2 - 3 1 - 7 10 - 7 3 - 4 1 - 5 5 - 8 10 - 5 6 - 3 5 - 10 2 - 7 7 - 2 1 - 2 10 - 10 3 - 1 3 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 10 30 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 8 H(I),W(I),I=1,...,N - 8 3 - 5 5 - 2 3 - 4 9 - 10 8 - 2 9 - 6 5 - 10 1 - 7 6 - 3 2 - 6 5 - 8 4 - 6 10 - 7 10 - 5 2 - 4 8 - 4 5 - 7 10 - 9 7 - 7 10 - 5 9 - 8 6 - 5 6 - 8 9 - 3 6 - 9 3 - 7 9 - 4 6 - 8 10 - 2 10 - 1 8 - 3 8 - 2 2 - 3 9 - 4 7 - 8 4 - 10 1 - 10 6 - 9 6 - 3 4 - 2 6 - 6 9 - 2 8 - 9 6 - 8 3 - 6 2 - 2 7 - 6 8 - 6 6 - 10 8 - 10 3 - 2 1 - 4 10 - 9 8 - 3 7 - 4 3 - 4 4 - 9 3 - 9 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 1 31 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - 5 2 - 1 5 - 4 8 - 3 6 - 10 7 - 10 5 - 2 5 - 9 5 - 6 9 - 2 10 - 9 9 - 2 7 - 2 1 - 9 8 - 10 2 - 2 7 - 10 3 - 7 2 - 4 9 - 1 3 - 6 7 - 6 6 - 3 9 - 5 8 - 8 9 - 6 1 - 4 5 - 1 8 - 9 8 - 4 7 - 5 2 - 3 1 - 5 5 - 9 2 - 8 9 - 6 9 - 4 7 - 3 3 - 8 8 - 9 8 - 8 2 - 10 2 - 5 4 - 4 7 - 3 3 - 2 6 - 1 8 - 7 8 - 9 4 - 8 4 - 1 10 - 6 7 - 6 6 - 6 7 - 5 6 - 5 3 - 1 9 - 4 1 - 4 6 - 7 7 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 2 32 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - 5 7 - 2 5 - 10 10 - 5 4 - 5 4 - 4 7 - 5 7 - 9 8 - 8 10 - 6 5 - 4 8 - 2 9 - 8 9 - 9 4 - 8 3 - 10 8 - 5 5 - 4 3 - 3 3 - 1 2 - 10 8 - 6 4 - 8 4 - 7 9 - 10 10 - 5 2 - 5 5 - 5 1 - 3 6 - 3 8 - 10 9 - 2 5 - 2 5 - 5 2 - 1 10 - 1 9 - 2 10 - 1 10 - 1 7 - 9 4 - 1 6 - 2 2 - 6 4 - 2 7 - 3 2 - 4 9 - 4 5 - 5 7 - 3 10 - 6 7 - 7 9 - 2 6 - 9 5 - 10 5 - 2 8 - 2 9 - 4 8 - 2 6 - 6 9 - 8 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 3 33 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - 5 8 - 5 9 - 3 7 - 10 10 - 2 1 - 9 6 - 1 3 - 5 4 - 10 5 - 8 6 - 4 7 - 7 5 - 10 9 - 8 1 - 10 6 - 8 3 - 8 5 - 8 7 - 1 8 - 3 10 - 2 9 - 1 4 - 4 6 - 1 2 - 5 10 - 1 2 - 7 5 - 5 6 - 3 9 - 7 8 - 1 3 - 10 6 - 3 2 - 2 1 - 3 4 - 5 7 - 9 9 - 2 3 - 2 3 - 2 8 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 4 34 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 10 H(I),W(I),I=1,...,N - 1 3 - 7 9 - 5 9 - 4 9 - 5 8 - 5 9 - 2 6 - 9 8 - 10 9 - 6 3 - 8 9 - 1 9 - 5 10 - 2 10 - 8 1 - 10 8 - 3 1 - 10 2 - 1 7 - 9 4 - 8 5 - 7 6 - 10 6 - 1 8 - 9 6 - 7 10 - 10 1 - 7 6 - 10 4 - 4 6 - 10 7 - 8 8 - 4 10 - 8 9 - 2 3 - 10 6 - 2 1 - 1 9 - 1 2 - 2 4 - 6 5 - 8 8 - 8 7 - 6 2 - 10 5 - 8 1 - 3 10 - 8 3 - 5 6 - 4 8 - 2 1 - 7 1 - 2 6 - 1 10 - 5 7 - 4 6 - 6 9 - 8 3 - 3 5 - 6 9 - 7 10 - 7 6 - 2 10 - 1 7 - 7 7 - 4 1 - 1 5 - 2 8 - 1 3 - 7 4 - 3 10 - 3 5 - 8 3 - 9 3 - 2 8 - 5 3 - 1 6 - 5 1 - 7 3 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 5 35 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - 9 5 - 2 10 - 7 3 - 1 7 - 2 8 - 3 5 - 9 9 - 8 7 - 4 7 - 2 2 - 10 6 - 1 10 - 10 5 - 7 6 - 3 4 - 4 9 - 9 7 - 1 8 - 4 3 - 8 8 - 4 3 - 9 8 - 2 10 - 3 8 - 10 4 - 1 3 - 8 4 - 7 10 - 6 6 - 8 1 - 9 7 - 7 9 - 4 8 - 6 4 - 3 4 - 5 5 - 10 2 - 4 8 - 2 4 - 8 10 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 6 36 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 3 H(I),W(I),I=1,...,N - 9 8 - 5 7 - 8 10 - 5 10 - 4 1 - 7 3 - 10 8 - 3 1 - 6 1 - 7 1 - 3 6 - 6 5 - 4 1 - 3 7 - 7 5 - 1 5 - 4 4 - 8 10 - 9 5 - 6 6 - 10 1 - 8 1 - 8 4 - 3 7 - 7 3 - 1 7 - 6 9 - 7 10 - 4 6 - 9 6 - 8 2 - 8 10 - 3 9 - 9 1 - 3 1 - 8 8 - 10 4 - 8 6 - 3 2 - 2 4 - 4 7 - 9 2 - 5 9 - 10 4 - 6 7 - 8 9 - 7 7 - 3 8 - 2 5 - 4 5 - 1 4 - 7 7 - 2 8 - 9 7 - 2 4 - 1 7 - 8 4 - 8 7 - 10 2 - 7 7 - 4 6 - 10 2 - 7 6 - 1 10 - 10 8 - 8 1 - 8 9 - 1 10 - 9 1 - 6 5 - 3 7 - 10 6 - 5 5 - 5 9 - 3 7 - 4 2 - 8 8 - 6 4 - 4 4 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 7 37 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 9 6 H(I),W(I),I=1,...,N - 4 3 - 5 9 - 2 10 - 7 5 - 5 6 - 6 9 - 1 3 - 6 9 - 10 4 - 5 6 - 7 8 - 5 4 - 10 5 - 9 2 - 10 10 - 6 10 - 2 2 - 6 2 - 2 10 - 5 7 - 3 9 - 9 5 - 9 8 - 10 2 - 7 7 - 3 4 - 8 10 - 5 3 - 8 6 - 6 4 - 3 6 - 3 4 - 5 4 - 2 4 - 9 7 - 5 9 - 2 7 - 6 6 - 5 9 - 2 7 - 3 10 - 6 5 - 3 1 - 1 10 - 7 7 - 5 2 - 9 6 - 9 5 - 7 8 - 8 7 - 1 8 - 9 9 - 8 3 - 5 6 - 3 6 - 8 6 - 8 10 - 7 9 - 8 4 - 2 10 - 1 7 - 10 7 - 7 5 - 6 1 - 1 6 - 3 6 - 1 10 - 4 1 - 8 3 - 8 5 - 7 1 - 5 5 - 4 6 - 10 7 - 2 2 - 2 2 - 5 8 - 10 6 - 10 4 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 8 38 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 1 6 H(I),W(I),I=1,...,N - 4 1 - 4 8 - 3 1 - 10 9 - 3 6 - 10 6 - 3 8 - 7 5 - 3 2 - 1 7 - 7 4 - 1 8 - 4 7 - 3 4 - 5 5 - 3 8 - 4 5 - 4 2 - 8 9 - 10 6 - 9 10 - 8 10 - 2 8 - 10 9 - 4 3 - 10 10 - 6 8 - 6 5 - 3 3 - 8 8 - 3 9 - 2 7 - 6 9 - 5 3 - 7 1 - 9 10 - 3 7 - 7 6 - 7 1 - 7 2 - 2 1 - 1 3 - 7 8 - 4 7 - 1 6 - 2 4 - 10 4 - 6 10 - 2 1 - 3 1 - 7 4 - 8 9 - 1 8 - 4 8 - 10 7 - 4 7 - 6 3 - 7 5 - 3 5 - 4 9 - 10 5 - 8 5 - 7 5 - 9 4 - 8 5 - 6 2 - 3 5 - 4 5 - 10 9 - 2 6 - 6 7 - 10 7 - 7 7 - 3 8 - 6 6 - 8 9 - 6 1 - 7 5 - 5 2 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 9 39 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 9 H(I),W(I),I=1,...,N - 6 7 - 10 3 - 3 1 - 2 6 - 2 7 - 6 5 - 5 6 - 9 10 - 5 9 - 2 7 - 2 5 - 10 3 - 6 8 - 1 9 - 1 8 - 5 10 - 5 5 - 7 4 - 9 3 - 4 1 - 8 10 - 9 6 - 10 8 - 5 7 - 5 10 - 1 5 - 8 4 - 6 4 - 7 3 - 6 2 - 3 9 - 9 1 - 10 1 - 5 4 - 7 10 - 10 10 - 3 10 - 10 9 - 5 10 - 7 7 - 9 10 - 5 10 - 6 4 - 3 10 - 2 1 - 1 2 - 3 2 - 3 3 - 1 10 - 8 3 - 1 5 - 7 9 - 10 6 - 4 7 - 9 9 - 6 9 - 2 10 - 6 9 - 9 3 - 7 4 - 8 9 - 6 7 - 6 9 - 7 1 - 10 2 - 2 4 - 10 3 - 9 7 - 2 9 - 10 5 - 4 3 - 9 5 - 5 8 - 4 10 - 1 2 - 6 2 - 10 7 - 9 10 - 2 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 10 40 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 8 H(I),W(I),I=1,...,N - 8 8 - 1 7 - 6 6 - 1 5 - 2 2 - 3 4 - 9 4 - 9 7 - 6 2 - 3 3 - 2 3 - 3 2 - 3 2 - 10 9 - 7 2 - 9 2 - 4 6 - 6 2 - 10 3 - 4 3 - 1 3 - 6 4 - 9 2 - 1 5 - 8 6 - 10 10 - 7 7 - 6 8 - 6 10 - 8 10 - 9 1 - 8 9 - 5 3 - 8 5 - 5 10 - 8 7 - 3 9 - 8 1 - 6 2 - 3 4 - 7 2 - 10 7 - 8 9 - 1 5 - 9 3 - 10 5 - 1 7 - 4 9 - 7 1 - 6 1 - 5 6 - 9 9 - 7 5 - 1 3 - 7 6 - 3 9 - 3 1 - 10 4 - 5 4 - 10 10 - 10 3 - 3 10 - 3 9 - 4 3 - 6 6 - 1 9 - 5 3 - 5 3 - 5 9 - 1 3 - 10 1 - 5 10 - 2 4 - 10 9 - 10 2 - 9 6 - 5 7 - 1 4 - 2 8 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 1 41 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 4 10 H(I),W(I),I=1,...,N - 10 2 - 4 2 - 10 10 - 2 7 - 10 9 - 6 5 - 5 7 - 7 1 - 3 5 - 9 3 - 4 9 - 10 2 - 3 4 - 2 2 - 9 4 - 8 2 - 1 1 - 7 1 - 4 4 - 10 6 - 6 4 - 10 3 - 9 6 - 6 3 - 7 6 - 9 2 - 7 7 - 4 6 - 10 3 - 8 7 - 2 1 - 2 10 - 9 3 - 10 2 - 4 2 - 1 4 - 9 8 - 6 10 - 4 10 - 9 9 - 9 4 - 7 2 - 9 7 - 3 3 - 1 10 - 10 10 - 7 8 - 6 3 - 1 8 - 2 8 - 1 2 - 3 2 - 8 4 - 7 7 - 6 2 - 4 6 - 3 7 - 9 7 - 10 3 - 10 3 - 2 8 - 8 6 - 7 4 - 7 5 - 5 4 - 5 1 - 2 7 - 4 8 - 4 3 - 7 4 - 3 7 - 3 1 - 3 8 - 2 3 - 3 5 - 10 6 - 2 3 - 3 4 - 7 1 - 3 9 - 4 9 - 5 1 - 4 1 - 5 9 - 3 6 - 6 9 - 7 5 - 2 7 - 9 2 - 10 6 - 1 4 - 2 10 - 3 3 - 1 6 - 5 4 - 7 6 - 2 4 - 10 4 - 1 4 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 2 42 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - 10 5 - 10 3 - 9 2 - 5 8 - 10 9 - 10 8 - 5 1 - 5 5 - 7 5 - 10 4 - 6 2 - 10 2 - 1 8 - 2 10 - 8 10 - 8 3 - 4 8 - 2 8 - 10 8 - 1 7 - 1 7 - 8 4 - 4 6 - 6 7 - 1 10 - 3 6 - 6 7 - 10 7 - 7 7 - 2 8 - 5 5 - 7 8 - 4 9 - 5 3 - 2 10 - 1 1 - 1 2 - 3 2 - 4 3 - 2 1 - 10 5 - 2 6 - 9 1 - 2 8 - 6 10 - 2 2 - 10 2 - 6 4 - 10 3 - 3 7 - 1 9 - 3 5 - 2 2 - 2 5 - 3 7 - 6 8 - 8 1 - 7 10 - 6 5 - 10 7 - 8 3 - 2 1 - 8 4 - 4 7 - 3 9 - 1 2 - 6 2 - 10 7 - 9 10 - 3 7 - 2 6 - 4 9 - 5 3 - 10 5 - 3 9 - 4 9 - 4 5 - 9 9 - 8 6 - 3 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 3 43 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 1 3 H(I),W(I),I=1,...,N - 4 4 - 9 6 - 9 9 - 8 5 - 7 10 - 7 1 - 4 3 - 6 9 - 6 9 - 9 5 - 5 4 - 5 2 - 6 4 - 5 4 - 5 1 - 2 8 - 5 10 - 6 6 - 10 8 - 6 1 - 7 6 - 8 3 - 8 8 - 9 1 - 6 4 - 10 1 - 1 2 - 6 10 - 3 9 - 4 9 - 5 2 - 6 3 - 7 1 - 6 9 - 10 2 - 2 9 - 7 3 - 4 7 - 6 3 - 8 1 - 9 8 - 1 2 - 10 5 - 5 4 - 7 7 - 3 5 - 1 6 - 4 7 - 6 1 - 8 3 - 1 3 - 10 2 - 5 6 - 8 4 - 5 8 - 3 5 - 6 1 - 10 1 - 2 9 - 5 4 - 5 8 - 1 8 - 4 2 - 1 2 - 1 3 - 4 6 - 8 1 - 1 4 - 5 5 - 5 5 - 2 1 - 6 10 - 3 5 - 3 3 - 8 10 - 2 1 - 5 10 - 10 7 - 1 2 - 8 3 - 3 8 - 1 9 - 1 2 - 6 5 - 1 2 - 5 6 - 10 8 - 2 10 - 9 6 - 5 7 - 3 3 - 4 2 - 10 1 - 5 3 - 10 3 - 6 10 - 8 7 - 6 9 - 2 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 4 44 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - 9 5 - 2 10 - 7 3 - 1 7 - 2 8 - 3 5 - 9 9 - 8 7 - 4 7 - 2 2 - 10 6 - 1 10 - 10 5 - 7 6 - 3 4 - 4 9 - 9 7 - 1 8 - 4 3 - 8 8 - 4 3 - 9 8 - 2 10 - 3 8 - 10 4 - 1 3 - 8 4 - 7 10 - 6 6 - 8 1 - 9 7 - 7 9 - 4 8 - 6 4 - 3 4 - 5 5 - 10 2 - 4 8 - 2 4 - 8 10 - 2 4 - 5 3 - 3 9 - 5 10 - 3 2 - 3 9 - 3 6 - 7 2 - 8 9 - 7 3 - 6 3 - 9 10 - 7 1 - 1 4 - 6 2 - 2 9 - 1 6 - 9 4 - 3 8 - 7 8 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 5 45 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 9 1 H(I),W(I),I=1,...,N - 9 1 - 5 6 - 8 5 - 5 10 - 5 6 - 9 4 - 2 4 - 5 2 - 9 8 - 10 4 - 6 4 - 1 8 - 8 9 - 8 1 - 3 4 - 7 10 - 10 4 - 5 6 - 6 6 - 3 2 - 8 2 - 6 9 - 7 6 - 2 5 - 7 6 - 1 6 - 6 3 - 8 10 - 5 1 - 1 7 - 10 8 - 1 2 - 6 6 - 2 3 - 6 8 - 2 1 - 5 3 - 3 10 - 9 1 - 8 9 - 10 8 - 6 7 - 2 10 - 7 1 - 4 9 - 7 7 - 5 9 - 2 6 - 2 9 - 4 3 - 1 9 - 8 7 - 3 3 - 10 7 - 4 10 - 3 9 - 10 1 - 1 9 - 4 10 - 1 1 - 9 4 - 5 6 - 2 6 - 7 4 - 6 8 - 4 6 - 5 10 - 9 6 - 7 3 - 5 2 - 1 9 - 10 4 - 4 9 - 5 8 - 3 2 - 5 1 - 9 3 - 7 5 - 6 10 - 3 3 - 2 3 - 7 3 - 9 8 - 7 8 - 10 3 - 4 10 - 3 7 - 2 8 - 1 7 - 10 3 - 6 8 - 7 9 - 7 4 - 10 4 - 5 7 - 5 10 - 4 6 - 3 10 - 3 2 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 6 46 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 8 H(I),W(I),I=1,...,N - 8 3 - 5 5 - 2 3 - 4 9 - 10 8 - 2 9 - 6 5 - 10 1 - 7 6 - 3 2 - 6 5 - 8 4 - 6 10 - 7 10 - 5 2 - 4 8 - 4 5 - 7 10 - 9 7 - 7 10 - 5 9 - 8 6 - 5 6 - 8 9 - 3 6 - 9 3 - 7 9 - 4 6 - 8 10 - 2 10 - 1 8 - 3 8 - 2 2 - 3 9 - 4 7 - 8 4 - 10 1 - 10 6 - 9 6 - 3 4 - 2 6 - 6 9 - 2 8 - 9 6 - 8 3 - 6 2 - 2 7 - 6 8 - 6 6 - 10 8 - 10 3 - 2 1 - 4 10 - 9 8 - 3 7 - 4 3 - 4 4 - 9 3 - 9 6 - 4 1 - 2 10 - 10 10 - 10 8 - 9 8 - 6 4 - 2 6 - 3 6 - 8 9 - 9 5 - 2 5 - 5 5 - 2 4 - 6 4 - 8 1 - 10 3 - 7 4 - 7 7 - 4 4 - 2 4 - 1 5 - 3 4 - 4 5 - 1 3 - 8 6 - 10 7 - 7 1 - 9 8 - 3 9 - 8 10 - 5 10 - 1 2 - 4 10 - 7 3 - 5 5 - 6 10 - 4 7 - 4 9 - 4 1 - 7 1 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 7 47 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 7 5 H(I),W(I),I=1,...,N - 5 4 - 1 1 - 9 6 - 3 10 - 3 9 - 1 1 - 2 1 - 5 2 - 3 3 - 1 7 - 8 10 - 7 10 - 1 10 - 2 4 - 10 3 - 2 7 - 6 4 - 5 1 - 3 1 - 1 9 - 10 5 - 5 10 - 6 10 - 5 4 - 8 5 - 2 7 - 1 9 - 1 5 - 9 9 - 8 10 - 4 1 - 6 6 - 6 6 - 8 2 - 4 10 - 3 8 - 3 9 - 2 8 - 2 5 - 9 1 - 1 3 - 1 8 - 5 10 - 10 3 - 1 7 - 1 3 - 10 8 - 1 1 - 8 3 - 1 9 - 2 10 - 6 7 - 2 6 - 3 6 - 4 8 - 4 8 - 5 6 - 3 4 - 6 6 - 8 3 - 2 5 - 9 10 - 1 5 - 2 3 - 3 9 - 5 2 - 3 5 - 7 4 - 9 6 - 4 4 - 2 4 - 4 4 - 6 8 - 7 10 - 9 9 - 8 7 - 3 1 - 5 9 - 1 2 - 9 8 - 2 4 - 5 3 - 6 1 - 9 5 - 9 9 - 4 9 - 7 2 - 4 1 - 2 2 - 6 4 - 9 9 - 1 2 - 5 2 - 5 2 - 10 7 - 4 5 - 1 10 - 10 7 - 4 4 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 8 48 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 8 H(I),W(I),I=1,...,N - 8 8 - 1 7 - 6 6 - 1 5 - 2 2 - 3 4 - 9 4 - 9 7 - 6 2 - 3 3 - 2 3 - 3 2 - 3 2 - 10 9 - 7 2 - 9 2 - 4 6 - 6 2 - 10 3 - 4 3 - 1 3 - 6 4 - 9 2 - 1 5 - 8 6 - 10 10 - 7 7 - 6 8 - 6 10 - 8 10 - 9 1 - 8 9 - 5 3 - 8 5 - 5 10 - 8 7 - 3 9 - 8 1 - 6 2 - 3 4 - 7 2 - 10 7 - 8 9 - 1 5 - 9 3 - 10 5 - 1 7 - 4 9 - 7 1 - 6 1 - 5 6 - 9 9 - 7 5 - 1 3 - 7 6 - 3 9 - 3 1 - 10 4 - 5 4 - 10 10 - 10 3 - 3 10 - 3 9 - 4 3 - 6 6 - 1 9 - 5 3 - 5 3 - 5 9 - 1 3 - 10 1 - 5 10 - 2 4 - 10 9 - 10 2 - 9 6 - 5 7 - 1 4 - 2 8 - 6 6 - 7 7 - 10 1 - 3 6 - 9 10 - 4 5 - 6 6 - 7 4 - 3 9 - 3 7 - 10 8 - 8 4 - 7 9 - 9 3 - 9 9 - 6 1 - 3 9 - 5 5 - 1 4 - 3 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 9 49 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 5 H(I),W(I),I=1,...,N - 1 3 - 8 2 - 9 2 - 1 5 - 2 6 - 3 5 - 2 3 - 6 6 - 8 3 - 3 7 - 1 2 - 3 7 - 5 6 - 6 3 - 9 7 - 8 1 - 3 9 - 5 2 - 1 1 - 9 2 - 2 3 - 5 7 - 7 9 - 10 9 - 9 8 - 4 3 - 8 1 - 4 6 - 3 2 - 6 9 - 10 8 - 2 6 - 5 2 - 6 7 - 2 6 - 5 10 - 2 10 - 2 2 - 6 5 - 10 10 - 3 4 - 7 4 - 8 5 - 4 10 - 8 10 - 6 6 - 6 3 - 10 1 - 5 2 - 8 1 - 3 7 - 5 4 - 2 5 - 7 10 - 5 1 - 5 4 - 10 7 - 6 4 - 10 9 - 5 1 - 7 2 - 5 9 - 10 10 - 9 6 - 10 5 - 6 4 - 1 6 - 6 7 - 2 4 - 4 1 - 3 4 - 9 10 - 8 2 - 10 6 - 6 1 - 2 9 - 8 5 - 4 8 - 7 1 - 6 9 - 3 10 - 3 8 - 5 8 - 2 8 - 9 2 - 5 3 - 2 2 - 5 10 - 5 4 - 2 1 - 3 5 - 5 10 - 3 6 - 10 5 - 7 2 - 4 6 - 9 10 - 9 9 - 6 2 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 10 50 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 9 H(I),W(I),I=1,...,N - 3 3 - 1 10 - 6 9 - 3 2 - 10 6 - 10 9 - 8 3 - 4 3 - 9 9 - 9 3 - 4 1 - 4 1 - 6 5 - 9 9 - 4 2 - 8 7 - 10 8 - 1 5 - 6 9 - 6 7 - 2 8 - 10 3 - 8 7 - 9 1 - 9 6 - 6 8 - 2 5 - 4 1 - 9 9 - 10 10 - 4 5 - 8 1 - 4 5 - 10 1 - 2 3 - 4 1 - 1 7 - 2 6 - 9 8 - 8 4 - 7 10 - 10 5 - 10 1 - 9 4 - 6 2 - 9 9 - 5 9 - 8 1 - 2 7 - 7 4 - 5 9 - 2 6 - 5 10 - 8 9 - 7 6 - 9 6 - 7 9 - 7 6 - 8 3 - 1 9 - 3 7 - 2 1 - 1 10 - 5 8 - 2 9 - 6 2 - 2 10 - 8 8 - 7 4 - 5 2 - 10 7 - 4 7 - 3 5 - 6 7 - 6 1 - 7 9 - 9 8 - 4 5 - 7 3 - 7 9 - 7 9 - 2 8 - 1 10 - 6 5 - 3 4 - 10 3 - 10 10 - 9 10 - 5 5 - 10 7 - 1 6 - 5 9 - 6 4 - 8 3 - 1 3 - 7 1 - 1 1 - 3 7 - 4 10 - + 1 PROBLEM CLASS + 20 N. OF ITEMS + 1 1 RELATIVE AND ABSOLUTE N. OF INSTANCE + 10 10 HBIN,WBIN + 9 5 H(I),W(I),I=1,...,N + 2 4 + 6 10 + 7 5 + 3 6 + 7 10 + 5 1 + 5 3 + 9 6 + 4 2 + 7 6 + 2 7 + 3 8 + 10 4 + 5 4 + 3 10 + 3 8 + 8 7 + 3 8 + 7 8 + + 1 PROBLEM CLASS + 20 N. OF ITEMS + 2 2 RELATIVE AND ABSOLUTE N. OF INSTANCE + 10 10 HBIN,WBIN + 2 2 H(I),W(I),I=1,...,N + 8 6 + 2 10 + 3 1 + 4 8 + 10 3 + 9 1 + 5 1 + 3 6 + 1 1 + 2 4 + 2 9 + 9 1 + 5 9 + 7 4 + 2 2 + 4 3 + 7 9 + 1 4 + 8 9 + + 1 PROBLEM CLASS + 20 N. OF ITEMS + 3 3 RELATIVE AND ABSOLUTE N. 
OF INSTANCE + 10 10 HBIN,WBIN + 5 7 H(I),W(I),I=1,...,N + 6 10 + 6 5 + 2 7 + 8 4 + 10 9 + 5 8 + 6 8 + 9 4 + 3 9 + 10 3 + 5 9 + 7 1 + 9 8 + 6 4 + 6 3 + 3 4 + 2 10 + 1 6 + 4 1 diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h index 33bf542edd..cdfc476ed8 100644 --- a/ortools/util/fp_utils.h +++ b/ortools/util/fp_utils.h @@ -256,7 +256,7 @@ inline FloatType Interpolate(FloatType x, FloatType y, FloatType alpha) { inline int fast_ilogb(double value) { return ilogb(value); } inline double fast_scalbn(double value, int exponent) { return scalbn(value, exponent); -} +} } // namespace operations_research From c9ef87403dc523210f2c19592bdbcf50b8d64d0b Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 2 Jul 2025 11:55:25 +0200 Subject: [PATCH 143/509] pdlp: Add README.md --- ortools/pdlp/README.md | 59 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 ortools/pdlp/README.md diff --git a/ortools/pdlp/README.md b/ortools/pdlp/README.md new file mode 100644 index 0000000000..b770f265a3 --- /dev/null +++ b/ortools/pdlp/README.md @@ -0,0 +1,59 @@ +# Primal-Dual Hybrid Gradient Solver (PDLP) + +This directory contains PDLP, a library for solving linear programming (LP) and +quadratic programming (QP) problems using first-order methods. + +The implementation is based on the Primal-Dual Hybrid Gradient (PDHG) algorithm, +which is preprocessed with scaling and optional presolving to improve +performance and numerical stability. + +## Core C++ libraries: + +* [`primal_dual_hybrid_gradient.h`][primal_dual_hybrid_gradient_h]: The main + entry point for the solver, which takes a `QuadraticProgram` and solver + parameters. + +* [`quadratic_program.h`][quadratic_program_h]: Defines the `QuadraticProgram` + struct to represent the optimization problem, including objective vectors, + constraint matrices, and bounds. 
+ +* [`quadratic_program_io.h`][quadratic_program_io_h]: Provides utilities to read + quadratic programs from various file formats, including MPS and MPModelProto. + +* [`sharded_quadratic_program.h`][sharded_quadratic_program_h] and + [`sharder.h`][sharder_h]: These provide the infrastructure for sharding + problem data and performing parallel computations. + +* [`scheduler.h`][scheduler_h]: A thread scheduling interface that supports + multiple backends (e.g. Eigen's thread pools). + +* [`iteration_stats.h`][iteration_stats_h] and + [`termination.h`][termination_h]: Contain logic for computing convergence and + infeasibility statistics and checking termination criteria. + +## Configuration and Logging + +* [`solvers.proto`][solvers_proto]: Defines the `PrimalDualHybridGradientParams` + message for configuring the solver, including termination criteria, + algorithmic choices like restart strategies, and linesearch rules. +* [`solve_log.proto`][solve_log_proto]: Defines messages for logging the + solver's progress and final result, such as `IterationStats` and `SolveLog`. + +## Wrappers and Samples + +* [`python/`](python): Contains the `pybind11` wrapper to expose the C++ library + to Python, along with its build definitions and tests. +* [`samples/`](samples): This directory provides example usage of the library. 
+ + + +[primal_dual_hybrid_gradient_h]: ../pdlp/primal_dual_hybrid_gradient.h +[quadratic_program_h]: ../pdlp/quadratic_program.h +[quadratic_program_io_h]: ../pdlp/quadratic_program_io.h +[sharded_quadratic_program_h]: ../pdlp/sharded_quadratic_program.h +[sharder_h]: ../pdlp/sharder.h +[scheduler_h]: ../pdlp/scheduler.h +[iteration_stats_h]: ../pdlp/iteration_stats.h +[termination_h]: ../pdlp/termination.h +[solvers_proto]: ../pdlp/solvers.proto +[solve_log_proto]: ../pdlp/solve_log.proto From 0ef0f402d119abbf2416168a101f7c37568a1e3c Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 2 Jul 2025 11:54:13 +0200 Subject: [PATCH 144/509] cleanup from google3 --- ortools/packing/testdata/Class_01.2bp | 3324 +------------------------ ortools/util/fp_utils.h | 5 + 2 files changed, 79 insertions(+), 3250 deletions(-) diff --git a/ortools/packing/testdata/Class_01.2bp b/ortools/packing/testdata/Class_01.2bp index fc0e3c6e40..265998df61 100644 --- a/ortools/packing/testdata/Class_01.2bp +++ b/ortools/packing/testdata/Class_01.2bp @@ -1,3250 +1,74 @@ - 1 PROBLEM CLASS - 20 N. OF ITEMS - 1 1 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 9 5 H(I),W(I),I=1,...,N - 2 4 - 6 10 - 7 5 - 3 6 - 7 10 - 5 1 - 5 3 - 9 6 - 4 2 - 7 6 - 2 7 - 3 8 - 10 4 - 5 4 - 3 10 - 3 8 - 8 7 - 3 8 - 7 8 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 2 2 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 2 2 H(I),W(I),I=1,...,N - 8 6 - 2 10 - 3 1 - 4 8 - 10 3 - 9 1 - 5 1 - 3 6 - 1 1 - 2 4 - 2 9 - 9 1 - 5 9 - 7 4 - 2 2 - 4 3 - 7 9 - 1 4 - 8 9 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 3 3 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 7 H(I),W(I),I=1,...,N - 6 10 - 6 5 - 2 7 - 8 4 - 10 9 - 5 8 - 6 8 - 9 4 - 3 9 - 10 3 - 5 9 - 7 1 - 9 8 - 6 4 - 6 3 - 3 4 - 2 10 - 1 6 - 4 1 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 4 4 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 5 5 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 10 H(I),W(I),I=1,...,N - 10 2 - 4 2 - 10 10 - 2 7 - 10 9 - 6 5 - 5 7 - 7 1 - 3 5 - 9 3 - 4 9 - 10 2 - 3 4 - 2 2 - 9 4 - 8 2 - 1 1 - 7 1 - 4 4 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 6 6 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 7 7 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 10 H(I),W(I),I=1,...,N - 1 10 - 7 3 - 3 4 - 9 2 - 2 10 - 1 3 - 8 10 - 8 3 - 1 7 - 7 3 - 4 1 - 10 8 - 7 2 - 1 5 - 1 4 - 8 6 - 9 5 - 2 5 - 9 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 8 8 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 9 9 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 6 H(I),W(I),I=1,...,N - 4 3 - 5 10 - 8 6 - 10 4 - 9 8 - 8 7 - 2 5 - 9 6 - 3 9 - 9 7 - 5 5 - 4 1 - 2 9 - 7 4 - 1 4 - 10 2 - 3 4 - 3 8 - 4 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 10 10 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 1 11 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 2 2 H(I),W(I),I=1,...,N - 8 6 - 2 10 - 3 1 - 4 8 - 10 3 - 9 1 - 5 1 - 3 6 - 1 1 - 2 4 - 2 9 - 9 1 - 5 9 - 7 4 - 2 2 - 4 3 - 7 9 - 1 4 - 8 9 - 3 4 - 5 6 - 7 4 - 4 10 - 5 9 - 2 1 - 1 7 - 1 3 - 3 8 - 4 4 - 2 7 - 9 6 - 2 2 - 8 2 - 1 4 - 6 10 - 1 7 - 9 3 - 5 9 - 8 3 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 2 12 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - 5 2 - 1 5 - 4 8 - 3 6 - 10 7 - 10 5 - 2 5 - 9 5 - 6 9 - 2 10 - 9 9 - 2 7 - 2 1 - 9 8 - 10 2 - 2 7 - 10 3 - 7 2 - 4 9 - 1 3 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 3 13 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - 9 6 - 6 3 - 10 7 - 4 7 - 2 4 - 5 5 - 4 8 - 4 6 - 5 4 - 9 10 - 2 8 - 1 3 - 1 3 - 5 1 - 3 2 - 7 6 - 4 4 - 10 3 - 10 10 - 8 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 4 14 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - 5 7 - 2 5 - 10 10 - 5 4 - 5 4 - 4 7 - 5 7 - 9 8 - 8 10 - 6 5 - 4 8 - 2 9 - 8 9 - 9 4 - 8 3 - 10 8 - 5 5 - 4 3 - 3 3 - 1 2 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 5 15 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - 10 5 - 10 3 - 9 2 - 5 8 - 10 9 - 10 8 - 5 1 - 5 5 - 7 5 - 10 4 - 6 2 - 10 2 - 1 8 - 2 10 - 8 10 - 8 3 - 4 8 - 2 8 - 10 8 - 1 7 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 6 16 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 7 17 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 3 1 H(I),W(I),I=1,...,N - 6 6 - 10 4 - 10 1 - 9 2 - 7 1 - 7 7 - 3 3 - 5 4 - 4 9 - 7 9 - 3 2 - 2 6 - 2 2 - 1 6 - 1 8 - 8 9 - 10 6 - 10 6 - 4 3 - 10 1 - 10 10 - 1 2 - 10 2 - 1 9 - 8 6 - 1 8 - 2 4 - 6 6 - 3 10 - 4 5 - 5 2 - 5 8 - 6 1 - 4 3 - 7 1 - 9 3 - 3 10 - 10 10 - 2 10 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 8 18 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 10 H(I),W(I),I=1,...,N - 1 3 - 7 9 - 5 9 - 4 9 - 5 8 - 5 9 - 2 6 - 9 8 - 10 9 - 6 3 - 8 9 - 1 9 - 5 10 - 2 10 - 8 1 - 10 8 - 3 1 - 10 2 - 1 7 - 9 4 - 8 5 - 7 6 - 10 6 - 1 8 - 9 6 - 7 10 - 10 1 - 7 6 - 10 4 - 4 6 - 10 7 - 8 8 - 4 10 - 8 9 - 2 3 - 10 6 - 2 1 - 1 9 - 1 2 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 9 19 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 5 H(I),W(I),I=1,...,N - 2 6 - 6 3 - 3 2 - 4 1 - 5 3 - 4 4 - 6 2 - 3 9 - 7 2 - 8 8 - 3 1 - 10 5 - 1 10 - 3 8 - 3 4 - 5 8 - 3 1 - 7 9 - 9 1 - 5 9 - 2 9 - 4 10 - 6 3 - 7 5 - 10 4 - 9 3 - 3 6 - 6 4 - 2 8 - 10 3 - 2 10 - 5 9 - 7 6 - 10 1 - 9 5 - 5 4 - 8 7 - 4 7 - 3 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 10 20 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 1 21 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 5 7 H(I),W(I),I=1,...,N - 6 10 - 6 5 - 2 7 - 8 4 - 10 9 - 5 8 - 6 8 - 9 4 - 3 9 - 10 3 - 5 9 - 7 1 - 9 8 - 6 4 - 6 3 - 3 4 - 2 10 - 1 6 - 4 1 - 4 7 - 2 10 - 9 8 - 2 4 - 8 4 - 1 6 - 5 2 - 10 9 - 8 4 - 4 1 - 7 4 - 10 3 - 8 10 - 10 10 - 8 2 - 8 9 - 9 7 - 2 3 - 4 10 - 3 4 - 1 3 - 5 1 - 2 1 - 6 4 - 2 2 - 8 10 - 7 6 - 4 4 - 9 8 - 3 9 - 2 7 - 5 8 - 4 2 - 5 10 - 7 8 - 1 5 - 4 3 - 5 8 - 4 9 - 9 8 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 2 22 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - 9 6 - 6 3 - 10 7 - 4 7 - 2 4 - 5 5 - 4 8 - 4 6 - 5 4 - 9 10 - 2 8 - 1 3 - 1 3 - 5 1 - 3 2 - 7 6 - 4 4 - 10 3 - 10 10 - 8 8 - 3 3 - 8 4 - 8 3 - 2 5 - 2 9 - 3 8 - 6 7 - 1 5 - 4 4 - 5 9 - 6 1 - 1 10 - 10 3 - 6 8 - 3 7 - 1 6 - 2 5 - 1 4 - 7 1 - 3 3 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 3 23 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 6 H(I),W(I),I=1,...,N - 4 3 - 5 10 - 8 6 - 10 4 - 9 8 - 8 7 - 2 5 - 9 6 - 3 9 - 9 7 - 5 5 - 4 1 - 2 9 - 7 4 - 1 4 - 10 2 - 3 4 - 3 8 - 4 6 - 6 5 - 1 2 - 4 10 - 4 6 - 4 6 - 9 5 - 8 7 - 3 2 - 10 1 - 7 10 - 7 7 - 6 7 - 2 2 - 7 4 - 8 3 - 2 6 - 3 1 - 5 10 - 8 10 - 4 8 - 8 8 - 10 4 - 1 7 - 7 6 - 7 10 - 3 5 - 1 5 - 10 5 - 1 5 - 1 5 - 8 2 - 5 4 - 6 5 - 2 9 - 4 5 - 7 6 - 2 6 - 9 6 - 4 7 - 7 4 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 4 24 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - 5 8 - 5 9 - 3 7 - 10 10 - 2 1 - 9 6 - 1 3 - 5 4 - 10 5 - 8 6 - 4 7 - 7 5 - 10 9 - 8 1 - 10 6 - 8 3 - 8 5 - 8 7 - 1 8 - 3 10 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 5 25 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 1 3 H(I),W(I),I=1,...,N - 4 4 - 9 6 - 9 9 - 8 5 - 7 10 - 7 1 - 4 3 - 6 9 - 6 9 - 9 5 - 5 4 - 5 2 - 6 4 - 5 4 - 5 1 - 2 8 - 5 10 - 6 6 - 10 8 - 6 1 - 7 6 - 8 3 - 8 8 - 9 1 - 6 4 - 10 1 - 1 2 - 6 10 - 3 9 - 4 9 - 5 2 - 6 3 - 7 1 - 6 9 - 10 2 - 2 9 - 7 3 - 4 7 - 6 3 - 8 1 - 9 8 - 1 2 - 10 5 - 5 4 - 7 7 - 3 5 - 1 6 - 4 7 - 6 1 - 8 3 - 1 3 - 10 2 - 5 6 - 8 4 - 5 8 - 3 5 - 6 1 - 10 1 - 2 9 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 6 26 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 5 H(I),W(I),I=1,...,N - 2 6 - 6 3 - 3 2 - 4 1 - 5 3 - 4 4 - 6 2 - 3 9 - 7 2 - 8 8 - 3 1 - 10 5 - 1 10 - 3 8 - 3 4 - 5 8 - 3 1 - 7 9 - 9 1 - 5 9 - 2 9 - 4 10 - 6 3 - 7 5 - 10 4 - 9 3 - 3 6 - 6 4 - 2 8 - 10 3 - 2 10 - 5 9 - 7 6 - 10 1 - 9 5 - 5 4 - 8 7 - 4 7 - 3 8 - 6 10 - 9 4 - 2 7 - 5 7 - 5 7 - 1 2 - 5 1 - 2 6 - 1 2 - 5 10 - 9 10 - 1 9 - 5 4 - 6 10 - 3 10 - 7 5 - 4 5 - 4 7 - 9 10 - 3 6 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 7 27 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 5 H(I),W(I),I=1,...,N - 3 8 - 4 5 - 10 8 - 7 3 - 10 9 - 2 7 - 3 4 - 5 9 - 4 2 - 9 1 - 2 4 - 8 3 - 6 3 - 9 4 - 1 9 - 3 1 - 6 1 - 6 10 - 1 4 - 4 3 - 1 7 - 10 3 - 3 10 - 6 10 - 9 8 - 2 3 - 2 8 - 8 8 - 2 2 - 9 2 - 8 4 - 2 6 - 6 1 - 9 5 - 3 6 - 4 7 - 1 2 - 5 1 - 3 7 - 3 4 - 7 2 - 2 3 - 5 3 - 10 9 - 1 5 - 8 6 - 3 1 - 1 2 - 2 5 - 7 9 - 2 5 - 6 10 - 1 2 - 3 4 - 1 1 - 7 1 - 5 8 - 7 7 - 2 6 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 8 28 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 3 H(I),W(I),I=1,...,N - 9 8 - 5 7 - 8 10 - 5 10 - 4 1 - 7 3 - 10 8 - 3 1 - 6 1 - 7 1 - 3 6 - 6 5 - 4 1 - 3 7 - 7 5 - 1 5 - 4 4 - 8 10 - 9 5 - 6 6 - 10 1 - 8 1 - 8 4 - 3 7 - 7 3 - 1 7 - 6 9 - 7 10 - 4 6 - 9 6 - 8 2 - 8 10 - 3 9 - 9 1 - 3 1 - 8 8 - 10 4 - 8 6 - 3 2 - 2 4 - 4 7 - 9 2 - 5 9 - 10 4 - 6 7 - 8 9 - 7 7 - 3 8 - 2 5 - 4 5 - 1 4 - 7 7 - 2 8 - 9 7 - 2 4 - 1 7 - 8 4 - 8 7 - 10 2 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 9 29 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 5 H(I),W(I),I=1,...,N - 1 6 - 2 7 - 8 2 - 3 2 - 1 8 - 1 5 - 5 4 - 10 5 - 4 4 - 9 1 - 10 7 - 8 8 - 3 3 - 1 5 - 2 8 - 7 2 - 2 8 - 7 3 - 2 5 - 4 7 - 2 4 - 8 5 - 7 2 - 8 10 - 4 10 - 10 2 - 4 3 - 10 10 - 3 8 - 2 10 - 8 7 - 7 9 - 9 3 - 6 8 - 2 6 - 8 2 - 4 6 - 7 9 - 6 7 - 3 9 - 3 7 - 6 7 - 3 7 - 10 10 - 6 2 - 3 1 - 7 10 - 7 3 - 4 1 - 5 5 - 8 10 - 5 6 - 3 5 - 10 2 - 7 7 - 2 1 - 2 10 - 10 3 - 1 3 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 10 30 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 8 H(I),W(I),I=1,...,N - 8 3 - 5 5 - 2 3 - 4 9 - 10 8 - 2 9 - 6 5 - 10 1 - 7 6 - 3 2 - 6 5 - 8 4 - 6 10 - 7 10 - 5 2 - 4 8 - 4 5 - 7 10 - 9 7 - 7 10 - 5 9 - 8 6 - 5 6 - 8 9 - 3 6 - 9 3 - 7 9 - 4 6 - 8 10 - 2 10 - 1 8 - 3 8 - 2 2 - 3 9 - 4 7 - 8 4 - 10 1 - 10 6 - 9 6 - 3 4 - 2 6 - 6 9 - 2 8 - 9 6 - 8 3 - 6 2 - 2 7 - 6 8 - 6 6 - 10 8 - 10 3 - 2 1 - 4 10 - 9 8 - 3 7 - 4 3 - 4 4 - 9 3 - 9 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 1 31 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - 5 2 - 1 5 - 4 8 - 3 6 - 10 7 - 10 5 - 2 5 - 9 5 - 6 9 - 2 10 - 9 9 - 2 7 - 2 1 - 9 8 - 10 2 - 2 7 - 10 3 - 7 2 - 4 9 - 1 3 - 6 7 - 6 6 - 3 9 - 5 8 - 8 9 - 6 1 - 4 5 - 1 8 - 9 8 - 4 7 - 5 2 - 3 1 - 5 5 - 9 2 - 8 9 - 6 9 - 4 7 - 3 3 - 8 8 - 9 8 - 8 2 - 10 2 - 5 4 - 4 7 - 3 3 - 2 6 - 1 8 - 7 8 - 9 4 - 8 4 - 1 10 - 6 7 - 6 6 - 6 7 - 5 6 - 5 3 - 1 9 - 4 1 - 4 6 - 7 7 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 2 32 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - 5 7 - 2 5 - 10 10 - 5 4 - 5 4 - 4 7 - 5 7 - 9 8 - 8 10 - 6 5 - 4 8 - 2 9 - 8 9 - 9 4 - 8 3 - 10 8 - 5 5 - 4 3 - 3 3 - 1 2 - 10 8 - 6 4 - 8 4 - 7 9 - 10 10 - 5 2 - 5 5 - 5 1 - 3 6 - 3 8 - 10 9 - 2 5 - 2 5 - 5 2 - 1 10 - 1 9 - 2 10 - 1 10 - 1 7 - 9 4 - 1 6 - 2 2 - 6 4 - 2 7 - 3 2 - 4 9 - 4 5 - 5 7 - 3 10 - 6 7 - 7 9 - 2 6 - 9 5 - 10 5 - 2 8 - 2 9 - 4 8 - 2 6 - 6 9 - 8 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 3 33 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - 5 8 - 5 9 - 3 7 - 10 10 - 2 1 - 9 6 - 1 3 - 5 4 - 10 5 - 8 6 - 4 7 - 7 5 - 10 9 - 8 1 - 10 6 - 8 3 - 8 5 - 8 7 - 1 8 - 3 10 - 2 9 - 1 4 - 4 6 - 1 2 - 5 10 - 1 2 - 7 5 - 5 6 - 3 9 - 7 8 - 1 3 - 10 6 - 3 2 - 2 1 - 3 4 - 5 7 - 9 9 - 2 3 - 2 3 - 2 8 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 4 34 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 10 H(I),W(I),I=1,...,N - 1 3 - 7 9 - 5 9 - 4 9 - 5 8 - 5 9 - 2 6 - 9 8 - 10 9 - 6 3 - 8 9 - 1 9 - 5 10 - 2 10 - 8 1 - 10 8 - 3 1 - 10 2 - 1 7 - 9 4 - 8 5 - 7 6 - 10 6 - 1 8 - 9 6 - 7 10 - 10 1 - 7 6 - 10 4 - 4 6 - 10 7 - 8 8 - 4 10 - 8 9 - 2 3 - 10 6 - 2 1 - 1 9 - 1 2 - 2 4 - 6 5 - 8 8 - 8 7 - 6 2 - 10 5 - 8 1 - 3 10 - 8 3 - 5 6 - 4 8 - 2 1 - 7 1 - 2 6 - 1 10 - 5 7 - 4 6 - 6 9 - 8 3 - 3 5 - 6 9 - 7 10 - 7 6 - 2 10 - 1 7 - 7 7 - 4 1 - 1 5 - 2 8 - 1 3 - 7 4 - 3 10 - 3 5 - 8 3 - 9 3 - 2 8 - 5 3 - 1 6 - 5 1 - 7 3 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 5 35 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - 9 5 - 2 10 - 7 3 - 1 7 - 2 8 - 3 5 - 9 9 - 8 7 - 4 7 - 2 2 - 10 6 - 1 10 - 10 5 - 7 6 - 3 4 - 4 9 - 9 7 - 1 8 - 4 3 - 8 8 - 4 3 - 9 8 - 2 10 - 3 8 - 10 4 - 1 3 - 8 4 - 7 10 - 6 6 - 8 1 - 9 7 - 7 9 - 4 8 - 6 4 - 3 4 - 5 5 - 10 2 - 4 8 - 2 4 - 8 10 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 6 36 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 3 H(I),W(I),I=1,...,N - 9 8 - 5 7 - 8 10 - 5 10 - 4 1 - 7 3 - 10 8 - 3 1 - 6 1 - 7 1 - 3 6 - 6 5 - 4 1 - 3 7 - 7 5 - 1 5 - 4 4 - 8 10 - 9 5 - 6 6 - 10 1 - 8 1 - 8 4 - 3 7 - 7 3 - 1 7 - 6 9 - 7 10 - 4 6 - 9 6 - 8 2 - 8 10 - 3 9 - 9 1 - 3 1 - 8 8 - 10 4 - 8 6 - 3 2 - 2 4 - 4 7 - 9 2 - 5 9 - 10 4 - 6 7 - 8 9 - 7 7 - 3 8 - 2 5 - 4 5 - 1 4 - 7 7 - 2 8 - 9 7 - 2 4 - 1 7 - 8 4 - 8 7 - 10 2 - 7 7 - 4 6 - 10 2 - 7 6 - 1 10 - 10 8 - 8 1 - 8 9 - 1 10 - 9 1 - 6 5 - 3 7 - 10 6 - 5 5 - 5 9 - 3 7 - 4 2 - 8 8 - 6 4 - 4 4 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 7 37 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 9 6 H(I),W(I),I=1,...,N - 4 3 - 5 9 - 2 10 - 7 5 - 5 6 - 6 9 - 1 3 - 6 9 - 10 4 - 5 6 - 7 8 - 5 4 - 10 5 - 9 2 - 10 10 - 6 10 - 2 2 - 6 2 - 2 10 - 5 7 - 3 9 - 9 5 - 9 8 - 10 2 - 7 7 - 3 4 - 8 10 - 5 3 - 8 6 - 6 4 - 3 6 - 3 4 - 5 4 - 2 4 - 9 7 - 5 9 - 2 7 - 6 6 - 5 9 - 2 7 - 3 10 - 6 5 - 3 1 - 1 10 - 7 7 - 5 2 - 9 6 - 9 5 - 7 8 - 8 7 - 1 8 - 9 9 - 8 3 - 5 6 - 3 6 - 8 6 - 8 10 - 7 9 - 8 4 - 2 10 - 1 7 - 10 7 - 7 5 - 6 1 - 1 6 - 3 6 - 1 10 - 4 1 - 8 3 - 8 5 - 7 1 - 5 5 - 4 6 - 10 7 - 2 2 - 2 2 - 5 8 - 10 6 - 10 4 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 8 38 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 1 6 H(I),W(I),I=1,...,N - 4 1 - 4 8 - 3 1 - 10 9 - 3 6 - 10 6 - 3 8 - 7 5 - 3 2 - 1 7 - 7 4 - 1 8 - 4 7 - 3 4 - 5 5 - 3 8 - 4 5 - 4 2 - 8 9 - 10 6 - 9 10 - 8 10 - 2 8 - 10 9 - 4 3 - 10 10 - 6 8 - 6 5 - 3 3 - 8 8 - 3 9 - 2 7 - 6 9 - 5 3 - 7 1 - 9 10 - 3 7 - 7 6 - 7 1 - 7 2 - 2 1 - 1 3 - 7 8 - 4 7 - 1 6 - 2 4 - 10 4 - 6 10 - 2 1 - 3 1 - 7 4 - 8 9 - 1 8 - 4 8 - 10 7 - 4 7 - 6 3 - 7 5 - 3 5 - 4 9 - 10 5 - 8 5 - 7 5 - 9 4 - 8 5 - 6 2 - 3 5 - 4 5 - 10 9 - 2 6 - 6 7 - 10 7 - 7 7 - 3 8 - 6 6 - 8 9 - 6 1 - 7 5 - 5 2 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 9 39 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 9 H(I),W(I),I=1,...,N - 6 7 - 10 3 - 3 1 - 2 6 - 2 7 - 6 5 - 5 6 - 9 10 - 5 9 - 2 7 - 2 5 - 10 3 - 6 8 - 1 9 - 1 8 - 5 10 - 5 5 - 7 4 - 9 3 - 4 1 - 8 10 - 9 6 - 10 8 - 5 7 - 5 10 - 1 5 - 8 4 - 6 4 - 7 3 - 6 2 - 3 9 - 9 1 - 10 1 - 5 4 - 7 10 - 10 10 - 3 10 - 10 9 - 5 10 - 7 7 - 9 10 - 5 10 - 6 4 - 3 10 - 2 1 - 1 2 - 3 2 - 3 3 - 1 10 - 8 3 - 1 5 - 7 9 - 10 6 - 4 7 - 9 9 - 6 9 - 2 10 - 6 9 - 9 3 - 7 4 - 8 9 - 6 7 - 6 9 - 7 1 - 10 2 - 2 4 - 10 3 - 9 7 - 2 9 - 10 5 - 4 3 - 9 5 - 5 8 - 4 10 - 1 2 - 6 2 - 10 7 - 9 10 - 2 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 10 40 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 8 H(I),W(I),I=1,...,N - 8 8 - 1 7 - 6 6 - 1 5 - 2 2 - 3 4 - 9 4 - 9 7 - 6 2 - 3 3 - 2 3 - 3 2 - 3 2 - 10 9 - 7 2 - 9 2 - 4 6 - 6 2 - 10 3 - 4 3 - 1 3 - 6 4 - 9 2 - 1 5 - 8 6 - 10 10 - 7 7 - 6 8 - 6 10 - 8 10 - 9 1 - 8 9 - 5 3 - 8 5 - 5 10 - 8 7 - 3 9 - 8 1 - 6 2 - 3 4 - 7 2 - 10 7 - 8 9 - 1 5 - 9 3 - 10 5 - 1 7 - 4 9 - 7 1 - 6 1 - 5 6 - 9 9 - 7 5 - 1 3 - 7 6 - 3 9 - 3 1 - 10 4 - 5 4 - 10 10 - 10 3 - 3 10 - 3 9 - 4 3 - 6 6 - 1 9 - 5 3 - 5 3 - 5 9 - 1 3 - 10 1 - 5 10 - 2 4 - 10 9 - 10 2 - 9 6 - 5 7 - 1 4 - 2 8 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 1 41 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 4 10 H(I),W(I),I=1,...,N - 10 2 - 4 2 - 10 10 - 2 7 - 10 9 - 6 5 - 5 7 - 7 1 - 3 5 - 9 3 - 4 9 - 10 2 - 3 4 - 2 2 - 9 4 - 8 2 - 1 1 - 7 1 - 4 4 - 10 6 - 6 4 - 10 3 - 9 6 - 6 3 - 7 6 - 9 2 - 7 7 - 4 6 - 10 3 - 8 7 - 2 1 - 2 10 - 9 3 - 10 2 - 4 2 - 1 4 - 9 8 - 6 10 - 4 10 - 9 9 - 9 4 - 7 2 - 9 7 - 3 3 - 1 10 - 10 10 - 7 8 - 6 3 - 1 8 - 2 8 - 1 2 - 3 2 - 8 4 - 7 7 - 6 2 - 4 6 - 3 7 - 9 7 - 10 3 - 10 3 - 2 8 - 8 6 - 7 4 - 7 5 - 5 4 - 5 1 - 2 7 - 4 8 - 4 3 - 7 4 - 3 7 - 3 1 - 3 8 - 2 3 - 3 5 - 10 6 - 2 3 - 3 4 - 7 1 - 3 9 - 4 9 - 5 1 - 4 1 - 5 9 - 3 6 - 6 9 - 7 5 - 2 7 - 9 2 - 10 6 - 1 4 - 2 10 - 3 3 - 1 6 - 5 4 - 7 6 - 2 4 - 10 4 - 1 4 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 2 42 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - 10 5 - 10 3 - 9 2 - 5 8 - 10 9 - 10 8 - 5 1 - 5 5 - 7 5 - 10 4 - 6 2 - 10 2 - 1 8 - 2 10 - 8 10 - 8 3 - 4 8 - 2 8 - 10 8 - 1 7 - 1 7 - 8 4 - 4 6 - 6 7 - 1 10 - 3 6 - 6 7 - 10 7 - 7 7 - 2 8 - 5 5 - 7 8 - 4 9 - 5 3 - 2 10 - 1 1 - 1 2 - 3 2 - 4 3 - 2 1 - 10 5 - 2 6 - 9 1 - 2 8 - 6 10 - 2 2 - 10 2 - 6 4 - 10 3 - 3 7 - 1 9 - 3 5 - 2 2 - 2 5 - 3 7 - 6 8 - 8 1 - 7 10 - 6 5 - 10 7 - 8 3 - 2 1 - 8 4 - 4 7 - 3 9 - 1 2 - 6 2 - 10 7 - 9 10 - 3 7 - 2 6 - 4 9 - 5 3 - 10 5 - 3 9 - 4 9 - 4 5 - 9 9 - 8 6 - 3 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 3 43 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 1 3 H(I),W(I),I=1,...,N - 4 4 - 9 6 - 9 9 - 8 5 - 7 10 - 7 1 - 4 3 - 6 9 - 6 9 - 9 5 - 5 4 - 5 2 - 6 4 - 5 4 - 5 1 - 2 8 - 5 10 - 6 6 - 10 8 - 6 1 - 7 6 - 8 3 - 8 8 - 9 1 - 6 4 - 10 1 - 1 2 - 6 10 - 3 9 - 4 9 - 5 2 - 6 3 - 7 1 - 6 9 - 10 2 - 2 9 - 7 3 - 4 7 - 6 3 - 8 1 - 9 8 - 1 2 - 10 5 - 5 4 - 7 7 - 3 5 - 1 6 - 4 7 - 6 1 - 8 3 - 1 3 - 10 2 - 5 6 - 8 4 - 5 8 - 3 5 - 6 1 - 10 1 - 2 9 - 5 4 - 5 8 - 1 8 - 4 2 - 1 2 - 1 3 - 4 6 - 8 1 - 1 4 - 5 5 - 5 5 - 2 1 - 6 10 - 3 5 - 3 3 - 8 10 - 2 1 - 5 10 - 10 7 - 1 2 - 8 3 - 3 8 - 1 9 - 1 2 - 6 5 - 1 2 - 5 6 - 10 8 - 2 10 - 9 6 - 5 7 - 3 3 - 4 2 - 10 1 - 5 3 - 10 3 - 6 10 - 8 7 - 6 9 - 2 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 4 44 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - 9 5 - 2 10 - 7 3 - 1 7 - 2 8 - 3 5 - 9 9 - 8 7 - 4 7 - 2 2 - 10 6 - 1 10 - 10 5 - 7 6 - 3 4 - 4 9 - 9 7 - 1 8 - 4 3 - 8 8 - 4 3 - 9 8 - 2 10 - 3 8 - 10 4 - 1 3 - 8 4 - 7 10 - 6 6 - 8 1 - 9 7 - 7 9 - 4 8 - 6 4 - 3 4 - 5 5 - 10 2 - 4 8 - 2 4 - 8 10 - 2 4 - 5 3 - 3 9 - 5 10 - 3 2 - 3 9 - 3 6 - 7 2 - 8 9 - 7 3 - 6 3 - 9 10 - 7 1 - 1 4 - 6 2 - 2 9 - 1 6 - 9 4 - 3 8 - 7 8 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 5 45 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 9 1 H(I),W(I),I=1,...,N - 9 1 - 5 6 - 8 5 - 5 10 - 5 6 - 9 4 - 2 4 - 5 2 - 9 8 - 10 4 - 6 4 - 1 8 - 8 9 - 8 1 - 3 4 - 7 10 - 10 4 - 5 6 - 6 6 - 3 2 - 8 2 - 6 9 - 7 6 - 2 5 - 7 6 - 1 6 - 6 3 - 8 10 - 5 1 - 1 7 - 10 8 - 1 2 - 6 6 - 2 3 - 6 8 - 2 1 - 5 3 - 3 10 - 9 1 - 8 9 - 10 8 - 6 7 - 2 10 - 7 1 - 4 9 - 7 7 - 5 9 - 2 6 - 2 9 - 4 3 - 1 9 - 8 7 - 3 3 - 10 7 - 4 10 - 3 9 - 10 1 - 1 9 - 4 10 - 1 1 - 9 4 - 5 6 - 2 6 - 7 4 - 6 8 - 4 6 - 5 10 - 9 6 - 7 3 - 5 2 - 1 9 - 10 4 - 4 9 - 5 8 - 3 2 - 5 1 - 9 3 - 7 5 - 6 10 - 3 3 - 2 3 - 7 3 - 9 8 - 7 8 - 10 3 - 4 10 - 3 7 - 2 8 - 1 7 - 10 3 - 6 8 - 7 9 - 7 4 - 10 4 - 5 7 - 5 10 - 4 6 - 3 10 - 3 2 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 6 46 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 8 H(I),W(I),I=1,...,N - 8 3 - 5 5 - 2 3 - 4 9 - 10 8 - 2 9 - 6 5 - 10 1 - 7 6 - 3 2 - 6 5 - 8 4 - 6 10 - 7 10 - 5 2 - 4 8 - 4 5 - 7 10 - 9 7 - 7 10 - 5 9 - 8 6 - 5 6 - 8 9 - 3 6 - 9 3 - 7 9 - 4 6 - 8 10 - 2 10 - 1 8 - 3 8 - 2 2 - 3 9 - 4 7 - 8 4 - 10 1 - 10 6 - 9 6 - 3 4 - 2 6 - 6 9 - 2 8 - 9 6 - 8 3 - 6 2 - 2 7 - 6 8 - 6 6 - 10 8 - 10 3 - 2 1 - 4 10 - 9 8 - 3 7 - 4 3 - 4 4 - 9 3 - 9 6 - 4 1 - 2 10 - 10 10 - 10 8 - 9 8 - 6 4 - 2 6 - 3 6 - 8 9 - 9 5 - 2 5 - 5 5 - 2 4 - 6 4 - 8 1 - 10 3 - 7 4 - 7 7 - 4 4 - 2 4 - 1 5 - 3 4 - 4 5 - 1 3 - 8 6 - 10 7 - 7 1 - 9 8 - 3 9 - 8 10 - 5 10 - 1 2 - 4 10 - 7 3 - 5 5 - 6 10 - 4 7 - 4 9 - 4 1 - 7 1 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 7 47 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 7 5 H(I),W(I),I=1,...,N - 5 4 - 1 1 - 9 6 - 3 10 - 3 9 - 1 1 - 2 1 - 5 2 - 3 3 - 1 7 - 8 10 - 7 10 - 1 10 - 2 4 - 10 3 - 2 7 - 6 4 - 5 1 - 3 1 - 1 9 - 10 5 - 5 10 - 6 10 - 5 4 - 8 5 - 2 7 - 1 9 - 1 5 - 9 9 - 8 10 - 4 1 - 6 6 - 6 6 - 8 2 - 4 10 - 3 8 - 3 9 - 2 8 - 2 5 - 9 1 - 1 3 - 1 8 - 5 10 - 10 3 - 1 7 - 1 3 - 10 8 - 1 1 - 8 3 - 1 9 - 2 10 - 6 7 - 2 6 - 3 6 - 4 8 - 4 8 - 5 6 - 3 4 - 6 6 - 8 3 - 2 5 - 9 10 - 1 5 - 2 3 - 3 9 - 5 2 - 3 5 - 7 4 - 9 6 - 4 4 - 2 4 - 4 4 - 6 8 - 7 10 - 9 9 - 8 7 - 3 1 - 5 9 - 1 2 - 9 8 - 2 4 - 5 3 - 6 1 - 9 5 - 9 9 - 4 9 - 7 2 - 4 1 - 2 2 - 6 4 - 9 9 - 1 2 - 5 2 - 5 2 - 10 7 - 4 5 - 1 10 - 10 7 - 4 4 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 8 48 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 8 H(I),W(I),I=1,...,N - 8 8 - 1 7 - 6 6 - 1 5 - 2 2 - 3 4 - 9 4 - 9 7 - 6 2 - 3 3 - 2 3 - 3 2 - 3 2 - 10 9 - 7 2 - 9 2 - 4 6 - 6 2 - 10 3 - 4 3 - 1 3 - 6 4 - 9 2 - 1 5 - 8 6 - 10 10 - 7 7 - 6 8 - 6 10 - 8 10 - 9 1 - 8 9 - 5 3 - 8 5 - 5 10 - 8 7 - 3 9 - 8 1 - 6 2 - 3 4 - 7 2 - 10 7 - 8 9 - 1 5 - 9 3 - 10 5 - 1 7 - 4 9 - 7 1 - 6 1 - 5 6 - 9 9 - 7 5 - 1 3 - 7 6 - 3 9 - 3 1 - 10 4 - 5 4 - 10 10 - 10 3 - 3 10 - 3 9 - 4 3 - 6 6 - 1 9 - 5 3 - 5 3 - 5 9 - 1 3 - 10 1 - 5 10 - 2 4 - 10 9 - 10 2 - 9 6 - 5 7 - 1 4 - 2 8 - 6 6 - 7 7 - 10 1 - 3 6 - 9 10 - 4 5 - 6 6 - 7 4 - 3 9 - 3 7 - 10 8 - 8 4 - 7 9 - 9 3 - 9 9 - 6 1 - 3 9 - 5 5 - 1 4 - 3 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 9 49 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 5 H(I),W(I),I=1,...,N - 1 3 - 8 2 - 9 2 - 1 5 - 2 6 - 3 5 - 2 3 - 6 6 - 8 3 - 3 7 - 1 2 - 3 7 - 5 6 - 6 3 - 9 7 - 8 1 - 3 9 - 5 2 - 1 1 - 9 2 - 2 3 - 5 7 - 7 9 - 10 9 - 9 8 - 4 3 - 8 1 - 4 6 - 3 2 - 6 9 - 10 8 - 2 6 - 5 2 - 6 7 - 2 6 - 5 10 - 2 10 - 2 2 - 6 5 - 10 10 - 3 4 - 7 4 - 8 5 - 4 10 - 8 10 - 6 6 - 6 3 - 10 1 - 5 2 - 8 1 - 3 7 - 5 4 - 2 5 - 7 10 - 5 1 - 5 4 - 10 7 - 6 4 - 10 9 - 5 1 - 7 2 - 5 9 - 10 10 - 9 6 - 10 5 - 6 4 - 1 6 - 6 7 - 2 4 - 4 1 - 3 4 - 9 10 - 8 2 - 10 6 - 6 1 - 2 9 - 8 5 - 4 8 - 7 1 - 6 9 - 3 10 - 3 8 - 5 8 - 2 8 - 9 2 - 5 3 - 2 2 - 5 10 - 5 4 - 2 1 - 3 5 - 5 10 - 3 6 - 10 5 - 7 2 - 4 6 - 9 10 - 9 9 - 6 2 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 10 50 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 9 H(I),W(I),I=1,...,N - 3 3 - 1 10 - 6 9 - 3 2 - 10 6 - 10 9 - 8 3 - 4 3 - 9 9 - 9 3 - 4 1 - 4 1 - 6 5 - 9 9 - 4 2 - 8 7 - 10 8 - 1 5 - 6 9 - 6 7 - 2 8 - 10 3 - 8 7 - 9 1 - 9 6 - 6 8 - 2 5 - 4 1 - 9 9 - 10 10 - 4 5 - 8 1 - 4 5 - 10 1 - 2 3 - 4 1 - 1 7 - 2 6 - 9 8 - 8 4 - 7 10 - 10 5 - 10 1 - 9 4 - 6 2 - 9 9 - 5 9 - 8 1 - 2 7 - 7 4 - 5 9 - 2 6 - 5 10 - 8 9 - 7 6 - 9 6 - 7 9 - 7 6 - 8 3 - 1 9 - 3 7 - 2 1 - 1 10 - 5 8 - 2 9 - 6 2 - 2 10 - 8 8 - 7 4 - 5 2 - 10 7 - 4 7 - 3 5 - 6 7 - 6 1 - 7 9 - 9 8 - 4 5 - 7 3 - 7 9 - 7 9 - 2 8 - 1 10 - 6 5 - 3 4 - 10 3 - 10 10 - 9 10 - 5 5 - 10 7 - 1 6 - 5 9 - 6 4 - 8 3 - 1 3 - 7 1 - 1 1 - 3 7 - 4 10 - + 1 PROBLEM CLASS + 20 N. OF ITEMS + 1 1 RELATIVE AND ABSOLUTE N. OF INSTANCE + 10 10 HBIN,WBIN + 9 5 H(I),W(I),I=1,...,N + 2 4 + 6 10 + 7 5 + 3 6 + 7 10 + 5 1 + 5 3 + 9 6 + 4 2 + 7 6 + 2 7 + 3 8 + 10 4 + 5 4 + 3 10 + 3 8 + 8 7 + 3 8 + 7 8 + + 1 PROBLEM CLASS + 20 N. OF ITEMS + 2 2 RELATIVE AND ABSOLUTE N. OF INSTANCE + 10 10 HBIN,WBIN + 2 2 H(I),W(I),I=1,...,N + 8 6 + 2 10 + 3 1 + 4 8 + 10 3 + 9 1 + 5 1 + 3 6 + 1 1 + 2 4 + 2 9 + 9 1 + 5 9 + 7 4 + 2 2 + 4 3 + 7 9 + 1 4 + 8 9 + + 1 PROBLEM CLASS + 20 N. OF ITEMS + 3 3 RELATIVE AND ABSOLUTE N. 
OF INSTANCE + 10 10 HBIN,WBIN + 5 7 H(I),W(I),I=1,...,N + 6 10 + 6 5 + 2 7 + 8 4 + 10 9 + 5 8 + 6 8 + 9 4 + 3 9 + 10 3 + 5 9 + 7 1 + 9 8 + 6 4 + 6 3 + 3 4 + 2 10 + 1 6 + 4 1 diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h index a5e6cec283..cdfc476ed8 100644 --- a/ortools/util/fp_utils.h +++ b/ortools/util/fp_utils.h @@ -253,6 +253,11 @@ inline FloatType Interpolate(FloatType x, FloatType y, FloatType alpha) { return alpha * x + (1 - alpha) * y; } +inline int fast_ilogb(double value) { return ilogb(value); } +inline double fast_scalbn(double value, int exponent) { + return scalbn(value, exponent); +} + } // namespace operations_research #endif // OR_TOOLS_UTIL_FP_UTILS_H_ From ce6e36569064bcb2ec3f87b44c6b8072019dbe28 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 2 Jul 2025 11:55:25 +0200 Subject: [PATCH 145/509] pdlp: Add README.md --- ortools/pdlp/README.md | 59 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 ortools/pdlp/README.md diff --git a/ortools/pdlp/README.md b/ortools/pdlp/README.md new file mode 100644 index 0000000000..b770f265a3 --- /dev/null +++ b/ortools/pdlp/README.md @@ -0,0 +1,59 @@ +# Primal-Dual Hybrid Gradient Solver (PDLP) + +This directory contains PDLP, a library for solving linear programming (LP) and +quadratic programming (QP) problems using first-order methods. + +The implementation is based on the Primal-Dual Hybrid Gradient (PDHG) algorithm, +which is preprocessed with scaling and optional presolving to improve +performance and numerical stability. + +## Core C++ libraries: + +* [`primal_dual_hybrid_gradient.h`][primal_dual_hybrid_gradient_h]: The main + entry point for the solver, which takes a `QuadraticProgram` and solver + parameters. + +* [`quadratic_program.h`][quadratic_program_h]: Defines the `QuadraticProgram` + struct to represent the optimization problem, including objective vectors, + constraint matrices, and bounds. 
+ +* [`quadratic_program_io.h`][quadratic_program_io_h]: Provides utilities to read + quadratic programs from various file formats, including MPS and MPModelProto. + +* [`sharded_quadratic_program.h`][sharded_quadratic_program_h] and + [`sharder.h`][sharder_h]: These provide the infrastructure for sharding + problem data and performing parallel computations. + +* [`scheduler.h`][scheduler_h]: A thread scheduling interface that supports + multiple backends (e.g. Eigen's thread pools). + +* [`iteration_stats.h`][iteration_stats_h] and + [`termination.h`][termination_h]: Contain logic for computing convergence and + infeasibility statistics and checking termination criteria. + +## Configuration and Logging + +* [`solvers.proto`][solvers_proto]: Defines the `PrimalDualHybridGradientParams` + message for configuring the solver, including termination criteria, + algorithmic choices like restart strategies, and linesearch rules. +* [`solve_log.proto`][solve_log_proto]: Defines messages for logging the + solver's progress and final result, such as `IterationStats` and `SolveLog`. + +## Wrappers and Samples + +* [`python/`](python): Contains the `pybind11` wrapper to expose the C++ library + to Python, along with its build definitions and tests. +* [`samples/`](samples): This directory provides example usage of the library. 
+ + + +[primal_dual_hybrid_gradient_h]: ../pdlp/primal_dual_hybrid_gradient.h +[quadratic_program_h]: ../pdlp/quadratic_program.h +[quadratic_program_io_h]: ../pdlp/quadratic_program_io.h +[sharded_quadratic_program_h]: ../pdlp/sharded_quadratic_program.h +[sharder_h]: ../pdlp/sharder.h +[scheduler_h]: ../pdlp/scheduler.h +[iteration_stats_h]: ../pdlp/iteration_stats.h +[termination_h]: ../pdlp/termination.h +[solvers_proto]: ../pdlp/solvers.proto +[solve_log_proto]: ../pdlp/solve_log.proto From bbcb7a8cc8c417dd73afb60e2eb30205a07330d2 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 2 Jul 2025 11:55:25 +0200 Subject: [PATCH 146/509] update README.md --- ortools/graph/README.md | 109 +++++++++++++++++----------------------- ortools/pdlp/README.md | 6 --- 2 files changed, 47 insertions(+), 68 deletions(-) diff --git a/ortools/graph/README.md b/ortools/graph/README.md index 4831200355..7a070bed18 100644 --- a/ortools/graph/README.md +++ b/ortools/graph/README.md @@ -5,76 +5,64 @@ flow problems. It contains in particular: -* well-tuned algorithms (for example, shortest paths and - [Hamiltonian paths](https://en.wikipedia.org/wiki/Hamiltonian_path)). -* hard-to-find algorithms (Hamiltonian paths, push-relabel flow algorithms). -* other, more common algorithms, that are useful to use with graphs from - `util/graph`. +* well-tuned algorithms (for example, shortest paths and + [Hamiltonian paths](https://en.wikipedia.org/wiki/Hamiltonian_path)). +* hard-to-find algorithms (Hamiltonian paths, push-relabel flow algorithms). +* other, more common algorithms, that are useful to use with graphs from + `util/graph`. Generic algorithms for shortest paths: -* [`bounded_dijkstra.h`][bounded_dijkstra_h]: entry point for shortest paths. - This is the preferred implementation for most needs. - -* [`bidirectional_dijkstra.h`][bidirectional_dijkstra_h]: for large graphs, - this implementation might be faster than `bounded_dijkstra.h`. 
- -* [`shortest_paths.h`][shortest_paths_h]: shortest paths that are computed in - parallel (only if there are several sources). Includes - [Dijkstra](https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm) and - [Bellman-Ford](https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm) - algorithms. *Although its name is very generic, only use this implementation - if parallelism makes sense.* +* [`bounded_dijkstra.h`][bounded_dijkstra_h]: entry point for shortest paths. + This is the preferred implementation for most needs. +* [`bidirectional_dijkstra.h`][bidirectional_dijkstra_h]: for large graphs, + this implementation might be faster than `bounded_dijkstra.h`. +* [`shortest_paths.h`][shortest_paths_h]: shortest paths that are computed in + parallel (only if there are several sources). Includes + [Dijkstra](https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm) and + [Bellman-Ford](https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm) + algorithms. *Although its name is very generic, only use this implementation + if parallelism makes sense.* Specific algorithms for paths: -* [`dag_shortest_path.h`][dag_shortest_path_h]: shortest paths on directed - acyclic graphs. If you have such a graph, this implementation is likely to - be the fastest. Unlike most implementations, these algorithms have two - interfaces: a "simple" one (list of edges and weights) and a standard one - (taking as input a graph data structure from - [`//ortools/graph/graph.h`][graph_h]). - -* [`dag_constrained_shortest_path.`][dag_constrained_shortest_path_h]: - shortest paths on directed acyclic graphs with resource constraints. - -* [`hamiltonian_path.h`][hamiltonian_path_h]: entry point for computing - minimum [Hamiltonian paths](https://en.wikipedia.org/wiki/Hamiltonian_path) - and cycles on directed graphs with costs on arcs, using a - dynamic-programming algorithm. 
- -* [`eulerian_path.h`][eulerian_path_h]: entry point for computing minimum - [Eulerian paths](https://en.wikipedia.org/wiki/Eulerian_path) and cycles on - undirected graphs. +* [`dag_shortest_path.h`][dag_shortest_path_h]: shortest paths on directed + acyclic graphs. If you have such a graph, this implementation is likely to be + the fastest. Unlike most implementations, these algorithms have two interfaces + : a "simple" one (list of edges and weights) and a standard one (taking as + input a graph data structure from [`//ortools/graph/graph.h`][graph_h]). +* [`dag_constrained_shortest_path.`][dag_constrained_shortest_path_h]: shortest + paths on directed acyclic graphs with resource constraints. +* [`hamiltonian_path.h`][hamiltonian_path_h]: entry point for computing minimum + [Hamiltonian paths](https://en.wikipedia.org/wiki/Hamiltonian_path) and cycles + on directed graphs with costs on arcs, using a dynamic-programming algorithm. +* [`eulerian_path.h`][eulerian_path_h]: entry point for computing minimum + [Eulerian paths](https://en.wikipedia.org/wiki/Eulerian_path) and cycles on + undirected graphs. Graph decompositions: * [`connected_components.h`][connected_components_h]: entry point for computing connected components in an undirected graph. (It does not need an explicit graph class.) - * [`strongly_connected_components.h`][strongly_connected_components_h]: entry point for computing the strongly connected components of a directed graph. - -* [`cliques.h`][cliques_h]: entry point for computing maximum cliques and - clique covers in a directed graph, based on the Bron-Kerbosch algorithm.(It - does not need an explicit graph class.) +* [`cliques.h`][cliques_h]: entry point for computing maximum cliques and clique + covers in a directed graph, based on the Bron-Kerbosch algorithm.(It does not + need an explicit graph class.) 
Flow algorithms: -* [`linear_assignment.h`][linear_assignment_h]: entry point for solving linear - sum assignment problems (classical assignment problems where the total cost - is the sum of the costs of each arc used) on directed graphs with arc costs, - based on the Goldberg-Kennedy push-relabel algorithm. - -* [`max_flow.h`][max_flow_h]: entry point for computing maximum flows on - directed graphs with arc capacities, based on the Goldberg-Tarjan - push-relabel algorithm. - -* [`min_cost_flow.h`][min_cost_flow_h]: entry point for computing minimum-cost - flows on directed graphs with arc capacities, arc costs, and - supplies/demands at nodes, based on the Goldberg-Tarjan push-relabel - algorithm. +* [`linear_assignment.h`][linear_assignment_h]: entry point for solving linear + sum assignment problems (classical assignment problems where the total cost is + the sum of the costs of each arc used) on directed graphs with arc costs, + based on the Goldberg-Kennedy push-relabel algorithm. +* [`max_flow.h`][max_flow_h]: entry point for computing maximum flows on + directed graphs with arc capacities, based on the Goldberg-Tarjan push-relabel + algorithm. +* [`min_cost_flow.h`][min_cost_flow_h]: entry point for computing minimum-cost + flows on directed graphs with arc capacities, arc costs, and supplies/demands + at nodes, based on the Goldberg-Tarjan push-relabel algorithm. ## Class design @@ -201,21 +189,18 @@ node. ## Wrappers -* [`python`](python): the SWIG code that makes the wrapper available in Python - and its unit tests. - -* [`java`](java): the SWIG code that makes the wrapper available in Java and - its unit tests. - -* [`csharp`](csharp): the SWIG code that makes the wrapper available in C# and - its unit tests. +* [`python`](python): This directory contains the `pybind11` bindings to make + these C++ libraries available in Python. +* [`java`](java): the SWIG code that makes the wrapper available in Java and its + unit tests. 
+* [`csharp`](csharp): the SWIG code that makes the wrapper available in C# and + its unit tests. ## Samples You can find some canonical examples in [`samples`][samples]. - [graph_h]: ../graph/graph.h [bounded_dijkstra_h]: ../graph/bounded_dijkstra.h [bidirectional_dijkstra_h]: ../graph/bidirectional_dijkstra.h diff --git a/ortools/pdlp/README.md b/ortools/pdlp/README.md index b770f265a3..6a0245f557 100644 --- a/ortools/pdlp/README.md +++ b/ortools/pdlp/README.md @@ -12,21 +12,16 @@ performance and numerical stability. * [`primal_dual_hybrid_gradient.h`][primal_dual_hybrid_gradient_h]: The main entry point for the solver, which takes a `QuadraticProgram` and solver parameters. - * [`quadratic_program.h`][quadratic_program_h]: Defines the `QuadraticProgram` struct to represent the optimization problem, including objective vectors, constraint matrices, and bounds. - * [`quadratic_program_io.h`][quadratic_program_io_h]: Provides utilities to read quadratic programs from various file formats, including MPS and MPModelProto. - * [`sharded_quadratic_program.h`][sharded_quadratic_program_h] and [`sharder.h`][sharder_h]: These provide the infrastructure for sharding problem data and performing parallel computations. - * [`scheduler.h`][scheduler_h]: A thread scheduling interface that supports multiple backends (e.g. Eigen's thread pools). - * [`iteration_stats.h`][iteration_stats_h] and [`termination.h`][termination_h]: Contain logic for computing convergence and infeasibility statistics and checking termination criteria. @@ -46,7 +41,6 @@ performance and numerical stability. * [`samples/`](samples): This directory provides example usage of the library. 
- [primal_dual_hybrid_gradient_h]: ../pdlp/primal_dual_hybrid_gradient.h [quadratic_program_h]: ../pdlp/quadratic_program.h [quadratic_program_io_h]: ../pdlp/quadratic_program_io.h From 6af8bd54b4728cd61afcdfb167e217cfae8036c0 Mon Sep 17 00:00:00 2001 From: Guillaume Chatelet Date: Wed, 2 Jul 2025 12:20:11 +0000 Subject: [PATCH 147/509] Turn some `.i` files into `.swig` This helps with the import / export process. --- cmake/dotnet.cmake | 1 + cmake/java.cmake | 1 + cmake/python.cmake | 1 + ortools/algorithms/java/BUILD.bazel | 2 +- ortools/algorithms/java/CMakeLists.txt | 10 +++++----- ...knapsack_solver.i => knapsack_solver.swig} | 2 +- ortools/base/python-swig.h | 2 +- ortools/constraint_solver/java/CMakeLists.txt | 10 +++++----- ...traint_solver.i => constraint_solver.swig} | 8 ++++---- .../constraint_solver/python/CMakeLists.txt | 10 +++++----- ...traint_solver.i => constraint_solver.swig} | 20 +++++++++---------- .../python/constraint_solver_doc.h | 2 +- ...lpers.i => constraint_solver_helpers.swig} | 2 +- .../{pywrapcp_util.i => pywrapcp_util.swig} | 6 +++--- ortools/graph/csharp/graph.i | 2 +- ortools/graph/java/BUILD.bazel | 2 +- ortools/graph/java/CMakeLists.txt | 10 +++++----- ortools/graph/java/{graph.i => graph.swig} | 2 +- ortools/init/java/init.i | 2 +- ortools/init/python/init_test.py | 2 +- ortools/linear_solver/java/BUILD.bazel | 2 +- ortools/linear_solver/java/CMakeLists.txt | 20 +++++++++---------- .../{linear_solver.i => linear_solver.swig} | 4 ++-- .../{modelbuilder.i => modelbuilder.swig} | 4 ++-- ortools/linear_solver/python/CMakeLists.txt | 8 ++++---- .../{linear_solver.i => linear_solver.swig} | 4 ++-- .../python/linear_solver_natural_api.py | 2 +- ortools/linear_solver/python/pywraplp_test.py | 2 +- ortools/routing/java/CMakeLists.txt | 10 +++++----- .../{index_manager.i => index_manager.swig} | 2 +- .../routing/java/{routing.i => routing.swig} | 8 ++++---- ortools/routing/java/{types.i => types.swig} | 2 +- 
ortools/routing/python/CMakeLists.txt | 10 +++++----- .../{index_manager.i => index_manager.swig} | 4 ++-- .../python/{routing.i => routing.swig} | 16 +++++++-------- .../routing/python/{types.i => types.swig} | 2 +- ortools/sat/java/BUILD.bazel | 2 +- ortools/sat/java/CMakeLists.txt | 10 +++++----- ortools/sat/java/{sat.i => sat.swig} | 4 ++-- ortools/util/functions_swig_helpers.h | 2 +- ortools/util/java/BUILD.bazel | 8 ++++---- ortools/util/java/CMakeLists.txt | 10 +++++----- ...sl_string_view.i => absl_string_view.swig} | 2 +- .../util/java/{functions.i => functions.swig} | 0 ...erval_list.i => sorted_interval_list.swig} | 2 +- .../util/java/{tuple_set.i => tuple_set.swig} | 2 +- ortools/util/java/{vector.i => vector.swig} | 0 .../python/{functions.i => functions.swig} | 0 ortools/util/python/{pair.i => pair.swig} | 0 ortools/util/python/{vector.i => vector.swig} | 0 50 files changed, 121 insertions(+), 118 deletions(-) rename ortools/algorithms/java/{knapsack_solver.i => knapsack_solver.swig} (98%) rename ortools/constraint_solver/java/{constraint_solver.i => constraint_solver.swig} (99%) rename ortools/constraint_solver/python/{constraint_solver.i => constraint_solver.swig} (99%) rename ortools/constraint_solver/python/{constraint_solver_helpers.i => constraint_solver_helpers.swig} (97%) rename ortools/constraint_solver/python/{pywrapcp_util.i => pywrapcp_util.swig} (92%) rename ortools/graph/java/{graph.i => graph.swig} (99%) rename ortools/linear_solver/java/{linear_solver.i => linear_solver.swig} (99%) rename ortools/linear_solver/java/{modelbuilder.i => modelbuilder.swig} (99%) rename ortools/linear_solver/python/{linear_solver.i => linear_solver.swig} (99%) rename ortools/routing/java/{index_manager.i => index_manager.swig} (97%) rename ortools/routing/java/{routing.i => routing.swig} (99%) rename ortools/routing/java/{types.i => types.swig} (98%) rename ortools/routing/python/{index_manager.i => index_manager.swig} (95%) rename 
ortools/routing/python/{routing.i => routing.swig} (93%) rename ortools/routing/python/{types.i => types.swig} (98%) rename ortools/sat/java/{sat.i => sat.swig} (98%) rename ortools/util/java/{absl_string_view.i => absl_string_view.swig} (99%) rename ortools/util/java/{functions.i => functions.swig} (100%) rename ortools/util/java/{sorted_interval_list.i => sorted_interval_list.swig} (98%) rename ortools/util/java/{tuple_set.i => tuple_set.swig} (97%) rename ortools/util/java/{vector.i => vector.swig} (100%) rename ortools/util/python/{functions.i => functions.swig} (100%) rename ortools/util/python/{pair.i => pair.swig} (100%) rename ortools/util/python/{vector.i => vector.swig} (100%) diff --git a/cmake/dotnet.cmake b/cmake/dotnet.cmake index 12ead2117a..e241e01c26 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -20,6 +20,7 @@ if(NOT TARGET ${PROJECT_NAMESPACE}::ortools) endif() # Will need swig +set(SWIG_SOURCE_FILE_EXTENSIONS ".i" ".swig") set(CMAKE_SWIG_FLAGS) find_package(SWIG REQUIRED) include(UseSWIG) diff --git a/cmake/java.cmake b/cmake/java.cmake index ae7f023c9d..bd73e09dd2 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -20,6 +20,7 @@ if(NOT TARGET ${PROJECT_NAMESPACE}::ortools) endif() # Will need swig +set(SWIG_SOURCE_FILE_EXTENSIONS ".i" ".swig") set(CMAKE_SWIG_FLAGS) find_package(SWIG REQUIRED) include(UseSWIG) diff --git a/cmake/python.cmake b/cmake/python.cmake index 6d4fbde01e..d7173f9abe 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -23,6 +23,7 @@ if(NOT TARGET ${PROJECT_NAMESPACE}::ortools) endif() # Will need swig +set(SWIG_SOURCE_FILE_EXTENSIONS ".i" ".swig") set(CMAKE_SWIG_FLAGS) find_package(SWIG REQUIRED) include(UseSWIG) diff --git a/ortools/algorithms/java/BUILD.bazel b/ortools/algorithms/java/BUILD.bazel index dd3cb9da80..5b84c09b2f 100644 --- a/ortools/algorithms/java/BUILD.bazel +++ b/ortools/algorithms/java/BUILD.bazel @@ -17,7 +17,7 @@ load("//bazel:swig_java.bzl", "ortools_java_wrap_cc") 
ortools_java_wrap_cc( name = "knapsacksolver", - src = "knapsack_solver.i", + src = "knapsack_solver.swig", module = "operations_research_algorithms", package = "com.google.ortools.algorithms", swig_includes = [ diff --git a/ortools/algorithms/java/CMakeLists.txt b/ortools/algorithms/java/CMakeLists.txt index cfae6f4161..82c9e26691 100644 --- a/ortools/algorithms/java/CMakeLists.txt +++ b/ortools/algorithms/java/CMakeLists.txt @@ -11,17 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -set_property(SOURCE knapsack_solver.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE knapsack_solver.i PROPERTY SWIG_MODULE_NAME main) -set_property(SOURCE knapsack_solver.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE knapsack_solver.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE knapsack_solver.swig PROPERTY SWIG_MODULE_NAME main) +set_property(SOURCE knapsack_solver.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE knapsack_solver.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE knapsack_solver.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.algorithms) swig_add_library(jnialgorithms TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/algorithms - SOURCES knapsack_solver.i) + SOURCES knapsack_solver.swig) target_include_directories(jnialgorithms PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jnialgorithms PROPERTIES diff --git a/ortools/algorithms/java/knapsack_solver.i b/ortools/algorithms/java/knapsack_solver.swig similarity index 98% rename from ortools/algorithms/java/knapsack_solver.i rename to ortools/algorithms/java/knapsack_solver.swig index 429098139e..17b4d45c42 100644 --- a/ortools/algorithms/java/knapsack_solver.i +++ b/ortools/algorithms/java/knapsack_solver.swig @@ -21,7 +21,7 @@ %include "enums.swg" %include "ortools/base/base.i" -%import "ortools/util/java/vector.i" +%import "ortools/util/java/vector.swig" 
%{ #include "ortools/algorithms/knapsack_solver.h" diff --git a/ortools/base/python-swig.h b/ortools/base/python-swig.h index 8c7003691a..38dc43b8f5 100644 --- a/ortools/base/python-swig.h +++ b/ortools/base/python-swig.h @@ -13,7 +13,7 @@ // Static part of SWIG-generated C++ wrapper for Python (_module_name.cc). // -// This file should only be included in base.i inside Python-specific part: +// This file should only be included in base.swig inside Python-specific part: // #ifdef SWIGPYTHON // %{ // #include "ortools/base/swig/python-swig.cc" diff --git a/ortools/constraint_solver/java/CMakeLists.txt b/ortools/constraint_solver/java/CMakeLists.txt index e111667437..61e0ae1957 100644 --- a/ortools/constraint_solver/java/CMakeLists.txt +++ b/ortools/constraint_solver/java/CMakeLists.txt @@ -11,17 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -set_property(SOURCE constraint_solver.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE constraint_solver.i PROPERTY SWIG_MODULE_NAME Globals) -set_property(SOURCE constraint_solver.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE constraint_solver.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE constraint_solver.swig PROPERTY SWIG_MODULE_NAME Globals) +set_property(SOURCE constraint_solver.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE constraint_solver.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE constraint_solver.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.constraintsolver) swig_add_library(jniconstraint_solver TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/constraintsolver - SOURCES constraint_solver.i) + SOURCES constraint_solver.swig) target_include_directories(jniconstraint_solver PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jniconstraint_solver PROPERTIES diff --git a/ortools/constraint_solver/java/constraint_solver.i 
b/ortools/constraint_solver/java/constraint_solver.swig similarity index 99% rename from ortools/constraint_solver/java/constraint_solver.i rename to ortools/constraint_solver/java/constraint_solver.swig index 88f8540460..f27f5415ff 100644 --- a/ortools/constraint_solver/java/constraint_solver.i +++ b/ortools/constraint_solver/java/constraint_solver.swig @@ -20,9 +20,9 @@ %include "exception.i" %include "ortools/base/base.i" -%include "ortools/util/java/absl_string_view.i" -%include "ortools/util/java/tuple_set.i" -%include "ortools/util/java/vector.i" +%include "ortools/util/java/absl_string_view.swig" +%include "ortools/util/java/tuple_set.swig" +%include "ortools/util/java/vector.swig" %include "ortools/util/java/proto.i" // Make the SWIG-generated constructor public. @@ -82,7 +82,7 @@ class GlobalRefGuard { %} // ############ BEGIN DUPLICATED CODE BLOCK ############ -// IMPORTANT: keep this code block in sync with the .i +// IMPORTANT: keep this code block in sync with the .swig // files in ../python and ../csharp. // Protect from failure. 
diff --git a/ortools/constraint_solver/python/CMakeLists.txt b/ortools/constraint_solver/python/CMakeLists.txt index 8fe4444aa7..f40645ab1b 100644 --- a/ortools/constraint_solver/python/CMakeLists.txt +++ b/ortools/constraint_solver/python/CMakeLists.txt @@ -32,16 +32,16 @@ target_link_libraries(constraint_solver_pybind11 PRIVATE add_library(${PROJECT_NAMESPACE}::constraint_solver_pybind11 ALIAS constraint_solver_pybind11) # legacy pywrapcp -set_property(SOURCE constraint_solver.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE constraint_solver.i PROPERTY SWIG_MODULE_NAME pywrapcp) -set_property(SOURCE constraint_solver.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE constraint_solver.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE constraint_solver.swig PROPERTY SWIG_MODULE_NAME pywrapcp) +set_property(SOURCE constraint_solver.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE constraint_solver.i PROPERTY COMPILE_OPTIONS -nofastunpack) +set_property(SOURCE constraint_solver.swig PROPERTY COMPILE_OPTIONS -nofastunpack) swig_add_library(pywrapcp TYPE MODULE LANGUAGE python OUTPUT_DIR ${PYTHON_PROJECT_DIR}/constraint_solver - SOURCES constraint_solver.i) + SOURCES constraint_solver.swig) target_include_directories(pywrapcp PRIVATE ${Python3_INCLUDE_DIRS}) set_property(TARGET pywrapcp PROPERTY SWIG_USE_TARGET_INCLUDE_DIRECTORIES ON) diff --git a/ortools/constraint_solver/python/constraint_solver.i b/ortools/constraint_solver/python/constraint_solver.swig similarity index 99% rename from ortools/constraint_solver/python/constraint_solver.i rename to ortools/constraint_solver/python/constraint_solver.swig index 498e4af146..c4e8c5bb04 100644 --- a/ortools/constraint_solver/python/constraint_solver.i +++ b/ortools/constraint_solver/python/constraint_solver.swig @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// This .i file exposes the code declared in ../constraint_solver.h and +// This .swig file exposes the code declared in ../constraint_solver.h and // ../constraint_solveri.h. // // It is particularly complex for a swig file, mostly because it contains a @@ -35,12 +35,12 @@ %include "ortools/util/python/proto.i" // PY_CONVERT_HELPER_* macros. -%include "ortools/constraint_solver/python/constraint_solver_helpers.i" +%include "ortools/constraint_solver/python/constraint_solver_helpers.swig" -%include "ortools/util/python/functions.i" -%include "ortools/util/python/vector.i" +%include "ortools/util/python/functions.swig" +%include "ortools/util/python/vector.swig" -%include "ortools/constraint_solver/python/pywrapcp_util.i" +%include "ortools/constraint_solver/python/pywrapcp_util.swig" // We *do* need to use SWIGTYPE_... type names directly, because the // (recommended replacement) $descriptor macro fails, as of 2019-07, with @@ -103,7 +103,7 @@ struct FailureProtect { // ============= Type conversions ============== -// See ./constraint_solver_helpers.i +// See ./constraint_solver_helpers.swig PY_CONVERT_HELPER_PTR(Constraint); PY_CONVERT_HELPER_PTR(Decision); PY_CONVERT_HELPER_PTR(DecisionBuilder); @@ -131,7 +131,7 @@ PY_CONVERT(LocalSearchFilter); PY_CONVERT(LocalSearchFilterManager); // Support passing std::function as argument. -// See ../utils/python/functions.i, from which this was copied and adapted. +// See ../utils/python/functions.swig, from which this was copied and adapted. %{ static void PyFunctionSolverToVoid(PyObject* pyfunc, @@ -647,10 +647,10 @@ PY_STRINGIFY_DEBUGSTRING(Decision); } // ############ BEGIN DUPLICATED CODE BLOCK ############ -// IMPORTANT: keep this code block in sync with the .i +// IMPORTANT: keep this code block in sync with the .swig // files in ../java and ../csharp. // TODO(user): extract this duplicated code into a common, multi-language -// .i file with SWIG_exception. +// .swig file with SWIG_exception. 
// Protect from failure. // TODO(user): document this further. @@ -1911,7 +1911,7 @@ RENAME_ASSIGNMENT_CONTAINER( } // namespace operations_research -// ================= constraint_solver.i API ===================== +// ================= constraint_solver.swig API ===================== namespace operations_research { diff --git a/ortools/constraint_solver/python/constraint_solver_doc.h b/ortools/constraint_solver/python/constraint_solver_doc.h index c8dff6346f..73d207c446 100644 --- a/ortools/constraint_solver/python/constraint_solver_doc.h +++ b/ortools/constraint_solver/python/constraint_solver_doc.h @@ -5715,7 +5715,7 @@ static const char* __doc_operations_research_Solver_SetUseFastLocalSearch = R"doc(enabled for metaheuristics. Disables/enables fast local search.)doc"; static const char* __doc_operations_research_Solver_ShouldFail = - R"doc(See http://cs/file:constraint_solver.i%20ShouldFail.)doc"; + R"doc(See http://cs/file:constraint_solver.swig%20ShouldFail.)doc"; static const char* __doc_operations_research_Solver_Solve = R"doc(@{ Solves the problem using the given DecisionBuilder and returns true diff --git a/ortools/constraint_solver/python/constraint_solver_helpers.i b/ortools/constraint_solver/python/constraint_solver_helpers.swig similarity index 97% rename from ortools/constraint_solver/python/constraint_solver_helpers.i rename to ortools/constraint_solver/python/constraint_solver_helpers.swig index 22f0e21189..576a912ec3 100644 --- a/ortools/constraint_solver/python/constraint_solver_helpers.i +++ b/ortools/constraint_solver/python/constraint_solver_helpers.swig @@ -32,7 +32,7 @@ PyObject* PyObjFrom(const int64_t& c) { // Conversion of IntExpr* and IntVar* are a bit special because of the two // possible casts from IntExpr and Constraint. 
We define them here because -// they are used by both constraint_solver.i and routing.i, but need to +// they are used by both constraint_solver.swig and routing.swig, but need to // be defined at a point where IntVar/IntExpr are known. %define PY_CONVERT_HELPER_INTEXPR_AND_INTVAR() %{ diff --git a/ortools/constraint_solver/python/pywrapcp_util.i b/ortools/constraint_solver/python/pywrapcp_util.swig similarity index 92% rename from ortools/constraint_solver/python/pywrapcp_util.i rename to ortools/constraint_solver/python/pywrapcp_util.swig index 9ecb76dd59..dedd8006b9 100644 --- a/ortools/constraint_solver/python/pywrapcp_util.i +++ b/ortools/constraint_solver/python/pywrapcp_util.swig @@ -11,11 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This .i file cannot be used in isolation! -// It represents some of the inlined C++ content of ./constraint_solver.i, +// This .swig file cannot be used in isolation! +// It represents some of the inlined C++ content of ./constraint_solver.swig, // and was split out because it's a large enough chunk of C++ code. // -// It can only be interpreted in the context of ./constraint_solver.i, where +// It can only be interpreted in the context of ./constraint_solver.swig, where // it is included. %{ #include diff --git a/ortools/graph/csharp/graph.i b/ortools/graph/csharp/graph.i index d51b07c3b5..791c322513 100644 --- a/ortools/graph/csharp/graph.i +++ b/ortools/graph/csharp/graph.i @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This .i file is only used in the open-source export to OR-Tools. +// This .swig file is only used in the open-source export to OR-Tools. 
// // It exposes some of the C++ classes in ../, namely : // - SimpleMaxFlow, from ../max_flow.h diff --git a/ortools/graph/java/BUILD.bazel b/ortools/graph/java/BUILD.bazel index 25e7ba18ae..4818a86092 100644 --- a/ortools/graph/java/BUILD.bazel +++ b/ortools/graph/java/BUILD.bazel @@ -19,7 +19,7 @@ load("//bazel:swig_java.bzl", "ortools_java_wrap_cc") ortools_java_wrap_cc( name = "graph", - src = "graph.i", + src = "graph.swig", module = "graph", package = "com.google.ortools.graph", swig_includes = [ diff --git a/ortools/graph/java/CMakeLists.txt b/ortools/graph/java/CMakeLists.txt index 6054afd20d..0dfe048636 100644 --- a/ortools/graph/java/CMakeLists.txt +++ b/ortools/graph/java/CMakeLists.txt @@ -11,17 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -set_property(SOURCE graph.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE graph.i PROPERTY SWIG_MODULE_NAME main) -set_property(SOURCE graph.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE graph.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE graph.swig PROPERTY SWIG_MODULE_NAME main) +set_property(SOURCE graph.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE graph.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE graph.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.graph) swig_add_library(jnigraph TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/graph - SOURCES graph.i) + SOURCES graph.swig) target_include_directories(jnigraph PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jnigraph PROPERTIES diff --git a/ortools/graph/java/graph.i b/ortools/graph/java/graph.swig similarity index 99% rename from ortools/graph/java/graph.i rename to ortools/graph/java/graph.swig index db096aa644..222e968fbe 100644 --- a/ortools/graph/java/graph.i +++ b/ortools/graph/java/graph.swig @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and 
// limitations under the License. -// This .i files exposes some of the C++ classes in ../, namely : +// This .swig files exposes some of the C++ classes in ../, namely : // - MaxFlow, from SimpleMaxFlow in ../max_flow.h // - MinCostFlow, from SimpleMinCostFlow in ../min_cost_flow.h // - LinearSumAssignment, from SimpleLinearSumAssignment in ../assignment.h diff --git a/ortools/init/java/init.i b/ortools/init/java/init.i index 5f16276062..d042d3afab 100644 --- a/ortools/init/java/init.i +++ b/ortools/init/java/init.i @@ -15,7 +15,7 @@ %include "std_string.i" -%include "ortools/util/java/absl_string_view.i" +%include "ortools/util/java/absl_string_view.swig" %{ #include "ortools/init/init.h" diff --git a/ortools/init/python/init_test.py b/ortools/init/python/init_test.py index 18708e4ffa..9988e6b6b7 100755 --- a/ortools/init/python/init_test.py +++ b/ortools/init/python/init_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Simple unit tests for python/init.i. Not exhaustive.""" +"""Simple unit tests for python/init.swig. 
Not exhaustive.""" from absl.testing import absltest from ortools.init.python import init diff --git a/ortools/linear_solver/java/BUILD.bazel b/ortools/linear_solver/java/BUILD.bazel index cad9cc2a3a..0cf3c00226 100644 --- a/ortools/linear_solver/java/BUILD.bazel +++ b/ortools/linear_solver/java/BUILD.bazel @@ -19,7 +19,7 @@ load("//bazel:swig_java.bzl", "ortools_java_wrap_cc") ortools_java_wrap_cc( name = "modelbuilder", - src = "modelbuilder.i", + src = "modelbuilder.swig", module = "modelbuilder", package = "com.google.ortools.modelbuilder", swig_includes = [ diff --git a/ortools/linear_solver/java/CMakeLists.txt b/ortools/linear_solver/java/CMakeLists.txt index 9b10ca3b96..a09642c0d7 100644 --- a/ortools/linear_solver/java/CMakeLists.txt +++ b/ortools/linear_solver/java/CMakeLists.txt @@ -11,17 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -set_property(SOURCE linear_solver.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE linear_solver.i PROPERTY SWIG_MODULE_NAME main) -set_property(SOURCE linear_solver.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE linear_solver.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE linear_solver.swig PROPERTY SWIG_MODULE_NAME main) +set_property(SOURCE linear_solver.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE linear_solver.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE linear_solver.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.linearsolver) swig_add_library(jnilinear_solver TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/linearsolver - SOURCES linear_solver.i) + SOURCES linear_solver.swig) target_include_directories(jnilinear_solver PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jnilinear_solver PROPERTIES @@ -29,17 +29,17 @@ set_target_properties(jnilinear_solver PROPERTIES POSITION_INDEPENDENT_CODE ON) target_link_libraries(jnilinear_solver PRIVATE 
ortools::ortools) -set_property(SOURCE modelbuilder.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE modelbuilder.i PROPERTY SWIG_MODULE_NAME main) -set_property(SOURCE modelbuilder.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE modelbuilder.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE modelbuilder.swig PROPERTY SWIG_MODULE_NAME main) +set_property(SOURCE modelbuilder.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE modelbuilder.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE modelbuilder.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.modelbuilder) swig_add_library(jnimodelbuilder TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/modelbuilder - SOURCES modelbuilder.i) + SOURCES modelbuilder.swig) target_include_directories(jnimodelbuilder PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jnimodelbuilder PROPERTIES diff --git a/ortools/linear_solver/java/linear_solver.i b/ortools/linear_solver/java/linear_solver.swig similarity index 99% rename from ortools/linear_solver/java/linear_solver.i rename to ortools/linear_solver/java/linear_solver.swig index 5c4b24c415..958239412c 100644 --- a/ortools/linear_solver/java/linear_solver.i +++ b/ortools/linear_solver/java/linear_solver.swig @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This .i file exposes the linear programming and integer programming +// This .swig file exposes the linear programming and integer programming // solver. See the C++/Python codelab: (there isn't // a java codelab yet, as of July 2014) // @@ -33,7 +33,7 @@ // We prefer our in-house vector wrapper to std_vector.i, because it // converts to and from native java arrays. 
-%import "ortools/util/java/vector.i" +%import "ortools/util/java/vector.swig" %include "ortools/util/java/proto.i" diff --git a/ortools/linear_solver/java/modelbuilder.i b/ortools/linear_solver/java/modelbuilder.swig similarity index 99% rename from ortools/linear_solver/java/modelbuilder.i rename to ortools/linear_solver/java/modelbuilder.swig index f8c526c1b0..441039dba6 100644 --- a/ortools/linear_solver/java/modelbuilder.i +++ b/ortools/linear_solver/java/modelbuilder.swig @@ -15,7 +15,7 @@ %include "ortools/base/base.i" %include "enums.swg" -%include "ortools/util/java/vector.i" +%include "ortools/util/java/vector.swig" %{ #include "ortools/linear_solver/wrappers/model_builder_helper.h" @@ -27,7 +27,7 @@ // The only difference is that the argument is not a basic type, and needs // processing to be passed to the std::function. // -// TODO(user): cleanup java/functions.i and move the code there. +// TODO(user): cleanup java/functions.swig and move the code there. %{ #include // std::make_shared diff --git a/ortools/linear_solver/python/CMakeLists.txt b/ortools/linear_solver/python/CMakeLists.txt index b7f0b586e4..9a7b45984d 100644 --- a/ortools/linear_solver/python/CMakeLists.txt +++ b/ortools/linear_solver/python/CMakeLists.txt @@ -11,15 +11,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-set_property(SOURCE linear_solver.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE linear_solver.i PROPERTY SWIG_MODULE_NAME pywraplp) -set_property(SOURCE linear_solver.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE linear_solver.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE linear_solver.swig PROPERTY SWIG_MODULE_NAME pywraplp) +set_property(SOURCE linear_solver.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) swig_add_library(pywraplp TYPE MODULE LANGUAGE python OUTPUT_DIR ${PYTHON_PROJECT_DIR}/linear_solver - SOURCES linear_solver.i) + SOURCES linear_solver.swig) target_include_directories(pywraplp PRIVATE ${Python3_INCLUDE_DIRS}) set_property(TARGET pywraplp PROPERTY SWIG_USE_TARGET_INCLUDE_DIRECTORIES ON) diff --git a/ortools/linear_solver/python/linear_solver.i b/ortools/linear_solver/python/linear_solver.swig similarity index 99% rename from ortools/linear_solver/python/linear_solver.i rename to ortools/linear_solver/python/linear_solver.swig index 2309ea3e4c..028b3dbd96 100644 --- a/ortools/linear_solver/python/linear_solver.i +++ b/ortools/linear_solver/python/linear_solver.swig @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This .i file exposes the linear programming and integer programming +// This .swig file exposes the linear programming and integer programming // solver. See the C++/Python codelab: . // // The python API is enriched by custom code defined here, making it @@ -35,7 +35,7 @@ %include "ortools/util/python/proto.i" -%import "ortools/util/python/vector.i" +%import "ortools/util/python/vector.swig" // We need to forward-declare the proto here, so that the PROTO_* macros // involving them work correctly. 
The order matters very much: this declaration diff --git a/ortools/linear_solver/python/linear_solver_natural_api.py b/ortools/linear_solver/python/linear_solver_natural_api.py index 45e9265289..42a56a8697 100644 --- a/ortools/linear_solver/python/linear_solver_natural_api.py +++ b/ortools/linear_solver/python/linear_solver_natural_api.py @@ -13,7 +13,7 @@ """Patch to the python wrapper of ../linear_solver.h providing an algebraic API. -This is directly imported, and use exclusively in ./linear_solver.i. See that +This is directly imported, and use exclusively in ./linear_solver.swig. See that file. For examples leveraging the code defined here, see ./pywraplp_test.py and ../../../python/linear_programming.py. diff --git a/ortools/linear_solver/python/pywraplp_test.py b/ortools/linear_solver/python/pywraplp_test.py index 1f98234375..ff35260334 100644 --- a/ortools/linear_solver/python/pywraplp_test.py +++ b/ortools/linear_solver/python/pywraplp_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Simple unit tests for python/linear_solver.i. Not exhaustive.""" +"""Simple unit tests for python/linear_solver.swig. Not exhaustive.""" import unittest from ortools.linear_solver import linear_solver_pb2 diff --git a/ortools/routing/java/CMakeLists.txt b/ortools/routing/java/CMakeLists.txt index 90aaca3e43..1d0b7e8dac 100644 --- a/ortools/routing/java/CMakeLists.txt +++ b/ortools/routing/java/CMakeLists.txt @@ -11,17 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-set_property(SOURCE routing.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE routing.i PROPERTY SWIG_MODULE_NAME Globals) -set_property(SOURCE routing.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE routing.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE routing.swig PROPERTY SWIG_MODULE_NAME Globals) +set_property(SOURCE routing.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE routing.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE routing.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.routing) swig_add_library(jnirouting TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/routing - SOURCES routing.i) + SOURCES routing.swig) target_include_directories(jnirouting PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jnirouting PROPERTIES diff --git a/ortools/routing/java/index_manager.i b/ortools/routing/java/index_manager.swig similarity index 97% rename from ortools/routing/java/index_manager.i rename to ortools/routing/java/index_manager.swig index 4e1670c0c7..f598513b30 100644 --- a/ortools/routing/java/index_manager.i +++ b/ortools/routing/java/index_manager.swig @@ -14,7 +14,7 @@ // Wrapper for RoutingIndexManager. 
%include "ortools/base/base.i" -%include "ortools/routing/java/types.i" +%include "ortools/routing/java/types.swig" %{ #include "ortools/routing/index_manager.h" diff --git a/ortools/routing/java/routing.i b/ortools/routing/java/routing.swig similarity index 99% rename from ortools/routing/java/routing.i rename to ortools/routing/java/routing.swig index fcb6f6c2a3..abc131dd94 100644 --- a/ortools/routing/java/routing.i +++ b/ortools/routing/java/routing.swig @@ -19,12 +19,12 @@ %template(IntBoolPair) std::pair; %include "ortools/base/base.i" -%include "ortools/util/java/vector.i" +%include "ortools/util/java/vector.swig" %include "ortools/util/java/proto.i" -%import "ortools/constraint_solver/java/constraint_solver.i" -%import "ortools/util/java/sorted_interval_list.i" // Domain +%import "ortools/constraint_solver/java/constraint_solver.swig" +%import "ortools/util/java/sorted_interval_list.swig" // Domain -%include "ortools/routing/java/index_manager.i" +%include "ortools/routing/java/index_manager.swig" // We need to forward-declare the proto here, so that PROTO_INPUT involving it // works correctly. The order matters very much: this declaration needs to be diff --git a/ortools/routing/java/types.i b/ortools/routing/java/types.swig similarity index 98% rename from ortools/routing/java/types.i rename to ortools/routing/java/types.swig index 45e27fbdfa..f9f6d0e3d3 100644 --- a/ortools/routing/java/types.i +++ b/ortools/routing/java/types.swig @@ -18,7 +18,7 @@ // This file is to be %included when wrapped objects need to use these typemaps. 
%include "ortools/base/base.i" -%import "ortools/util/java/vector.i" +%import "ortools/util/java/vector.swig" %{ #include "ortools/routing/types.h" diff --git a/ortools/routing/python/CMakeLists.txt b/ortools/routing/python/CMakeLists.txt index 9fe346f31b..2c7e9e9dbb 100644 --- a/ortools/routing/python/CMakeLists.txt +++ b/ortools/routing/python/CMakeLists.txt @@ -33,16 +33,16 @@ target_link_libraries(routing_pybind11 PRIVATE add_library(${PROJECT_NAMESPACE}::routing_pybind11 ALIAS routing_pybind11) # legacy pywraprouting -set_property(SOURCE routing.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE routing.i PROPERTY SWIG_MODULE_NAME pywraprouting) -set_property(SOURCE routing.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE routing.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE routing.swig PROPERTY SWIG_MODULE_NAME pywraprouting) +set_property(SOURCE routing.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE routing.i PROPERTY COMPILE_OPTIONS -nofastunpack) +set_property(SOURCE routing.swig PROPERTY COMPILE_OPTIONS -nofastunpack) swig_add_library(pywraprouting TYPE MODULE LANGUAGE python OUTPUT_DIR ${PYTHON_PROJECT_DIR}/routing - SOURCES routing.i) + SOURCES routing.swig) target_include_directories(pywraprouting PRIVATE ${Python3_INCLUDE_DIRS}) set_property(TARGET pywraprouting PROPERTY SWIG_USE_TARGET_INCLUDE_DIRECTORIES ON) diff --git a/ortools/routing/python/index_manager.i b/ortools/routing/python/index_manager.swig similarity index 95% rename from ortools/routing/python/index_manager.i rename to ortools/routing/python/index_manager.swig index 02dc547d8b..27b1114f0f 100644 --- a/ortools/routing/python/index_manager.i +++ b/ortools/routing/python/index_manager.swig @@ -14,8 +14,8 @@ // Wrapper for RoutingIndexManager. 
%include "ortools/base/base.i" -%include "ortools/routing/python/types.i" -%import "ortools/util/python/vector.i" +%include "ortools/routing/python/types.swig" +%import "ortools/util/python/vector.swig" %{ #include "ortools/routing/index_manager.h" diff --git a/ortools/routing/python/routing.i b/ortools/routing/python/routing.swig similarity index 93% rename from ortools/routing/python/routing.i rename to ortools/routing/python/routing.swig index 4b49f36ab9..417050ed1c 100644 --- a/ortools/routing/python/routing.i +++ b/ortools/routing/python/routing.swig @@ -18,19 +18,19 @@ %include "ortools/util/python/proto.i" // PY_CONVERT_HELPER_* macros. -%include "ortools/constraint_solver/python/constraint_solver_helpers.i" +%include "ortools/constraint_solver/python/constraint_solver_helpers.swig" -%include "ortools/util/python/functions.i" -%include "ortools/util/python/pair.i" -%include "ortools/util/python/vector.i" +%include "ortools/util/python/functions.swig" +%include "ortools/util/python/pair.swig" +%include "ortools/util/python/vector.swig" // While the module name will be overridden by the one specified on the cmd line, // without this, derived classes (e.g. TypeRequirementChecker) will import base // class from the module specified in the following %import. %module pywraprouting -%import(module="ortools.constraint_solver.pywrapcp") "ortools/constraint_solver/python/constraint_solver.i" -%include "ortools/routing/python/types.i" -%include "ortools/routing/python/index_manager.i" +%import(module="ortools.constraint_solver.pywrapcp") "ortools/constraint_solver/python/constraint_solver.swig" +%include "ortools/routing/python/types.swig" +%include "ortools/routing/python/index_manager.swig" // We need to forward-declare the proto here, so that PROTO_INPUT involving it // works correctly. 
The order matters very much: this declaration needs to be @@ -69,7 +69,7 @@ DEFINE_INDEX_TYPE_TYPEDEF( // ============= Type conversions ============== -// See ./constraint_solver_helpers.i. +// See ./constraint_solver_helpers.swig. PY_CONVERT_HELPER_INTEXPR_AND_INTVAR(); PY_CONVERT_HELPER_PTR(IntervalVar); PY_CONVERT_HELPER_PTR(LocalSearchFilter); diff --git a/ortools/routing/python/types.i b/ortools/routing/python/types.swig similarity index 98% rename from ortools/routing/python/types.i rename to ortools/routing/python/types.swig index c7949de427..09e0d4b617 100644 --- a/ortools/routing/python/types.i +++ b/ortools/routing/python/types.swig @@ -18,7 +18,7 @@ // This file is to be %included when wrapped objects need to use these typemaps. %include "ortools/base/base.i" -%import "ortools/util/python/vector.i" +%import "ortools/util/python/vector.swig" %{ #include "ortools/routing/types.h" diff --git a/ortools/sat/java/BUILD.bazel b/ortools/sat/java/BUILD.bazel index dc1589eb1e..76bc1a8521 100644 --- a/ortools/sat/java/BUILD.bazel +++ b/ortools/sat/java/BUILD.bazel @@ -19,7 +19,7 @@ load("//bazel:swig_java.bzl", "ortools_java_wrap_cc") ortools_java_wrap_cc( name = "sat", - src = "sat.i", + src = "sat.swig", java_deps = [ "//ortools/sat:cp_model_java_proto", "//ortools/sat:sat_parameters_java_proto", diff --git a/ortools/sat/java/CMakeLists.txt b/ortools/sat/java/CMakeLists.txt index ea6aeec7bf..6f7243292b 100644 --- a/ortools/sat/java/CMakeLists.txt +++ b/ortools/sat/java/CMakeLists.txt @@ -11,17 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-set_property(SOURCE sat.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE sat.i PROPERTY SWIG_MODULE_NAME main) -set_property(SOURCE sat.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE sat.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE sat.swig PROPERTY SWIG_MODULE_NAME main) +set_property(SOURCE sat.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE sat.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE sat.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.sat) swig_add_library(jnisat TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/sat - SOURCES sat.i) + SOURCES sat.swig) target_include_directories(jnisat PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jnisat PROPERTIES diff --git a/ortools/sat/java/sat.i b/ortools/sat/java/sat.swig similarity index 98% rename from ortools/sat/java/sat.i rename to ortools/sat/java/sat.swig index af1b325e2b..f2def937a9 100644 --- a/ortools/sat/java/sat.i +++ b/ortools/sat/java/sat.swig @@ -17,7 +17,7 @@ %include "ortools/util/java/proto.i" -%import "ortools/util/java/sorted_interval_list.i" +%import "ortools/util/java/sorted_interval_list.swig" %{ #include "ortools/sat/cp_model.pb.h" @@ -76,7 +76,7 @@ PROTO2_RETURN(operations_research::sat::CpSolverResponse, // The only difference is that the argument is not a basic type, and needs // processing to be passed to the std::function. // -// TODO(user): cleanup java/functions.i and move the code there. +// TODO(user): cleanup java/functions.swig and move the code there. %{ #include // std::make_shared %} diff --git a/ortools/util/functions_swig_helpers.h b/ortools/util/functions_swig_helpers.h index a3451091b6..41423970ea 100644 --- a/ortools/util/functions_swig_helpers.h +++ b/ortools/util/functions_swig_helpers.h @@ -15,7 +15,7 @@ #define OR_TOOLS_UTIL_FUNCTIONS_SWIG_HELPERS_H_ // This file contains class definitions for the wrapping of C++ std::functions -// in Java. 
It is #included by java/functions.i. +// in Java. It is #included by java/functions.swig. #include #include diff --git a/ortools/util/java/BUILD.bazel b/ortools/util/java/BUILD.bazel index 748abcbad6..1496f51923 100644 --- a/ortools/util/java/BUILD.bazel +++ b/ortools/util/java/BUILD.bazel @@ -33,7 +33,7 @@ config_setting( filegroup( name = "vector_swig", srcs = [ - "vector.i", + "vector.swig", ], visibility = ["//visibility:public"], ) @@ -49,7 +49,7 @@ filegroup( filegroup( name = "absl_string_view_swig", srcs = [ - "absl_string_view.i", + "absl_string_view.swig", ], visibility = ["//visibility:public"], ) @@ -57,14 +57,14 @@ filegroup( filegroup( name = "sorted_interval_list_swig", srcs = [ - "sorted_interval_list.i", + "sorted_interval_list.swig", ], visibility = ["//visibility:public"], ) ortools_java_wrap_cc( name = "sorted_interval_list", - src = "sorted_interval_list.i", + src = "sorted_interval_list.swig", package = "com.google.ortools.util", swig_includes = [ ":vector_swig", diff --git a/ortools/util/java/CMakeLists.txt b/ortools/util/java/CMakeLists.txt index 5964336095..6f99f53adf 100644 --- a/ortools/util/java/CMakeLists.txt +++ b/ortools/util/java/CMakeLists.txt @@ -11,17 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-set_property(SOURCE sorted_interval_list.i PROPERTY CPLUSPLUS ON) -set_property(SOURCE sorted_interval_list.i PROPERTY SWIG_MODULE_NAME main) -set_property(SOURCE sorted_interval_list.i PROPERTY COMPILE_DEFINITIONS +set_property(SOURCE sorted_interval_list.swig PROPERTY CPLUSPLUS ON) +set_property(SOURCE sorted_interval_list.swig PROPERTY SWIG_MODULE_NAME main) +set_property(SOURCE sorted_interval_list.swig PROPERTY COMPILE_DEFINITIONS ${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=) -set_property(SOURCE sorted_interval_list.i PROPERTY COMPILE_OPTIONS +set_property(SOURCE sorted_interval_list.swig PROPERTY COMPILE_OPTIONS -package ${JAVA_PACKAGE}.util) swig_add_library(jniutil TYPE OBJECT LANGUAGE java OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/util - SOURCES sorted_interval_list.i) + SOURCES sorted_interval_list.swig) target_include_directories(jniutil PRIVATE ${JNI_INCLUDE_DIRS}) set_target_properties(jniutil PROPERTIES diff --git a/ortools/util/java/absl_string_view.i b/ortools/util/java/absl_string_view.swig similarity index 99% rename from ortools/util/java/absl_string_view.i rename to ortools/util/java/absl_string_view.swig index 311a31d40b..6d3dc12ed0 100644 --- a/ortools/util/java/absl_string_view.i +++ b/ortools/util/java/absl_string_view.swig @@ -12,7 +12,7 @@ // limitations under the License. /* ----------------------------------------------------------------------------- - * absl_string_view.i + * absl_string_view.swig * * Typemaps for absl::string_view * This is mapped to a Java String and is passed around by value. 
diff --git a/ortools/util/java/functions.i b/ortools/util/java/functions.swig similarity index 100% rename from ortools/util/java/functions.i rename to ortools/util/java/functions.swig diff --git a/ortools/util/java/sorted_interval_list.i b/ortools/util/java/sorted_interval_list.swig similarity index 98% rename from ortools/util/java/sorted_interval_list.i rename to ortools/util/java/sorted_interval_list.swig index 97f38b0f10..f9c1e07a09 100644 --- a/ortools/util/java/sorted_interval_list.i +++ b/ortools/util/java/sorted_interval_list.swig @@ -13,7 +13,7 @@ // This is the java SWIG wrapper for ../sorted_interval_list.h. See that file. -%include "ortools/util/java/vector.i" +%include "ortools/util/java/vector.swig" %{ #include diff --git a/ortools/util/java/tuple_set.i b/ortools/util/java/tuple_set.swig similarity index 97% rename from ortools/util/java/tuple_set.i rename to ortools/util/java/tuple_set.swig index 95e3d72c2c..110eb497b5 100644 --- a/ortools/util/java/tuple_set.i +++ b/ortools/util/java/tuple_set.swig @@ -16,7 +16,7 @@ %include "ortools/base/base.i" -%include "ortools/util/java/vector.i" +%include "ortools/util/java/vector.swig" %{ // TODO(user): see if we can remove diff --git a/ortools/util/java/vector.i b/ortools/util/java/vector.swig similarity index 100% rename from ortools/util/java/vector.i rename to ortools/util/java/vector.swig diff --git a/ortools/util/python/functions.i b/ortools/util/python/functions.swig similarity index 100% rename from ortools/util/python/functions.i rename to ortools/util/python/functions.swig diff --git a/ortools/util/python/pair.i b/ortools/util/python/pair.swig similarity index 100% rename from ortools/util/python/pair.i rename to ortools/util/python/pair.swig diff --git a/ortools/util/python/vector.i b/ortools/util/python/vector.swig similarity index 100% rename from ortools/util/python/vector.i rename to ortools/util/python/vector.swig From b3e91bf346deda20aad15367ab2b0309cd0ff216 Mon Sep 17 00:00:00 2001 From: 
Corentin Le Molgat Date: Fri, 4 Jul 2025 14:21:18 +0200 Subject: [PATCH 148/509] examples: fix rcpsp_sat.py test --- examples/python/rcpsp_sat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/python/rcpsp_sat.py b/examples/python/rcpsp_sat.py index eb4fa3e1b7..2b78e3d049 100644 --- a/examples/python/rcpsp_sat.py +++ b/examples/python/rcpsp_sat.py @@ -27,9 +27,9 @@ from absl import app from absl import flags from google.protobuf import text_format +from ortools.sat.python import cp_model from ortools.scheduling import rcpsp_pb2 from ortools.scheduling.python import rcpsp -from ortools.sat.python import cp_model _INPUT = flags.DEFINE_string("input", "", "Input file to parse and solve.") _OUTPUT_PROTO = flags.DEFINE_string( From 527c178b5664795a8c9069626dd808c5b3a2ad39 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 4 Jul 2025 15:17:22 +0200 Subject: [PATCH 149/509] remove deprecated doc generation stuff * remove "doc" target from legacy makefile based build * remove doxygen templates no more used * removed pdoc3 .mako files (we now re-use pdoc) --- Makefile | 9 +- makefiles/Makefile.doc.mk | 44 - tools/doc/DoxygenLayout.xml | 220 --- tools/doc/all.footer.html.in | 8 - tools/doc/all.styleSheet.css.in | 1450 --------------- tools/doc/cpp.doxy.in | 2544 --------------------------- tools/doc/cpp.header.html.in | 33 - tools/doc/default.footer.html.in | 21 - tools/doc/default.header.html.in | 56 - tools/doc/default.styleSheet.css.in | 1730 ------------------ tools/doc/dotnet.doxy.in | 2525 -------------------------- tools/doc/dotnet.header.html.in | 34 - tools/doc/gen_javadoc.sh | 27 - tools/doc/gen_ref_doc.py | 251 --- tools/doc/java.doxy.in | 2525 -------------------------- tools/doc/java.header.html.in | 34 - tools/doc/ortools.header.html.in | 67 - tools/doc/samples_cpp.dox | 19 - tools/doc/templates/credits.mako | 0 tools/doc/templates/head.mako | 6 - tools/doc/templates/logo.mako | 5 - 21 files changed, 1 insertion(+), 11607 
deletions(-) delete mode 100644 makefiles/Makefile.doc.mk delete mode 100644 tools/doc/DoxygenLayout.xml delete mode 100644 tools/doc/all.footer.html.in delete mode 100644 tools/doc/all.styleSheet.css.in delete mode 100644 tools/doc/cpp.doxy.in delete mode 100644 tools/doc/cpp.header.html.in delete mode 100644 tools/doc/default.footer.html.in delete mode 100644 tools/doc/default.header.html.in delete mode 100644 tools/doc/default.styleSheet.css.in delete mode 100644 tools/doc/dotnet.doxy.in delete mode 100644 tools/doc/dotnet.header.html.in delete mode 100755 tools/doc/gen_javadoc.sh delete mode 100755 tools/doc/gen_ref_doc.py delete mode 100644 tools/doc/java.doxy.in delete mode 100644 tools/doc/java.header.html.in delete mode 100644 tools/doc/ortools.header.html.in delete mode 100644 tools/doc/samples_cpp.dox delete mode 100644 tools/doc/templates/credits.mako delete mode 100644 tools/doc/templates/head.mako delete mode 100644 tools/doc/templates/logo.mako diff --git a/Makefile b/Makefile index a590e1d748..1021b82baf 100644 --- a/Makefile +++ b/Makefile @@ -101,13 +101,6 @@ include $(OR_ROOT)makefiles/Makefile.dotnet.mk include $(OR_ROOT)makefiles/Makefile.java.mk include $(OR_ROOT)makefiles/Makefile.python.mk include $(OR_ROOT)makefiles/Makefile.archive.mk -ifneq ($(PLATFORM),WIN64) -include $(OR_ROOT)makefiles/Makefile.doc.mk -else -# Remove some rules on windows -help_doc: - -endif .PHONY: help_usage help_usage: @@ -125,7 +118,7 @@ else endif .PHONY: help_all -help_all: help_usage help_cpp help_dotnet help_java help_python help_archive help_doc +help_all: help_usage help_cpp help_dotnet help_java help_python help_archive .PHONY: check_all check_all: check_cpp check_dotnet check_java check_python diff --git a/makefiles/Makefile.doc.mk b/makefiles/Makefile.doc.mk deleted file mode 100644 index e523245cb4..0000000000 --- a/makefiles/Makefile.doc.mk +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 
(the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generate documentation -.PHONY: help_doc # Generate list of Documentation targets with descriptions. -help_doc: - @echo Use one of the following Documentation targets: - @$(GREP) "^.PHONY: .* #" $(CURDIR)/makefiles/Makefile.doc.mk | $(SED) "s/\.PHONY: \(.*\) # \(.*\)/\1\t\2/" | expand -t20 - @echo - - -# Main target -.PHONY: doc # Create doxygen and python documentation. -doc: doxy-doc python-doc java-doc - -.PHONY: doxy-doc # Create doxygen ref documentation. -doxy-doc: cpp python java dotnet - bash -c "command -v doxygen" - python3 tools/doc/gen_ref_doc.py - -.PHONY: java-doc # Create Javadoc ref documentation. -java-doc: java - bash -c "command -v mvn" - tools/doc/gen_javadoc.sh - -.PHONY: python-doc # Create python documentation. 
-python-doc: python - bash -c "command -v pdoc" - $(SET_PYTHONPATH) pdoc \ - --logo https://developers.google.com/optimization/images/orLogo.png \ - -o docs/python/ \ - --no-search -d google \ - --footer-text "OR-Tools ${OR_TOOLS_MAJOR}.${OR_TOOLS_MINOR}" \ - build_make/python/ortools diff --git a/tools/doc/DoxygenLayout.xml b/tools/doc/DoxygenLayout.xml deleted file mode 100644 index aa503b3de9..0000000000 --- a/tools/doc/DoxygenLayout.xml +++ /dev/null @@ -1,220 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/tools/doc/all.footer.html.in b/tools/doc/all.footer.html.in deleted file mode 100644 index 5c65399883..0000000000 --- a/tools/doc/all.footer.html.in +++ /dev/null @@ -1,8 +0,0 @@ -
-
- - - diff --git a/tools/doc/all.styleSheet.css.in b/tools/doc/all.styleSheet.css.in deleted file mode 100644 index 50f8fdbe8e..0000000000 --- a/tools/doc/all.styleSheet.css.in +++ /dev/null @@ -1,1450 +0,0 @@ -/* The standard CSS for doxygen */ - -/* @group Heading Levels */ - -div.contents .textblock h1 { - text-align: left; - font-size: 20pt; - font-weight: normal; - margin-top: 1.5em; - padding: 0 0 0.4em 0; - border-bottom: 1px solid #999; - border-top-width: 0; - border-left-width: 0; - border-right-width: 0; - background-color: transparent; -} - -h1.groupheader { - font-size: 150%; -} - -.title { - font-size: 20pt; - font-weight: normal; - margin: 10px 2px; -} - -dt { - font-weight: bold; -} - -div.multicol { - -moz-column-gap: 1em; - -webkit-column-gap: 1em; - -moz-column-count: 3; - -webkit-column-count: 3; -} - -p.startli, p.startdd, p.starttd { - margin-top: 2px; -} - -p.endli { - margin-bottom: 0px; -} - -p.enddd { - margin-bottom: 4px; -} - -p.endtd { - margin-bottom: 2px; -} - -/* @end */ - -caption { - font-weight: bold; -} - -span.legend { - font-size: 70%; - text-align: center; -} - -h3.version { - font-size: 90%; - text-align: center; -} - -div.qindex { - margin-bottom: 1em; -} - -div.qindex, div.navtab{ - background-color: #eee; - border: 1px solid #999; - text-align: center; -} - -div.qindex, div.navpath { - width: 100%; - line-height: 140%; -} - -div.navtab { - margin-right: 15px; -} - -/* @group Link Styling */ - -a.qindex { - font-weight: bold; -} - -a.qindexHL { - font-weight: bold; - background-color: #9CAFD4; - color: #ffffff; - border: 1px double #869DCA; -} - -/* @end */ - -dl.el { - margin-left: -1cm; -} - -a.el { - padding: 1px; - text-decoration: none; - color: #577E25; -} - -a.el:hover { - text-decoration: underline; -} - -pre.fragment { - /*border: 1px solid #C4CFE5; - background-color: #FBFCFD; - padding: 4px 6px; - margin: 4px 8px 4px 2px; - overflow: auto; - word-wrap: break-word; - font-size: 9pt; - line-height: 125%; - 
font-family: monospace, fixed; - font-size: 105%;*/ -font-family: Consolas, "Liberation Mono", Courier, monospace; -font-size: 10pt; -padding: 0.5em 1em; -background-color: #f5f5f5; -border: 1px solid #bbb; -border-radius(5px); -} - -div.fragment { - /*margin: 0 0 0 5px; - padding: 0.5em 1em; - font-family: Consolas, "Liberation Mono", Courier, monospace; - font-size: 10pt; - background-color: #eef7e3; - border-left: 3px solid #8DC841; - border-right: 0; - border-bottom: 0;*/ - -font-family: Consolas, "Liberation Mono", Courier, monospace; -font-size: 10pt; -padding: 0.5em 1em; -background-color: #f5f5f5; -border: 1px solid #bbb; -border-radius(5px); -} - -div.line { - min-height: 13px; - text-wrap: unrestricted; - white-space: -moz-pre-wrap; /* Moz */ - white-space: -pre-wrap; /* Opera 4-6 */ - white-space: -o-pre-wrap; /* Opera 7 */ - white-space: pre-wrap; /* CSS3 */ - word-wrap: break-word; /* IE 5.5+ */ - text-indent: -53px; - padding-left: 53px; - padding-bottom: 0px; - margin: 0px; - line-height: normal; -} - -span.lineno { - padding-right: 4px; - text-align: right; - background-color: #E8E8E8; - white-space: pre; -} - -div.ah { - width: 100%; - background-color: #eee; - font-weight: bold; - color: #000; - margin-bottom: 1px; - margin-top: 1px; - border: solid 1px #999; -} - -div.groupHeader { - margin-left: 16px; - margin-top: 12px; - font-weight: bold; -} - -div.groupText { - margin-left: 16px; - font-style: italic; -} - -body { - background-color: white; - color: black; - margin: 0; -} - -div.contents { - width: 950px; - margin: 0 auto; -} - -td.indexkey { - background-color: #EBEFF6; - font-weight: bold; - border: 1px solid #C4CFE5; - margin: 2px 0px 2px 0; - padding: 2px 10px; - white-space: nowrap; - vertical-align: top; -} - -td.indexvalue { - background-color: #EBEFF6; - border: 1px solid #C4CFE5; - padding: 2px 10px; - margin: 2px 0px; -} - -tr.memlist { - background-color: #EEF1F7; -} - -p.formulaDsp { - text-align: center; -} - -img.formulaDsp { - 
-} - -img.formulaInl { - vertical-align: middle; -} - -div.center { - text-align: center; - margin-top: 0px; - margin-bottom: 0px; - padding: 0px; -} - -div.center img { - border: 0px; -} - -address.footer { - text-align: right; - padding-right: 12px; -} - -img.footer { - border: 0px; - vertical-align: middle; -} - -/* @group Code Colorization */ - -span.keyword { - color: #008000 -} - -span.keywordtype { - color: #604020 -} - -span.keywordflow { - color: #e08000 -} - -span.comment { - color: #800000 -} - -span.preprocessor { - color: #806020 -} - -span.stringliteral { - color: #002080 -} - -span.charliteral { - color: #008080 -} - -span.vhdldigit { - color: #ff00ff -} - -span.vhdlchar { - color: #000000 -} - -span.vhdlkeyword { - color: #700070 -} - -span.vhdllogic { - color: #ff0000 -} - -blockquote { - background-color: #F7F8FB; - border-left: 2px solid #9CAFD4; - margin: 0 24px 0 4px; - padding: 0 12px 0 16px; -} - -/* @end */ - -td.tiny { - font-size: 75%; -} - -.dirtab { - padding: 4px; - border-collapse: collapse; - border: 1px solid #A3B4D7; -} - -th.dirtab { - background: #EBEFF6; - font-weight: bold; -} - -hr { - display: none; - height: 0px; - border: none; - border-top: 1px solid #4A6AAA; -} - -hr.footer { - height: 1px; -} - -/* @group Member Descriptions */ - -table.memberdecls { - border-spacing: 0px; - padding: 0px; -} - -.memberdecls td, .fieldtable tr { - -webkit-transition-property: background-color, box-shadow; - -webkit-transition-duration: 0.5s; - -moz-transition-property: background-color, box-shadow; - -moz-transition-duration: 0.5s; - -ms-transition-property: background-color, box-shadow; - -ms-transition-duration: 0.5s; - -o-transition-property: background-color, box-shadow; - -o-transition-duration: 0.5s; - transition-property: background-color, box-shadow; - transition-duration: 0.5s; -} - -.memberdecls td.glow, .fieldtable tr.glow { - background-color: cyan; - /*box-shadow: 0 0 15px cyan;*/ -} - -.mdescLeft, .mdescRight, -.memItemLeft, 
.memItemRight, -.memTemplItemLeft, .memTemplItemRight, .memTemplParams { - background-color: #F9FAFC; - border: none; - margin: 4px; - padding: 1px 0 0 8px; -} - -.mdescLeft, .mdescRight { - padding: 0px 8px 4px 8px; - color: #555; -} - -.memSeparator { - border-bottom: 1px solid #DEE4F0; - line-height: 1px; - margin: 0px; - padding: 0px; -} - -.memItemLeft, .memTemplItemLeft { - white-space: nowrap; -} - -.memItemRight { - width: 100%; -} - -.memTemplParams { - color: #4665A2; - white-space: nowrap; - font-size: 80%; -} - -/* @end */ - -/* @group Member Details */ - -/* Styles for detailed member documentation */ - -.memtemplate { - font-size: 80%; - color: #4665A2; - font-weight: normal; - margin-left: 9px; -} - -.memtitle { - display: none; -} - -.memnav { - background-color: #EBEFF6; - border: 1px solid #A3B4D7; - text-align: center; - margin: 2px; - margin-right: 15px; - padding: 2px; -} - -.mempage { - width: 100%; -} - -.memitem { - padding: 0; - /*margin-bottom: 10px;*/ - margin-right: 5px; - display: table !important; - width: 100%; -} - -.memname { - font-weight: bold; - margin-left: 6px; -} - -.memname td { - vertical-align: bottom; -} - -.memproto, dl.reflist dt { - border-top: 1px solid #A8B8D9; - border-left: 1px solid #A8B8D9; - border-right: 1px solid #A8B8D9; - padding: 6px 0px 6px 0px; - color: #000; - font-weight: bold; - text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); - background-color: #eee; - border-top-right-radius: 4px; - border-top-left-radius: 4px; - -moz-border-radius-topright: 4px; - -moz-border-radius-topleft: 4px; - -webkit-border-top-right-radius: 4px; - -webkit-border-top-left-radius: 4px; - -} - -.memdoc, dl.reflist dd { - border: 1px solid #A8B8D9; - padding: 6px 10px 2px 10px; - background-color: #FBFCFD; - background-color: #FFFFFF; - border-bottom-left-radius: 4px; - border-bottom-right-radius: 4px; - -moz-border-radius-bottomleft: 4px; - -moz-border-radius-bottomright: 4px; - -webkit-border-bottom-left-radius: 4px; - 
-webkit-border-bottom-right-radius: 4px; -} - -dl.reflist dt { - padding: 5px; -} - -dl.reflist dd { - margin: 0px 0px 10px 0px; - padding: 5px; -} - -.paramkey { - text-align: right; -} - -.paramtype { - white-space: nowrap; -} - -.paramname { - color: #602020; - white-space: nowrap; -} -.paramname em { - font-style: normal; -} -.paramname code { - line-height: 14px; -} - -.params, .retval, .exception, .tparams { - margin-left: 0px; - padding-left: 0px; -} - -.params .paramname, .retval .paramname { - font-weight: bold; - vertical-align: top; -} - -.params .paramtype { - font-style: italic; - vertical-align: top; -} - -.params .paramdir { - font-family: "courier new",courier,monospace; - vertical-align: top; -} - -table.mlabels { - border-spacing: 0px; -} - -td.mlabels-left { - width: 100%; - padding: 0px; -} - -td.mlabels-right { - vertical-align: bottom; - padding: 0px; - white-space: nowrap; -} - -span.mlabels { - margin-left: 8px; -} - -span.mlabel { - background-color: #728DC1; - border-top:1px solid #5373B4; - border-left:1px solid #5373B4; - border-right:1px solid #C4CFE5; - border-bottom:1px solid #C4CFE5; - text-shadow: none; - color: white; - margin-right: 4px; - padding: 2px 3px; - border-radius: 3px; - font-size: 7pt; - white-space: nowrap; - vertical-align: middle; -} - - - -/* @end */ - -/* these are for tree view when not used as main index */ - -div.directory { - margin: 10px 0px; - border-top: 1px solid #bbb; - width: 100%; -} - -.directory table { - border-collapse:collapse; -} - -.directory td { - margin: 0px; - padding: 0px; - vertical-align: top; -} - -.directory td.entry { - white-space: nowrap; - padding: 5px 5px 5px 0; -} - -.directory td.entry a { - outline:none; -} - -.directory td.entry a img { - border: none; -} - -.directory td.desc { - width: 100%; - padding-left: 6px; - padding-right: 6px; - padding-top: 3px; - /*border-left: 1px solid rgba(0,0,0,0.05);*/ -} - -.directory tr.even { - padding-left: 6px; - background-color: #F7F8FB; -} 
- -.directory img { - vertical-align: -30%; -} - -.directory .levels { - white-space: nowrap; - width: 100%; - text-align: right; - font-size: 9pt; -} - -.directory .levels span { - cursor: pointer; - padding-left: 2px; - padding-right: 2px; - color: #3D578C; -} - -div.dynheader { - margin-top: 8px; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -address { - font-style: normal; - color: #2A3D61; -} - -table table { - width: 90%; -} - -.memitem table table { - width: auto; -} - -table.doxtable { - border-collapse:collapse; - margin-top: 4px; - margin-bottom: 4px; -} - -table.doxtable td, table.doxtable th { - border: 1px solid #2D4068; - padding: 3px 7px 2px; -} - -table.doxtable th { - background-color: #374F7F; - color: #FFFFFF; - font-size: 110%; - padding-bottom: 4px; - padding-top: 5px; -} - -table.fieldtable { - width: 100%; - margin-bottom: 10px; - border: 1px solid #A8B8D9; - border-spacing: 0px; - -moz-border-radius: 4px; - -webkit-border-radius: 4px; - border-radius: 4px; -} - -.fieldtable td, .fieldtable th { - padding: 3px 7px 2px; -} - -.fieldtable td.fieldtype, .fieldtable td.fieldname { - white-space: nowrap; - border-right: 1px solid #A8B8D9; - border-bottom: 1px solid #A8B8D9; - vertical-align: top; -} - -.fieldtable td.fielddoc { - border-bottom: 1px solid #A8B8D9; - width: 100%; -} - -.fieldtable tr:last-child td { - border-bottom: none; -} - -.fieldtable th { - background-color: #E2E8F2; - font-size: 90%; - color: #253555; - padding-bottom: 4px; - padding-top: 5px; - text-align:left; - -moz-border-radius-topleft: 4px; - -moz-border-radius-topright: 4px; - -webkit-border-top-left-radius: 4px; - -webkit-border-top-right-radius: 4px; - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom: 1px solid #A8B8D9; -} - - -.tabsearch { - top: 0px; - left: 10px; - height: 36px; - z-index: 101; - overflow: hidden; - 
font-size: 13px; -} - -.navpath { - display: none; -} - -.navpath ul { - font-size: 11px; - height:30px; - line-height:30px; - color:#8AA0CC; - border:solid 1px #C2CDE4; - overflow:hidden; - margin:0px; - padding:0px; -} - -.navpath li { - list-style-type:none; - float:left; - padding-left:10px; - padding-right:15px; - color:#364D7C; -} - -.navpath li.navelem a { - height:32px; - display:block; - text-decoration: none; - outline: none; - color: #283A5D; - font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; - text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); - text-decoration: none; -} - -.navpath li.navelem a:hover { - color:#6884BD; -} - -.navpath li.footer { - list-style-type:none; - float:right; - padding-left:10px; - padding-right:15px; - background-image:none; - background-repeat:no-repeat; - background-position:right; - color:#364D7C; - font-size: 8pt; -} - - -div.summary { - font-size: 8pt; - padding-right: 5px; -} - -div.summary a { - white-space: nowrap; - padding: 1px; - text-decoration: none; - color: #577E25; -} - -div.summary a:hover { - text-decoration: underline; -} - -div.ingroups { - font-size: 8pt; - width: 50%; - text-align: left; -} - -div.ingroups a { - white-space: nowrap; -} - -div.header { - width: 950px; - margin: 2em auto; - border-bottom: 1px solid #999; -} - -dl { - padding: 0 0 0 10px; -} - -/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ -dl.section { - margin-left: 0px; - padding-left: 0px; -} - -dl.note { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #D0C000; -} - -dl.warning, dl.attention { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #FF0000; -} - -dl.pre, dl.post, dl.invariant { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #00D000; -} - -dl.deprecated { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #505050; -} - 
-dl.todo { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #00C0E0; -} - -dl.test { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #3030E0; -} - -dl.bug { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #C08050; -} - -dl.section dd { - margin-bottom: 6px; -} - - -#projectlogo { - text-align: center; - vertical-align: bottom; - border-collapse: separate; -} - -#projectlogo img { - border: 0px none; -} - -#projectname { - font: 300% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 2px 0px; -} - -#projectbrief { - font: 120% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#projectnumber { - font: 50% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#titlearea { - padding: 0px; - margin: 0px; - width: 100%; - border-bottom: 1px solid #5373B4; -} - -.image { - text-align: center; -} - -.dotgraph { - text-align: center; -} - -.mscgraph { - text-align: center; -} - -.caption { - font-weight: bold; -} - -div.zoom { - border: 1px solid #90A5CE; -} - -dl.citelist { - margin-bottom:50px; -} - -dl.citelist dt { - color:#334975; - float:left; - font-weight:bold; - margin-right:10px; - padding:5px; -} - -dl.citelist dd { - margin:2px 0; - padding:5px 0; -} - -div.toc { - padding: 14px 25px; - background-color: #F4F6FA; - border: 1px solid #D8DFEE; - border-radius: 7px 7px 7px 7px; - float: right; - height: auto; - margin: 0 20px 10px 10px; - width: 200px; -} - -div.toc li { - font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; - margin-top: 5px; - padding-left: 10px; - padding-top: 2px; -} - -div.toc h3 { - font: bold 12px/1.2 Arial,FreeSans,sans-serif; - color: #4665A2; - border-bottom: 0 none; - margin: 0; -} - -div.toc ul { - list-style: none outside none; - border: medium none; - padding: 0px; -} - -div.toc li.level1 { - margin-left: 0px; -} - -div.toc li.level2 { - margin-left: 15px; -} - -div.toc li.level3 { - margin-left: 30px; -} - 
-div.toc li.level4 { - margin-left: 45px; -} - -.inherit_header { - font-weight: bold; - color: gray; - cursor: pointer; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -.inherit_header td { - padding: 6px 0px 2px 5px; -} - -.inherit { - display: none; -} - -tr.heading h2 { - margin-top: 12px; - margin-bottom: 4px; -} - -@media print { - #top { display: none; } - #side-nav { display: none; } - #nav-path { display: none; } - body { overflow:visible; } - h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } - .summary { display: none; } - .memitem { page-break-inside: avoid; } - - #doc-content { - margin-left:0 !important; - height:auto !important; - width:auto !important; - overflow:inherit; - display:inline; - } -} - -/* tabs.css */ -.tabs, .tabs2, .tabs3 { - width: 100%; - z-index: 101; - font-size: 11pt; - background-color: #EAF5DB; - border-left: 1px solid #999; - border-right: 1px solid #999; - border-bottom: 1px solid #999; - padding: 0; - margin: 0; -} - -.tabs2 { - font-size: 10pt; -} -.tabs3 { - font-size: 9pt; -} - -#navrow1 .tablist, #navrow2 .tablist, #navrow3 .tablist, #navrow4 .tablist { - margin: 0; - padding: 0; - display: table; -} - -.tablist li { - float: left; - display: table-cell; - list-style: none; -} - -#navrow1 { - border-top: 1px solid #999; - margin-top: 2em; -} - -#navrow1 .tablist a, #navrow2 .tablist a, #navrow3 .tablist a, #navrow4 .tablist a { - display: block; - margin: 8px 0; - padding: 0 8px; - border-right: 1px solid #bbb; -} - -.tablist li { - margin-bottom: 0 !important; -} - -.tablist li.current a { - font-weight: bold; -} - - - - - -/* SFML css */ -body { - font-family: 'Ubuntu', 'Arial', sans-serif; - line-height: 140%; - margin: 0 0 2em 0; - padding: 0; -} - -#banner-container { - width: 100%; - margin-top: 25px; - border-top: 2px solid #999; - border-bottom: 2px solid #999; - background-color: rgb(140, 200, 
65); -} - -#banner { - width: 950px; - height: 60px; - line-height: 54px; - margin: 0 auto; - text-align: center; -} - -#banner #sfml { - display: inline; - vertical-align: top; - margin-left: 15px; - color: #fff; - font-size: 50pt; - text-shadow: rgba(0, 0, 0, 0.5) 1px 1px 5px; -} - -#footer-container { - clear: both; - width: 100%; - margin-top: 50px; - border-top: 1px solid #999; -} - -#footer { - width: 950px; - margin: 10px auto; - text-align: center; - font-size: 10pt; - color: #555; -} - -#footer a { - padding: 1px; - text-decoration: none; - color: rgb(70, 100, 30); -} - -#footer a:hover { - text-decoration: underline; -} - -div.contents, #content { - width: 950px; - margin: 0 auto; - padding: 0; -} - -div.contents h1 { - color: #333; - padding: 0.5em 0; - margin-top: 30px; - margin-bottom: 0; - text-align: center; - font-size: 26pt; - font-weight: normal; -} - -div.contents h2 { - font-size: 20pt; - font-weight: normal; - margin-top: 1.5em; - padding-bottom: 0.4em; - border-bottom: 1px solid #999; -} - -div.contents h3 { - font-size: 16pt; - font-weight: normal; -} - -div.contents p { - color: #333; - text-align: justify; -} - -div.contents a, #content a { - padding: 1px; - text-decoration: none; - color: rgb(70, 100, 30); -} - -div.contents a:hover, #content a:hover { - text-decoration: underline; -} - -div.contents code { - font-size: 11pt; - font-family: Consolas, "Liberation Mono", Courier, monospace; -} - -div.contents pre code { - font-family: Consolas, "Liberation Mono", Courier, monospace; - font-size: 10pt; - padding: 0.5em 1em; - background-color: #f5f5f5; - border: 1px solid #bbb; -} - -div.contents ul { - list-style-type: square; - list-style-position: outside; - margin: 0 0 0 1.5em; - padding: 0; -} - -div.contents ul li { - color: #333; - margin: 0 0 0.3em 0; -} - - -.icon { - font-family: Arial, Helvetica; - font-weight: bold; - font-size: 12px; - height: 14px; - width: 16px; - display: inline-block; - background-color: #8cc445; - color: 
white; - text-align: center; - border-radius: 4px; - margin-left: 2px; - margin-right: 2px; - line-height: normal; -} - -.icona { - width: 24px; - height: 22px; - display: inline-block; -} - -.iconfopen { - width: 24px; - height: 18px; - margin-bottom: 4px; - background-image:url('ftv2folderopen.png'); - background-position: 0px -4px; - background-repeat: repeat-y; - vertical-align:top; - display: inline-block; -} - -.iconfclosed { - width: 24px; - height: 18px; - margin-bottom: 4px; - background-image:url('ftv2folderclosed.png'); - background-position: 0px -4px; - background-repeat: repeat-y; - vertical-align:top; - display: inline-block; -} - -.icondoc { - width: 24px; - height: 18px; - margin-bottom: 4px; - background-image:url('ftv2doc.png'); - background-position: 0px -4px; - background-repeat: repeat-y; - vertical-align:top; - display: inline-block; -} - -/* tooltip related style info */ - -.ttc { - position: absolute; - display: none; -} - -#powerTip { - cursor: default; - white-space: nowrap; - background-color: white; - border: 1px solid gray; - border-radius: 4px 4px 4px 4px; - box-shadow: 1px 1px 7px gray; - display: none; - font-size: smaller; - max-width: 80%; - opacity: 0.9; - padding: 1ex 1em 1em; - position: absolute; - z-index: 2147483647; -} - -#powerTip div.ttdoc { - color: grey; - font-style: italic; -} - -#powerTip div.ttname a { - font-weight: bold; -} - -#powerTip div.ttname { - font-weight: bold; -} - -#powerTip div.ttdeci { - color: #006318; -} - -#powerTip div { - margin: 0px; - padding: 0px; - font: 12px/16px Roboto,sans-serif; -} - -#powerTip:before, #powerTip:after { - content: ""; - position: absolute; - margin: 0px; -} - -#powerTip.n:after, #powerTip.n:before, -#powerTip.s:after, #powerTip.s:before, -#powerTip.w:after, #powerTip.w:before, -#powerTip.e:after, #powerTip.e:before, -#powerTip.ne:after, #powerTip.ne:before, -#powerTip.se:after, #powerTip.se:before, -#powerTip.nw:after, #powerTip.nw:before, -#powerTip.sw:after, 
#powerTip.sw:before { - border: solid transparent; - content: " "; - height: 0; - width: 0; - position: absolute; -} - -#powerTip.n:after, #powerTip.s:after, -#powerTip.w:after, #powerTip.e:after, -#powerTip.nw:after, #powerTip.ne:after, -#powerTip.sw:after, #powerTip.se:after { - border-color: rgba(255, 255, 255, 0); -} - -#powerTip.n:before, #powerTip.s:before, -#powerTip.w:before, #powerTip.e:before, -#powerTip.nw:before, #powerTip.ne:before, -#powerTip.sw:before, #powerTip.se:before { - border-color: rgba(128, 128, 128, 0); -} - -#powerTip.n:after, #powerTip.n:before, -#powerTip.ne:after, #powerTip.ne:before, -#powerTip.nw:after, #powerTip.nw:before { - top: 100%; -} - -#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { - border-top-color: #ffffff; - border-width: 10px; - margin: 0px -10px; -} -#powerTip.n:before { - border-top-color: #808080; - border-width: 11px; - margin: 0px -11px; -} -#powerTip.n:after, #powerTip.n:before { - left: 50%; -} - -#powerTip.nw:after, #powerTip.nw:before { - right: 14px; -} - -#powerTip.ne:after, #powerTip.ne:before { - left: 14px; -} - -#powerTip.s:after, #powerTip.s:before, -#powerTip.se:after, #powerTip.se:before, -#powerTip.sw:after, #powerTip.sw:before { - bottom: 100%; -} - -#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { - border-bottom-color: #ffffff; - border-width: 10px; - margin: 0px -10px; -} - -#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { - border-bottom-color: #808080; - border-width: 11px; - margin: 0px -11px; -} - -#powerTip.s:after, #powerTip.s:before { - left: 50%; -} - -#powerTip.sw:after, #powerTip.sw:before { - right: 14px; -} - -#powerTip.se:after, #powerTip.se:before { - left: 14px; -} - -#powerTip.e:after, #powerTip.e:before { - left: 100%; -} -#powerTip.e:after { - border-left-color: #ffffff; - border-width: 10px; - top: 50%; - margin-top: -10px; -} -#powerTip.e:before { - border-left-color: #808080; - border-width: 11px; - top: 50%; - margin-top: -11px; -} - 
-#powerTip.w:after, #powerTip.w:before { - right: 100%; -} -#powerTip.w:after { - border-right-color: #ffffff; - border-width: 10px; - top: 50%; - margin-top: -10px; -} -#powerTip.w:before { - border-right-color: #808080; - border-width: 11px; - top: 50%; - margin-top: -11px; -} -.arrow { - cursor: pointer; -} diff --git a/tools/doc/cpp.doxy.in b/tools/doc/cpp.doxy.in deleted file mode 100644 index b8bf9599c0..0000000000 --- a/tools/doc/cpp.doxy.in +++ /dev/null @@ -1,2544 +0,0 @@ -# Doxyfile 1.8.18 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the configuration -# file that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# https://www.gnu.org/software/libiconv/ for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. 
- -PROJECT_NAME - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = tools/doc/orLogo.png - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = docs - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. 
- -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all generated output in the proper direction. -# Possible values are: None, LTR, RTL and Context. -# The default value is: None. - -OUTPUT_TEXT_DIRECTION = None - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. 
Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = YES - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = YES - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = NO - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. 
-# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = YES - -# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line -# such as -# /*************** -# as being the beginning of a Javadoc-style comment "banner". If set to NO, the -# Javadoc-style will behave just like regular comments and it will not be -# interpreted by doxygen. -# The default value is: NO. - -JAVADOC_BANNER = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. 
- -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines (in the resulting output). You can put ^^ in the value part of an -# alias to insert a newline as if a physical newline was in the original file. 
-# When you need a literal { or } or , in the value part of an alias you have to -# escape them by means of a backslash (\), this can lead to conflicts with the -# commands \{ and \} for these it is advised to use the version @{ and @} or use -# a double escape (\\{ and \\}) - -ALIASES = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice -# sources only. Doxygen will then generate output that is more tailored for that -# language. For instance, namespaces will be presented as modules, types will be -# separated into more groups, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_SLICE = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. 
The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, -# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, -# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: -# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser -# tries to guess whether the code is fixed or free formatted code, this is the -# default for Fortran type files). For instance to make doxygen treat .inc files -# as Fortran files (default is PHP), and .f files as C (default is Fortran), -# use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See https://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up -# to that level are automatically included in the table of contents, even if -# they do not have an id attribute. -# Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 5. -# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. - -TOC_INCLUDE_HEADINGS = 5 - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. 
Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = YES - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. 
By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# If one adds a struct or class to a group and this option is enabled, then also -# any nested class or struct is added to the same group. By default this option -# is disabled and one has to add nested compounds explicitly via \ingroup. -# The default value is: NO. - -GROUP_NESTED_COMPOUNDS = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. 
When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. 
- -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual -# methods of a class will be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIV_VIRTUAL = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. 
- -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# declarations. If set to NO, these declarations will be included in the -# documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# (including Cygwin) ands Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. 
- -HIDE_SCOPE_NAMES = YES - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= YES - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. 
- -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = YES - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. 
This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. 
This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = tools/doc/DoxygenLayout.xml - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. 
-# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. If -# EXTRACT_ALL is set to YES then this flag will automatically be disabled. -# The default value is: NO. 
- -WARN_NO_PARAMDOC = YES - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = ortools tools/doc - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. -# The default value is: UTF-8. 
- -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. -# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), -# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen -# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, -# *.vhdl, *.ucf, *.qsf and *.ice. - -FILE_PATTERNS = *.h *.cc *cpp.dox -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. 
- -EXCLUDE = \ - SWIGTYPE* swig* Swig* \ - ortools/algorithms/samples \ - ortools/algorithms/csharp \ - ortools/algorithms/java \ - ortools/algorithms/python \ - ortools/constraint_solver/samples \ - ortools/constraint_solver/csharp \ - ortools/constraint_solver/java \ - ortools/constraint_solver/python \ - ortools/graph/samples \ - ortools/graph/csharp \ - ortools/graph/java \ - ortools/graph/python \ - ortools/linear_solver/samples \ - ortools/linear_solver/csharp \ - ortools/linear_solver/java \ - ortools/linear_solver/python \ - ortools/sat/samples \ - ortools/sat/csharp \ - ortools/sat/java \ - ortools/sat/python \ - ortools/util/csharp \ - ortools/util/java \ - ortools/util/python - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = */SWIGTYPE* */*swig* */*Swig* */mainJNI* - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. 
Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = */mainJNI* - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = \ - examples/cpp \ - ortools/algorithms/samples \ - ortools/constraint_solver/samples \ - ortools/graph/samples \ - ortools/linear_solver/samples \ - ortools/sat/samples - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = *.cc *.h - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = YES - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# -# -# where is the value of the INPUT_FILTER tag, and is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. 
If lines are added -# or removed, the anchors will not be placed correctly. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -INPUT_FILTER = "python3 tools/doc/doxygen_filter.py" - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. 
- -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# entity all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. 
- -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see https://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. 
- -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = tools/doc/header.tmp.html - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = tools/doc/footer.tmp.html - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. 
-# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = tools/doc/styleSheet.tmp.css - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. 
Hue is specified as an angle on a colorwheel, see -# https://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = NO - -# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML -# documentation will contain a main index with vertical navigation menus that -# are dynamically created via JavaScript. 
If disabled, the navigation index will -# consists of multiple levels of tabs that are statically embedded in every HTML -# page. Disable this option to support browsers that do not have JavaScript, -# like the Qt help browser. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_MENUS = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/xcode/), introduced with OSX -# 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. 
See https://developer.apple.com/library/archive/featuredarticles/Doxy -# genXcode/_index.html for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "OR-Tools Documentation" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = com.Google.OrTools - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = com.Google.OrTools - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Google.OR-Tools - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. 
-# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the main .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = com.Google.OrTools - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. 
For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. 
Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = YES - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. 
-# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg -# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see -# https://inkscape.org) to generate formulas as SVG images instead of PNGs for -# the HTML output. These images will generally look nicer at scaled resolutions. -# Possible values are: png The default and svg Looks nicer but requires the -# pdf2svg tool. -# The default value is: png. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FORMULA_FORMAT = png - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANSPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. 
Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands -# to create new LaTeX commands to be used in formulas as building blocks. See -# the section "Including formulas" for details. - -FORMULA_MACROFILE = - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# https://www.mathjax.org) which uses client side JavaScript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. 
The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from https://www.mathjax.org before deployment. -# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use <access key> + S -# (what the <access key> is depends on the OS and browser, but it is typically -# <CTRL>, <ALT>/<option>