diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index df90828f19..e98873ec08 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -1793,12 +1793,13 @@ cc_library( ":sat_base", ":sat_solver", "//ortools/base", + "//ortools/base:strong_vector", "//ortools/util:bitset", "//ortools/util:sort", "//ortools/util:strong_integers", "@com_google_absl//absl/base:core_headers", - "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", + "@com_google_absl//absl/meta:type_traits", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:span", ], @@ -1892,6 +1893,7 @@ cc_test( ":sat_solver", "//ortools/base:gmock_main", "//ortools/util:sorted_interval_list", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/types:span", ], ) @@ -2290,6 +2292,7 @@ cc_test( ":integer_base", ":integer_search", ":intervals", + ":linear_constraint", ":model", ":precedences", ":sat_base", @@ -2722,7 +2725,6 @@ cc_library( ":cuts", ":integer", ":integer_base", - ":intervals", ":linear_constraint", ":linear_constraint_manager", ":model", @@ -2738,6 +2740,7 @@ cc_library( "@com_google_absl//absl/base:core_headers", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:span", diff --git a/ortools/sat/cp_model.proto b/ortools/sat/cp_model.proto index 625471ad29..2f798b5eb1 100644 --- a/ortools/sat/cp_model.proto +++ b/ortools/sat/cp_model.proto @@ -221,7 +221,7 @@ message CircuitConstraintProto { // - Self-arcs are allowed except for node 0. // - There is no cycle in this graph, except through node 0. // -// Note: Currently this constraint expect all the nodes in [0, num_nodes) to +// Note: Currently this constraint expects all the nodes in [0, num_nodes) to // have at least one incident arc. The model will be considered invalid if it // is not the case. 
You can add self-arc fixed to one to ignore some nodes if // needed. @@ -246,6 +246,26 @@ message RoutesConstraintProto { // arc_literal => (current_capacity_tail + demand <= current_capacity_head) repeated int32 demands = 4; int64 capacity = 5; + + // A set of variables associated with the nodes. + message NodeVariables { + // The i-th element is the index of the variable associated with the i-th + // node, or -1 if there is no variable associated with this node (negative + // variable indices, referring to the negation of a variable, are not + // supported). + repeated int32 vars = 1; + } + + // Variables associated with the nodes of the graph, such as the load of the + // vehicle arriving at a node, or the time at which a vehicle arrives at a + // node. Variables with the same "dimension" (such as "load" or "time") must + // be listed together. + // This field is optional. If it is set, the linear constraints of size 1 or 2 + // between these variables will be used to derive cuts for this constraint. If + // it is not set, the solve will try to automatically derive it from the + // linear constraints of size 1 or 2 in the model (this can fail in complex + // cases). 
+ repeated NodeVariables dimensions = 6; } // The values of the n-tuple formed by the given expression can only be one of diff --git a/ortools/sat/cp_model_checker.cc b/ortools/sat/cp_model_checker.cc index a7c8475570..f245e8b177 100644 --- a/ortools/sat/cp_model_checker.cc +++ b/ortools/sat/cp_model_checker.cc @@ -625,7 +625,8 @@ std::string ValidateGraphInput(bool is_route, const GraphProto& graph) { return ""; } -std::string ValidateRoutesConstraint(const ConstraintProto& ct) { +std::string ValidateRoutesConstraint(const CpModelProto& model, + const ConstraintProto& ct) { int max_node = 0; absl::flat_hash_set nodes; for (const int node : ct.routes().tails()) { @@ -655,6 +656,23 @@ std::string ValidateRoutesConstraint(const ConstraintProto& ct) { nodes.size()); } + for (const RoutesConstraintProto::NodeVariables& dimension : + ct.routes().dimensions()) { + if (dimension.vars().size() != nodes.size()) { + return absl::StrCat( + "If the dimensions field is set, its elements must be of size " + "num_nodes:", + nodes.size()); + } + for (const int var : dimension.vars()) { + if (var != -1 && (var < 0 || var >= model.variables_size())) { + return absl::StrCat( + "All the node variables in a route constraint must refer to a " + "valid variable, or be equal to -1"); + } + } + } + return ValidateGraphInput(/*is_route=*/true, ct.routes()); } @@ -1130,7 +1148,7 @@ std::string ValidateCpModel(const CpModelProto& model, bool after_presolve) { ValidateGraphInput(/*is_route=*/false, ct.circuit())); break; case ConstraintProto::ConstraintCase::kRoutes: - RETURN_IF_NOT_EMPTY(ValidateRoutesConstraint(ct)); + RETURN_IF_NOT_EMPTY(ValidateRoutesConstraint(model, ct)); break; case ConstraintProto::ConstraintCase::kInterval: RETURN_IF_NOT_EMPTY(ValidateIntervalConstraint(model, ct)); diff --git a/ortools/sat/cp_model_checker_test.cc b/ortools/sat/cp_model_checker_test.cc index cca6d9b0a5..d48e579c17 100644 --- a/ortools/sat/cp_model_checker_test.cc +++ 
b/ortools/sat/cp_model_checker_test.cc @@ -665,6 +665,60 @@ TEST(ValidateCpModelTest, IntervalMustAppearBeforeTheyAreUsed) { HasSubstr("must appear before")); } +TEST(ValidateCpModelTest, ValidNodeVariables) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 0, 1 ] + heads: [ 1, 0 ] + literals: [ 0, 1 ] + dimensions { vars: [ 2, 3 ] } + dimensions { vars: [ -1, -1 ] } + } + } + )pb"); + EXPECT_TRUE(ValidateCpModel(model).empty()); +} + +TEST(ValidateCpModelTest, InvalidNodeVariablesCount) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 0, 1 ] + heads: [ 1, 0 ] + literals: [ 0, 1 ] + dimensions { vars: [ 2, 3, 2 ] } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), HasSubstr("must be of size num_nodes:2")); +} + +TEST(ValidateCpModelTest, InvalidNodeVariableInRoutesConstraint) { + const CpModelProto model = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + constraints { + routes { + tails: [ 0, 1 ] + heads: [ 1, 0 ] + literals: [ 0, 1 ] + dimensions { vars: [ 2, 3 ] } + } + } + )pb"); + EXPECT_THAT(ValidateCpModel(model), + HasSubstr("must refer to a valid variable")); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/cp_model_copy.cc b/ortools/sat/cp_model_copy.cc index 72eea80387..087b9aae9f 100644 --- a/ortools/sat/cp_model_copy.cc +++ b/ortools/sat/cp_model_copy.cc @@ -858,7 +858,7 @@ bool ModelCopy::CopyAndMapCumulative(const ConstraintProto& ct) { const int new_index = interval_mapping_[ct.cumulative().intervals(i)]; if (new_index != -1) { new_ct->add_intervals(new_index); - *new_ct->add_demands() = ct.cumulative().demands(i); 
+ CopyLinearExpression(ct.cumulative().demands(i), new_ct->add_demands()); } } diff --git a/ortools/sat/cp_model_loader.cc b/ortools/sat/cp_model_loader.cc index f9dedc2835..2ee4ce27bf 100644 --- a/ortools/sat/cp_model_loader.cc +++ b/ortools/sat/cp_model_loader.cc @@ -1280,10 +1280,9 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { max_sum += std::max(term_a, term_b); } - // Load conditional precedences. + // Load conditional precedences and always true binary relations. const SatParameters& params = *m->GetOrCreate(); - if (params.auto_detect_greater_than_at_least_one_of() && - ct.enforcement_literal().size() == 1 && vars.size() <= 2) { + if (ct.enforcement_literal().size() <= 1 && vars.size() <= 2) { // To avoid overflow in the code below, we tighten the bounds. int64_t rhs_min = ct.linear().domain(0); int64_t rhs_max = ct.linear().domain(ct.linear().domain().size() - 1); @@ -1291,13 +1290,19 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) { rhs_max = std::min(rhs_max, max_sum.value()); auto* repository = m->GetOrCreate(); - const Literal lit = mapping->Literal(ct.enforcement_literal(0)); - const Domain domain = ReadDomainFromProto(ct.linear()); - if (vars.size() == 1) { - repository->Add(lit, {vars[0], coeffs[0]}, {}, rhs_min, rhs_max); - } else if (vars.size() == 2) { - repository->Add(lit, {vars[0], coeffs[0]}, {vars[1], coeffs[1]}, rhs_min, - rhs_max); + if (ct.enforcement_literal().empty()) { + if (vars.size() == 2) { + repository->Add(Literal(kNoLiteralIndex), {vars[0], coeffs[0]}, + {vars[1], coeffs[1]}, rhs_min, rhs_max); + } + } else { + const Literal lit = mapping->Literal(ct.enforcement_literal(0)); + if (vars.size() == 1) { + repository->Add(lit, {vars[0], coeffs[0]}, {}, rhs_min, rhs_max); + } else if (vars.size() == 2) { + repository->Add(lit, {vars[0], coeffs[0]}, {vars[1], coeffs[1]}, + rhs_min, rhs_max); + } } } diff --git a/ortools/sat/cp_model_utils.cc b/ortools/sat/cp_model_utils.cc index 
5f5d355207..9dce54ab2b 100644 --- a/ortools/sat/cp_model_utils.cc +++ b/ortools/sat/cp_model_utils.cc @@ -162,6 +162,7 @@ void GetReferencesUsedByConstraint(const ConstraintProto& ct, break; case ConstraintProto::ConstraintCase::kRoutes: AddIndices(ct.routes().literals(), literals); + // The node variables are not used by the constraint itself. break; case ConstraintProto::ConstraintCase::kInverse: AddIndices(ct.inverse().f_direct(), variables); diff --git a/ortools/sat/cumulative_energy.cc b/ortools/sat/cumulative_energy.cc index 6f43e3eae2..36e5d617c0 100644 --- a/ortools/sat/cumulative_energy.cc +++ b/ortools/sat/cumulative_energy.cc @@ -225,15 +225,14 @@ bool CumulativeEnergyConstraint::Propagate() { const int task_with_new_energy_max = start_event_task_time_[event_with_new_energy_max].task_index; - if (helper_->IsPresent(task_with_new_energy_max)) { - helper_->AddPresenceReason(task_with_new_energy_max); - helper_->AddStartMinReason(task_with_new_energy_max, window_start); - helper_->AddEndMaxReason(task_with_new_energy_max, window_end); - if (!demands_->DecreaseEnergyMax(task_with_new_energy_max, - new_energy_max)) { - return false; - } + helper_->AddStartMinReason(task_with_new_energy_max, window_start); + helper_->AddEndMaxReason(task_with_new_energy_max, window_end); + if (!demands_->DecreaseEnergyMax(task_with_new_energy_max, + new_energy_max)) { + return false; + } + if (helper_->IsPresent(task_with_new_energy_max)) { theta_tree_.AddOrUpdateEvent( task_to_start_event_[task_with_new_energy_max], start_event_task_time_[event_with_new_energy_max].time * diff --git a/ortools/sat/cumulative_energy_test.cc b/ortools/sat/cumulative_energy_test.cc index 990f734154..a8b56e9905 100644 --- a/ortools/sat/cumulative_energy_test.cc +++ b/ortools/sat/cumulative_energy_test.cc @@ -37,6 +37,7 @@ #include "ortools/sat/integer_base.h" #include "ortools/sat/integer_search.h" #include "ortools/sat/intervals.h" +#include "ortools/sat/linear_constraint.h" #include 
"ortools/sat/model.h" #include "ortools/sat/precedences.h" #include "ortools/sat/sat_base.h" @@ -81,24 +82,31 @@ std::string InstanceDebugString(const EnergyInstance& instance) { bool SolveUsingConstraint(const EnergyInstance& instance) { Model model; std::vector intervals; - std::vector energy_min; - std::vector energy_max; + std::vector> decomposed_energies; for (const auto& task : instance.tasks) { - const IntegerVariable start_var = - model.Add(NewIntegerVariable(task.start_min, task.end_max)); - const IntegerVariable end_var = - model.Add(NewIntegerVariable(task.start_min, task.end_max)); - const IntegerVariable size_var = - model.Add(NewIntegerVariable(task.duration_min, task.duration_max)); if (task.is_optional) { const Literal is_present = Literal(model.Add(NewBooleanVariable()), true); - intervals.push_back(model.Add( - NewOptionalInterval(start_var, end_var, size_var, is_present))); + const IntegerVariable start = + model.Add(NewIntegerVariable(task.start_min, task.end_max)); + const IntegerVariable end = + model.Add(NewIntegerVariable(task.start_min, task.end_max)); + const IntegerVariable duration = + model.Add(NewIntegerVariable(task.duration_min, task.duration_max)); + intervals.push_back( + model.Add(NewOptionalInterval(start, end, duration, is_present))); } else { - intervals.push_back(model.Add(NewInterval(start_var, end_var, size_var))); + intervals.push_back(model.Add(NewIntervalWithVariableSize( + task.start_min, task.end_max, task.duration_min, task.duration_max))); } - energy_min.push_back(task.energy_min); - energy_max.push_back(task.energy_max); + std::vector energy_literals; + std::vector energy_literals_values_values; + for (int e = task.energy_min; e <= task.energy_max; ++e) { + const Literal lit = Literal(model.Add(NewBooleanVariable()), true); + energy_literals.push_back(lit); + energy_literals_values_values.push_back({lit, e, 1}); + } + model.Add(ExactlyOneConstraint(energy_literals)); + 
decomposed_energies.push_back(energy_literals_values_values); } const AffineExpression capacity( @@ -108,7 +116,7 @@ bool SolveUsingConstraint(const EnergyInstance& instance) { SchedulingConstraintHelper* helper = repo->GetOrCreateHelper(intervals); SchedulingDemandHelper* demands_helper = new SchedulingDemandHelper({}, helper, &model); - demands_helper->OverrideEnergyBounds(energy_min, energy_max); + demands_helper->OverrideDecomposedEnergies(decomposed_energies); model.TakeOwnership(demands_helper); AddCumulativeOverloadChecker(capacity, helper, demands_helper, &model); diff --git a/ortools/sat/diffn.cc b/ortools/sat/diffn.cc index a541c30e28..236752e166 100644 --- a/ortools/sat/diffn.cc +++ b/ortools/sat/diffn.cc @@ -273,11 +273,11 @@ bool NonOverlappingRectanglesEnergyPropagator::Propagate() { return true; } - if (std::max(bounding_box.SizeX(), bounding_box.SizeY()) * - active_box_ranges.size() > - std::numeric_limits::max()) { + if (AtMinOrMaxInt64I( + CapProdI(CapProdI(bounding_box.SizeX(), bounding_box.SizeY()), + active_box_ranges.size()))) { // Avoid integer overflows if the area of the boxes get comparable with - // INT64_MAX + // INT64_MAX. return true; } diff --git a/ortools/sat/integer.h b/ortools/sat/integer.h index b0cf07abf9..51be1efe29 100644 --- a/ortools/sat/integer.h +++ b/ortools/sat/integer.h @@ -200,7 +200,7 @@ class IntegerEncoder { IntegerValue value) const; // Advanced usage. It is more efficient to create the associated literals in - // order, but it might be annoying to do so. Instead, you can first call + // order, but it might be annoying to do so. Instead, you can first call // DisableImplicationBetweenLiteral() and when you are done creating all the // associated literals, you can call (only at level zero) // AddAllImplicationsBetweenAssociatedLiterals() which will also turn back on @@ -318,7 +318,7 @@ class IntegerEncoder { // corresponding to the same variable). // // Note that we only keep this for positive variable. 
- // The one for the negation can be inferred by it. + // The one for the negation can be infered by it. // // Like x >= 1 x >= 4 x >= 5 // Correspond to x <= 0 x <= 3 x <= 4 diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 5c9c7a4afb..33521bebc6 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -858,7 +858,7 @@ bool LinearProgrammingConstraint::IncrementalPropagate( } } - if (!lp_solution_is_set_ || force_lp_call_on_next_propagate_) { + if (!lp_solution_is_set_) { return Propagate(); } @@ -2120,11 +2120,8 @@ void LinearProgrammingConstraint::UpdateSimplexIterationLimit( } bool LinearProgrammingConstraint::Propagate() { - force_lp_call_on_next_propagate_ = false; if (!enabled_) return true; if (time_limit_->LimitReached()) return true; - const int64_t timestamp_at_function_start = integer_trail_->num_enqueues(); - UpdateBoundsOfLpVariables(); // TODO(user): It seems the time we loose by not stopping early might be worth @@ -2163,15 +2160,6 @@ bool LinearProgrammingConstraint::Propagate() { int cuts_round = 0; while (simplex_.GetProblemStatus() == glop::ProblemStatus::OPTIMAL && cuts_round < max_cuts_rounds) { - // We are about to spend some effort finding cuts or changing the LP. - // If one of the LP solve done by this function propagated something, it - // seems better to reach the propagation fix point first before doing that. - if (integer_trail_->num_enqueues() > timestamp_at_function_start) { - force_lp_call_on_next_propagate_ = true; - watcher_->CallOnNextPropagate(watcher_id_); - return true; - } - // We wait for the first batch of problem constraints to be added before we // begin to generate cuts. Note that we rely on num_solves_ since on some // problems there is no other constraints than the cuts. 
diff --git a/ortools/sat/linear_programming_constraint.h b/ortools/sat/linear_programming_constraint.h index b64565b0e6..9fb24c3528 100644 --- a/ortools/sat/linear_programming_constraint.h +++ b/ortools/sat/linear_programming_constraint.h @@ -591,7 +591,6 @@ class LinearProgrammingConstraint : public PropagatorInterface, // True if the last time we solved the exact same LP at level zero, no cuts // and no lazy constraints where added. bool lp_at_level_zero_is_final_ = false; - bool force_lp_call_on_next_propagate_ = false; // Same as lp_solution_ but this vector is indexed by IntegerVariable. ModelLpVariableMapping& mirror_lp_variable_; diff --git a/ortools/sat/precedences.cc b/ortools/sat/precedences.cc index 20977b7f8c..7cccf3bb4c 100644 --- a/ortools/sat/precedences.cc +++ b/ortools/sat/precedences.cc @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -1107,6 +1106,17 @@ bool PrecedencesPropagator::BellmanFordTarjan(Trail* trail) { void BinaryRelationRepository::Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, IntegerValue rhs) { + if (lit.Index() != kNoLiteralIndex) { + num_enforced_relations_++; + DCHECK(a.coeff == 0 || a.var != kNoIntegerVariable); + DCHECK(b.coeff == 0 || b.var != kNoIntegerVariable); + } else { + DCHECK_NE(a.coeff, 0); + DCHECK_NE(b.coeff, 0); + DCHECK_NE(a.var, kNoIntegerVariable); + DCHECK_NE(b.var, kNoIntegerVariable); + } + Relation r; r.enforcement = lit; r.a = a; @@ -1130,13 +1140,90 @@ void BinaryRelationRepository::Add(Literal lit, LinearTerm a, LinearTerm b, void BinaryRelationRepository::Build() { DCHECK(!is_built_); is_built_ = true; - std::vector keys; + std::vector> literal_key_values; + std::vector> var_key_values; const int num_relations = relations_.size(); - keys.reserve(num_relations); + literal_key_values.reserve(num_enforced_relations_); + var_key_values.reserve(num_relations - num_enforced_relations_); for (int i = 0; i < num_relations; ++i) { - 
keys.push_back(relations_[i].enforcement.Index()); + const Relation& r = relations_[i]; + if (r.enforcement.Index() == kNoLiteralIndex) { + var_key_values.emplace_back(r.a.var, i); + var_key_values.emplace_back(r.b.var, i); + std::pair key(r.a.var, r.b.var); + if (relations_[i].a.var > relations_[i].b.var) { + std::swap(key.first, key.second); + } + var_pair_to_relations_[key].push_back(i); + } else { + literal_key_values.emplace_back(r.enforcement.Index(), i); + } } - lit_to_relations_.ResetFromFlatMapping(keys, IdentityMap()); + lit_to_relations_.ResetFromPairs(literal_key_values); + var_to_relations_.ResetFromPairs(var_key_values); +} + +bool BinaryRelationRepository::PropagateLocalBounds( + const IntegerTrail& integer_trail, Literal lit, + const absl::flat_hash_map& input, + absl::flat_hash_map* output) const { + DCHECK_NE(lit.Index(), kNoLiteralIndex); + + auto get_lower_bound = [&](IntegerVariable var) { + const auto it = input.find(var); + if (it != input.end()) return it->second; + return integer_trail.LevelZeroLowerBound(var); + }; + auto get_upper_bound = [&](IntegerVariable var) { + return -get_lower_bound(NegationOf(var)); + }; + auto update_lower_bound_by_var = [&](IntegerVariable var, IntegerValue lb) { + if (lb <= integer_trail.LevelZeroLowerBound(var)) return; + const auto [it, inserted] = output->insert({var, lb}); + if (!inserted) { + it->second = std::max(it->second, lb); + } + }; + auto update_upper_bound_by_var = [&](IntegerVariable var, IntegerValue ub) { + update_lower_bound_by_var(NegationOf(var), -ub); + }; + auto update_var_bounds = [&](const LinearTerm& a, const LinearTerm& b, + IntegerValue lhs, IntegerValue rhs) { + if (a.coeff == 0) return; + + // lb(b.y) <= b.y <= ub(b.y) and lhs <= a.x + b.y <= rhs imply + // ceil((lhs - ub(b.y)) / a) <= x <= floor((rhs - lb(b.y)) / a) + if (b.coeff != 0) { + lhs = lhs - b.coeff * get_upper_bound(b.var); + rhs = rhs - b.coeff * get_lower_bound(b.var); + } + update_lower_bound_by_var(a.var, 
MathUtil::CeilOfRatio(lhs, a.coeff)); + update_upper_bound_by_var(a.var, MathUtil::FloorOfRatio(rhs, a.coeff)); + }; + auto update_var_bounds_from_relation = [&](Relation r) { + r.a.MakeCoeffPositive(); + r.b.MakeCoeffPositive(); + update_var_bounds(r.a, r.b, r.lhs, r.rhs); + update_var_bounds(r.b, r.a, r.lhs, r.rhs); + }; + if (lit.Index() < lit_to_relations_.size()) { + for (const int relation_index : lit_to_relations_[lit]) { + update_var_bounds_from_relation(relations_[relation_index]); + } + } + for (const auto& [var, _] : input) { + if (var >= var_to_relations_.size()) continue; + for (const int relation_index : var_to_relations_[var]) { + update_var_bounds_from_relation(relations_[relation_index]); + } + } + + // Check feasibility. + // TODO(user): we might do that earlier? + for (const auto [var, lb] : *output) { + if (lb > integer_trail.LevelZeroUpperBound(var)) return false; + } + return true; } bool GreaterThanAtLeastOneOfDetector::AddRelationFromIndices( @@ -1205,7 +1292,8 @@ int GreaterThanAtLeastOneOfDetector:: // Collect all relations impacted by this clause. 
std::vector> infos; for (const Literal l : clause) { - for (const int index : repository_.relation_indices(l.Index())) { + for (const int index : + repository_.IndicesOfRelationsEnforcedBy(l.Index())) { const Relation& r = repository_.relation(index); if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { infos.push_back({r.a.var, index}); @@ -1260,6 +1348,7 @@ int GreaterThanAtLeastOneOfDetector:: util_intops::StrongVector> var_to_relations; for (int index = 0; index < repository_.size(); ++index) { const Relation& r = repository_.relation(index); + if (r.enforcement.Index() == kNoLiteralIndex) continue; if (r.a.var != kNoIntegerVariable && IntTypeAbs(r.a.coeff) == 1) { if (r.a.var >= var_to_relations.size()) { var_to_relations.resize(r.a.var + 1); @@ -1388,59 +1477,5 @@ int GreaterThanAtLeastOneOfDetector::AddGreaterThanAtLeastOneOfConstraints( return num_added_constraints; } -bool BinaryRelationRepository::PropagateLocalBounds( - const IntegerTrail& integer_trail, Literal lit, - const absl::flat_hash_map& input, - absl::flat_hash_map* output) const { - DCHECK_NE(lit.Index(), kNoLiteralIndex); - if (lit.Index() >= lit_to_relations_.size()) return true; - - auto get_lower_bound = [&](IntegerVariable var) { - const auto it = input.find(var); - if (it != input.end()) return it->second; - return integer_trail.LevelZeroLowerBound(var); - }; - auto get_upper_bound = [&](IntegerVariable var) { - return -get_lower_bound(NegationOf(var)); - }; - auto update_lower_bound_by_var = [&](IntegerVariable var, IntegerValue lb) { - if (lb <= integer_trail.LevelZeroLowerBound(var)) return; - const auto [it, inserted] = output->insert({var, lb}); - if (!inserted) { - it->second = std::max(it->second, lb); - } - }; - auto update_upper_bound_by_var = [&](IntegerVariable var, IntegerValue ub) { - update_lower_bound_by_var(NegationOf(var), -ub); - }; - auto update_var_bounds = [&](const LinearTerm& a, const LinearTerm& b, - IntegerValue lhs, IntegerValue rhs) { - if (a.coeff 
== 0) return; - - // lb(b.y) <= b.y <= ub(b.y) and lhs <= a.x + b.y <= rhs imply - // ceil((lhs - ub(b.y)) / a) <= x <= floor((rhs - lb(b.y)) / a) - if (b.coeff != 0) { - lhs = lhs - b.coeff * get_upper_bound(b.var); - rhs = rhs - b.coeff * get_lower_bound(b.var); - } - update_lower_bound_by_var(a.var, MathUtil::CeilOfRatio(lhs, a.coeff)); - update_upper_bound_by_var(a.var, MathUtil::FloorOfRatio(rhs, a.coeff)); - }; - for (const int relation_index : lit_to_relations_[lit]) { - auto r = relations_[relation_index]; - r.a.MakeCoeffPositive(); - r.b.MakeCoeffPositive(); - update_var_bounds(r.a, r.b, r.lhs, r.rhs); - update_var_bounds(r.b, r.a, r.lhs, r.rhs); - } - - // Check feasibility. - // TODO(user): we might do that earlier? - for (const auto [var, lb] : *output) { - if (lb > integer_trail.LevelZeroUpperBound(var)) return false; - } - return true; -} - } // namespace sat } // namespace operations_research diff --git a/ortools/sat/precedences.h b/ortools/sat/precedences.h index 04b1c8843b..d4a98c9bf1 100644 --- a/ortools/sat/precedences.h +++ b/ortools/sat/precedences.h @@ -474,6 +474,10 @@ struct LinearTerm { var = NegationOf(var); } } + + bool operator==(const LinearTerm& other) const { + return var == other.var && coeff == other.coeff; + } }; // A relation of the form enforcement => a + b \in [lhs, rhs]. @@ -485,9 +489,15 @@ struct Relation { LinearTerm b; IntegerValue lhs; IntegerValue rhs; + + bool operator==(const Relation& other) const { + return enforcement == other.enforcement && a == other.a && b == other.b && + lhs == other.lhs && rhs == other.rhs; + } }; -// A repository of all the enforced linear constraints of size 1 or 2. +// A repository of all the enforced linear constraints of size 1 or 2, and of +// all the non-enforced linear constraints of size 2. // // TODO(user): This is not always needed, find a way to clean this once we // don't need it. 
@@ -497,12 +507,35 @@ class BinaryRelationRepository { // The returned relation is guaranteed to only have positive variables. const Relation& relation(int index) const { return relations_[index]; } - absl::Span relation_indices(LiteralIndex lit) const { + // Returns the indices of the relations that are enforced by the given + // literal. + absl::Span IndicesOfRelationsEnforcedBy(LiteralIndex lit) const { if (lit >= lit_to_relations_.size()) return {}; return lit_to_relations_[lit]; } - // Adds a relation lit => a + b \in [lhs, rhs]. + // Returns the indices of the non-enforced relations that contain the given + // (positive) variable. + absl::Span IndicesOfRelationsContaining( + IntegerVariable var) const { + if (var >= var_to_relations_.size()) return {}; + return var_to_relations_[var]; + } + + // Returns the indices of the non-enforced relations that contain the given + // (positive) variables. + absl::Span IndicesOfRelationsBetween(IntegerVariable var1, + IntegerVariable var2) const { + std::pair key(var1, var2); + if (var1 > var2) std::swap(var1, var2); + auto it = var_pair_to_relations_.find(key); + if (it == var_pair_to_relations_.end()) return {}; + return it->second; + } + + // Adds a conditional relation lit => a + b \in [lhs, rhs] (one of the terms + // can be zero), or an always true binary relation a + b \in [lhs, rhs] (both + // terms must be non-zero). void Add(Literal lit, LinearTerm a, LinearTerm b, IntegerValue lhs, IntegerValue rhs); @@ -512,8 +545,8 @@ class BinaryRelationRepository { // Assuming level-zero bounds + any (var >= value) in the input map, // fills "output" with a "propagated" set of bounds assuming lit is true (by - // using the relations enforced by lit). Note that we will only fill bounds > - // level-zero ones in output. + // using the relations enforced by lit, as well as the non-enforced ones). + // Note that we will only fill bounds > level-zero ones in output. 
// // Returns false if the new bounds are infeasible at level zero. // @@ -526,8 +559,13 @@ class BinaryRelationRepository { private: bool is_built_ = false; + int num_enforced_relations_ = 0; std::vector relations_; CompactVectorVector lit_to_relations_; + CompactVectorVector var_to_relations_; + absl::flat_hash_map, + std::vector> + var_pair_to_relations_; }; // Detects if at least one of a subset of linear of size 2 or 1, touching the diff --git a/ortools/sat/precedences_test.cc b/ortools/sat/precedences_test.cc index f72587706d..b5bba5475b 100644 --- a/ortools/sat/precedences_test.cc +++ b/ortools/sat/precedences_test.cc @@ -14,9 +14,10 @@ #include "ortools/sat/precedences.h" #include +#include #include -#include "absl/types/span.h" +#include "absl/container/flat_hash_map.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/sat/integer.h" @@ -32,6 +33,7 @@ namespace sat { namespace { using ::testing::ElementsAre; +using ::testing::IsEmpty; using ::testing::UnorderedElementsAre; // A simple macro to make the code more readable. @@ -362,7 +364,7 @@ TEST(PrecedencesPropagatorTest, TrickyCycle) { EXPECT_FALSE(propagator->Propagate(trail)); EXPECT_THAT(trail->FailingClause(), ElementsAre(Literal(-1))); - // Test that the code dectected properly a positive cycle in the dependency + // Test that the code detected properly a positive cycle in the dependency // graph instead of just pushing the bounds until the upper bound is reached. 
EXPECT_LT(integer_trail->num_enqueues(), 10); } @@ -443,6 +445,151 @@ TEST(PrecedenceRelationsTest, CollectPrecedences) { EXPECT_TRUE(p.empty()); } +TEST(BinaryRelationRepositoryTest, Build) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable z = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_b = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {NegationOf(x), 1}, {y, 1}, 2, 8); + repository.Add(Literal(kNoLiteralIndex), {x, 2}, {y, -2}, 0, 10); + repository.Add(lit_a, {x, -3}, {NegationOf(y), 2}, 1, 15); + repository.Add(lit_b, {x, -3}, {kNoIntegerVariable, 0}, 3, 5); + repository.Add(Literal(kNoLiteralIndex), {x, 3}, {y, -1}, 5, 15); + repository.Add(Literal(kNoLiteralIndex), {x, 1}, {z, -1}, 0, 10); + repository.Build(); + + EXPECT_EQ(repository.size(), 6); + EXPECT_EQ(repository.relation(0), (Relation{lit_a, {x, -1}, {y, 1}, 2, 8})); + EXPECT_EQ(repository.relation(1), + (Relation{Literal(kNoLiteralIndex), {x, 2}, {y, -2}, 0, 10})); + EXPECT_EQ(repository.relation(2), (Relation{lit_a, {x, -3}, {y, -2}, 1, 15})); + EXPECT_EQ(repository.relation(3), + (Relation{lit_b, {x, -3}, {kNoIntegerVariable, 0}, 3, 5})); + EXPECT_THAT(repository.IndicesOfRelationsEnforcedBy(lit_a), + UnorderedElementsAre(0, 2)); + EXPECT_THAT(repository.IndicesOfRelationsEnforcedBy(lit_b), + UnorderedElementsAre(3)); + EXPECT_THAT(repository.IndicesOfRelationsContaining(x), + UnorderedElementsAre(1, 4, 5)); + EXPECT_THAT(repository.IndicesOfRelationsContaining(y), + UnorderedElementsAre(1, 4)); + EXPECT_THAT(repository.IndicesOfRelationsContaining(z), + UnorderedElementsAre(5)); + EXPECT_THAT(repository.IndicesOfRelationsBetween(x, y), + UnorderedElementsAre(1, 4)); + EXPECT_THAT(repository.IndicesOfRelationsBetween(x, z), + 
UnorderedElementsAre(5)); + EXPECT_THAT(repository.IndicesOfRelationsBetween(z, y), IsEmpty()); +} + +TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_EnforcedRelation) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, 2, 10); // lit_a => y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), + std::make_pair(y, 5))); +} + +TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_UnenforcedRelation) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + const Literal kNoLiteral = Literal(kNoLiteralIndex); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, -5, 10); // lit_a => y => x - 5 + repository.Add(kNoLiteral, {x, -1}, {y, 1}, 2, 10); // y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), + std::make_pair(y, 5))); +} + +TEST(BinaryRelationRepositoryTest, + PropagateLocalBounds_EnforcedBoundSmallerThanLevelZeroBound) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const 
IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + const Literal lit_b = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, -5, 10); // lit_a => y => x - 5 + repository.Add(lit_b, {x, -1}, {y, 1}, 2, 10); // lit_b => y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, IsEmpty()); +} + +TEST(BinaryRelationRepositoryTest, + PropagateLocalBounds_EnforcedBoundSmallerThanOutputBound) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, 2, 10); // lit_a => y => x + 2 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output = {{y, 8}}; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_TRUE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -8), + std::make_pair(y, 8))); +} + +TEST(BinaryRelationRepositoryTest, PropagateLocalBounds_Infeasible) { + Model model; + const IntegerVariable x = model.Add(NewIntegerVariable(0, 10)); + const IntegerVariable y = model.Add(NewIntegerVariable(0, 10)); + const Literal lit_a = Literal(model.Add(NewBooleanVariable()), true); + BinaryRelationRepository repository; + repository.Add(lit_a, {x, -1}, {y, 1}, 8, 10); // lit_a => y => x + 8 + repository.Build(); + IntegerTrail* integer_trail = model.GetOrCreate(); + 
absl::flat_hash_map input = {{x, 3}}; + absl::flat_hash_map output; + + const bool result = + repository.PropagateLocalBounds(*integer_trail, lit_a, input, &output); + + EXPECT_FALSE(result); + EXPECT_THAT(output, UnorderedElementsAre(std::make_pair(NegationOf(x), -2), + std::make_pair(y, 11))); +} + TEST(GreaterThanAtLeastOneOfDetectorTest, AddGreaterThanAtLeastOneOf) { Model model; const IntegerVariable a = model.Add(NewIntegerVariable(2, 10)); diff --git a/ortools/sat/routing_cuts.cc b/ortools/sat/routing_cuts.cc index 4227110676..050962f712 100644 --- a/ortools/sat/routing_cuts.cc +++ b/ortools/sat/routing_cuts.cc @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include #include @@ -122,9 +124,188 @@ MinOutgoingFlowHelper::~MinOutgoingFlowHelper() { {"RoutingDp/num_full_dp_early_abort", num_full_dp_early_abort_}); stats.push_back( {"RoutingDp/num_full_dp_work_abort", num_full_dp_work_abort_}); + for (const auto& [name, count] : num_by_type_) { + stats.push_back({absl::StrCat("RoutingDp/num_bounds_", name), count}); + } shared_stats_->AddStats(stats); } +int MinOutgoingFlowHelper::ComputeDemandBasedMinOutgoingFlow( + absl::Span subset, const RouteRelationsHelper& helper) { + DCHECK_EQ(helper.num_nodes(), in_subset_.size()); + DCHECK_EQ(helper.num_arcs(), tails_.size()); + + // TODO(user): When we try multiple algorithm from this class on the same + // subset, we should initialize the graph just once as this is costly on large + // problem. 
+ InitializeGraph(subset); + + int min_outgoing_flow = 1; + std::string best_name; + for (int d = 0; d < helper.num_dimensions(); ++d) { + for (const bool negate_variables : {false, true}) { + for (const bool use_incoming : {true, false}) { + bool gcd_was_used = false; + const int bound = ComputeMinNumberOfBins( + RelaxIntoSpecialBinPackingProblem(subset, d, negate_variables, + use_incoming, helper), + &gcd_was_used); + if (bound > min_outgoing_flow) { + min_outgoing_flow = bound; + best_name = absl::StrCat((use_incoming ? "in_" : "out_"), + (gcd_was_used ? "gcd_" : ""), + (negate_variables ? "neg_" : ""), d); + } + } + } + } + + if (min_outgoing_flow > 1) num_by_type_[best_name]++; + return min_outgoing_flow; +} + +absl::Span +MinOutgoingFlowHelper::RelaxIntoSpecialBinPackingProblem( + absl::Span subset, int dimension, bool negate_variables, + bool use_incoming, const RouteRelationsHelper& helper) { + tmp_bin_packing_problem_.clear(); + + // Computes UB/LB. + IntegerValue min_lower_bound = kMaxIntegerValue; + IntegerValue max_upper_bound = kMinIntegerValue; + for (const int n : subset) { + IntegerVariable var = helper.GetNodeVariable(n, dimension); + if (var == kNoIntegerVariable) return {}; + if (negate_variables) var = NegationOf(var); + min_lower_bound = + std::min(min_lower_bound, integer_trail_.LevelZeroLowerBound(var)); + max_upper_bound = + std::max(max_upper_bound, integer_trail_.LevelZeroUpperBound(var)); + } + + // TODO(user): This should probably be moved to RouteRelationsHelper. + auto get_relation_lhs = [&](int arc_index) -> std::optional { + const auto& r = helper.GetArcRelation(arc_index, dimension); + if (r.empty()) { + // Use X_head-X_tail \in [lb(X_head)-ub(X_tail), ub(X_head)-lb(X_tail)]. + const IntegerVariable tail_var = + helper.GetNodeVariable(tails_[arc_index], dimension); + const IntegerVariable head_var = + helper.GetNodeVariable(heads_[arc_index], dimension); + if (negate_variables) { + // Opposite of the rhs. 
+ return integer_trail_.LevelZeroLowerBound(tail_var) - + integer_trail_.LevelZeroUpperBound(head_var); + } + return integer_trail_.LevelZeroLowerBound(head_var) - + integer_trail_.LevelZeroUpperBound(tail_var); + } else if (r.head_coeff != 1 || r.tail_coeff != -1) { + return std::nullopt; + } + return negate_variables ? -r.rhs : r.lhs; + }; + + for (const int n : subset) { + IntegerVariable var = helper.GetNodeVariable(n, dimension); + if (negate_variables) var = NegationOf(var); + + const absl::Span arcs = + use_incoming ? incoming_arc_indices_[n] : outgoing_arc_indices_[n]; + IntegerValue demand = kMaxIntegerValue; + for (const int a : arcs) { + const auto lhs = get_relation_lhs(a); + if (!lhs.has_value()) return {}; + demand = std::min(demand, lhs.value()); + } + + ItemOrBin obj; + obj.capacity = + use_incoming + ? max_upper_bound - integer_trail_.LevelZeroLowerBound(var) + : integer_trail_.LevelZeroUpperBound(var) - min_lower_bound; + obj.demand = demand; + + // TODO(user): Also use MUST_BE_ITEM. + if (arcs.empty()) { + obj.type = ItemOrBin::MUST_BE_BIN; + obj.demand = 0; // We don't want kMaxIntegerValue ! + } else { + obj.type = ItemOrBin::ITEM_OR_BIN; + } + tmp_bin_packing_problem_.push_back(obj); + } + + return absl::MakeSpan(tmp_bin_packing_problem_); +} + +int MinOutgoingFlowHelper::ComputeMinNumberOfBins(absl::Span objects, + bool* gcd_was_used) { + if (objects.empty()) return 0; + + IntegerValue sum_of_demands(0); + int64_t gcd = 0; + for (const ItemOrBin& obj : objects) { + sum_of_demands = CapAddI(sum_of_demands, obj.demand); + gcd = std::gcd(gcd, std::abs(obj.demand.value())); + } + + // TODO(user): we can probably handle a couple of extra case rather than just + // bailing out here and below. + if (AtMinOrMaxInt64I(sum_of_demands)) return 0; + + // If the gcd of all the demands term is positive, we can divide everything. 
+ *gcd_was_used = (gcd > 1); + if (gcd > 1) { + for (ItemOrBin& obj : objects) { + obj.demand /= gcd; + obj.capacity = FloorRatio(obj.capacity, gcd); + } + sum_of_demands /= gcd; + } + + // For a given choice of bins (set B), a feasible problem must satisfy. + // sum_{i \notin B} demands_i <= sum_{i \in B} capacity_i. + // + // Using this we can compute a lower bound on the number of bins needed + // using a greedy algorithm that chooses B in order to make the above + // inequality as "loose" as possible. + // + // This puts 'a' before 'b' if we get more unused capacity by using 'a' as a + // bin and 'b' as an item rather than the other way around. If we call the + // other items demands D and capacities C, the two options are: + // - option1: a.demand + D <= b.capacity + C + // - option2: b.demand + D <= a.capacity + C + // + // The option2 above leads to more unused capacity: + // (a.capacity - b.demand) > (b.capacity - a.demand). + std::stable_sort(objects.begin(), objects.end(), + [](const ItemOrBin& a, const ItemOrBin& b) { + if (a.type != b.type) { + // We want in order: + // MUST_BE_BIN, ITEM_OR_BIN, MUST_BE_ITEM. + return a.type > b.type; + } + return a.capacity + a.demand > b.capacity + b.demand; + }); + + // We start with no bins (sum_of_demands=everything, sum_of_capacity=0) and + // add the best bins one by one until we have sum_of_demands <= + // sum_of_capacity. 
+ int num_bins = 0; + IntegerValue sum_of_capacity(0); + for (; num_bins < objects.size(); ++num_bins) { + const ItemOrBin& obj = objects[num_bins]; + if (obj.type != ItemOrBin::MUST_BE_BIN && + sum_of_demands <= sum_of_capacity) { + return num_bins; + } + sum_of_capacity = CapAddI(sum_of_capacity, obj.capacity); + if (AtMinOrMaxInt64I(sum_of_capacity)) return num_bins; + sum_of_demands -= obj.demand; + } + return num_bins; +} + int MinOutgoingFlowHelper::ComputeMinOutgoingFlow( absl::Span subset) { DCHECK_GE(subset.size(), 1); @@ -451,131 +632,331 @@ IntegerVariable UniqueSharedVariable(const Relation& r1, const Relation& r2) { if (r1.b.var == r2.b.var && r1.a.var != r2.a.var) return r1.b.var; return kNoIntegerVariable; } + +class RouteRelationsBuilder { + public: + using Relation = RouteRelationsHelper::Relation; + + RouteRelationsBuilder( + int num_nodes, absl::Span tails, absl::Span heads, + absl::Span literals, + const BinaryRelationRepository& binary_relation_repository) + : num_nodes_(num_nodes), + num_arcs_(tails.size()), + tails_(tails), + heads_(heads), + literals_(literals), + binary_relation_repository_(binary_relation_repository) {} + + int num_dimensions() const { return num_dimensions_; } + + const std::vector& flat_node_dim_variables() const { + return flat_node_dim_variables_; + } + + const std::vector& flat_arc_dim_relations() const { + return flat_arc_dim_relations_; + } + + bool Build() { + // Step 1: find the number of dimensions (as the number of connected + // components in the graph of binary relations), and find to which dimension + // each variable belongs. + ComputeDimensionOfEachVariable(); + if (num_dimensions_ == 0) return false; + + // Step 2: find the variables which can be unambiguously associated with a + // node and dimension. + // - compute the indices of the binary relations which can be unambiguously + // associated with the incoming and outgoing arcs of each node, per + // dimension. 
+ ComputeAdjacentRelationsPerNodeAndDimension(); + // - find variable associations by using variables which are uniquely shared + // by two adjacent relations of a node. + std::queue> node_dim_pairs = + ComputeVarAssociationsFromSharedVariableOfAdjacentRelations(); + if (node_dim_pairs.empty()) return false; + // - find more variable associations by using arcs from nodes with + // an associated variable, whose other end has no associated variable, and + // where only one variable can be associated with it. + ComputeVarAssociationsFromRelationsWithSingleFreeVar(node_dim_pairs); + + // Step 3: compute the relation for each arc and dimension, now that the + // variables associated with each node and dimension are known. + ComputeArcRelations(); + return true; + } + + private: + IntegerVariable& node_variable(int node, int dimension) { + return flat_node_dim_variables_[node * num_dimensions_ + dimension]; + }; + + const Relation& arc_relation(int arc_index, int dimension) const { + return flat_arc_dim_relations_[arc_index * num_dimensions_ + dimension]; + } + + void SetArcRelation(int arc_index, int dimension, IntegerVariable tail_var, + const sat::Relation& r) { + Relation& relation = + flat_arc_dim_relations_[arc_index * num_dimensions_ + dimension]; + // If several relations are associated with the same arc, keep the first one + // found, unless the new one has opposite +1/-1 coefficients (these + // relations are preferred because they can be used to compute "demand + // based" min outgoing flows). 
+ if (!relation.empty()) { + if (IntTypeAbs(r.a.coeff) != 1 || r.b.coeff != -r.a.coeff) return; + } + if (tail_var == r.a.var) { + relation.tail_coeff = r.a.coeff; + relation.head_coeff = r.b.coeff; + } else { + relation.tail_coeff = r.b.coeff; + relation.head_coeff = r.a.coeff; + } + relation.lhs = r.lhs; + relation.rhs = r.rhs; + if (relation.head_coeff < 0) { + relation.tail_coeff = -relation.tail_coeff; + relation.head_coeff = -relation.head_coeff; + relation.lhs = -relation.lhs; + relation.rhs = -relation.rhs; + std::swap(relation.lhs, relation.rhs); + } + } + + void ComputeDimensionOfEachVariable() { + // Step 1: find the number of dimensions (as the number of connected + // components in the graph of binary relations). + // TODO(user): see if we can use a shared + // DenseConnectedComponentsFinder with one node per variable of the whole + // model instead. + ConnectedComponentsFinder cc_finder; + for (int i = 0; i < num_arcs_; ++i) { + if (tails_[i] == heads_[i]) continue; + num_arcs_per_literal_[literals_[i]]++; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[i])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + cc_finder.AddEdge(r.a.var, r.b.var); + } + } + const std::vector> connected_components = + cc_finder.FindConnectedComponents(); + for (int i = 0; i < connected_components.size(); ++i) { + for (const IntegerVariable var : connected_components[i]) { + dimension_by_var_[var] = i; + } + } + num_dimensions_ = connected_components.size(); + } + + void ComputeAdjacentRelationsPerNodeAndDimension() { + adjacent_relation_indices_ = std::vector>>( + num_dimensions_, std::vector>(num_nodes_)); + for (int i = 0; i < num_arcs_; ++i) { + if (tails_[i] == heads_[i]) continue; + // If a literal is associated with more than one arc, a relation + // associated with this literal cannot be 
unambiguously associated with an + // arc. + if (num_arcs_per_literal_[literals_[i]] > 1) continue; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[i])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + const int dimension = dimension_by_var_[r.a.var]; + adjacent_relation_indices_[dimension][tails_[i]].push_back( + relation_index); + adjacent_relation_indices_[dimension][heads_[i]].push_back( + relation_index); + } + } + } + + // Returns the (node, dimension) pairs for which a variable association has + // been found. + std::queue> + ComputeVarAssociationsFromSharedVariableOfAdjacentRelations() { + flat_node_dim_variables_ = std::vector( + num_nodes_ * num_dimensions_, kNoIntegerVariable); + std::queue> result; + for (int n = 0; n < num_nodes_; ++n) { + for (int d = 0; d < num_dimensions_; ++d) { + // If two relations on incoming or outgoing arcs of n have a unique + // shared variable, such as in the case of l <-X,Y-> n <-Y,Z-> m (i.e. a + // relation between X and Y on the (l,n) arc, and a relation between Y + // and Z on the (n,m) arc), then this variable is necessarily associated + // with n. 
+ for (const int r1_index : adjacent_relation_indices_[d][n]) { + const auto& r1 = binary_relation_repository_.relation(r1_index); + for (const int r2_index : adjacent_relation_indices_[d][n]) { + if (r1_index == r2_index) continue; + const auto& r2 = binary_relation_repository_.relation(r2_index); + const IntegerVariable shared_var = UniqueSharedVariable(r1, r2); + if (shared_var == kNoIntegerVariable) continue; + DCHECK_EQ(dimension_by_var_[shared_var], d); + IntegerVariable& node_var = node_variable(n, d); + if (node_var == kNoIntegerVariable) { + result.push({n, d}); + } else if (node_var != shared_var) { + VLOG(2) << "Several vars per node and dimension in route with " + << num_nodes_ << " nodes and " << num_arcs_ << " arcs"; + return {}; + } + node_var = shared_var; + } + } + } + } + return result; + } + + void ComputeVarAssociationsFromRelationsWithSingleFreeVar( + std::queue>& node_dim_pairs_to_process) { + std::vector> adjacent_arcs_per_node(num_nodes_); + for (int i = 0; i < num_arcs_; ++i) { + if (tails_[i] == heads_[i]) continue; + adjacent_arcs_per_node[tails_[i]].push_back(i); + adjacent_arcs_per_node[heads_[i]].push_back(i); + } + while (!node_dim_pairs_to_process.empty()) { + const auto [node, dimension] = node_dim_pairs_to_process.front(); + const IntegerVariable node_var = node_variable(node, dimension); + DCHECK_NE(node_var, kNoIntegerVariable); + node_dim_pairs_to_process.pop(); + for (const int arc_index : adjacent_arcs_per_node[node]) { + const int tail = tails_[arc_index]; + const int head = heads_[arc_index]; + DCHECK(node == tail || node == head); + int other_node = node == tail ? 
head : tail; + if (node_variable(other_node, dimension) != kNoIntegerVariable) { + continue; + } + IntegerVariable candidate_var = kNoIntegerVariable; + bool candidate_var_is_unique = true; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[arc_index])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + if (r.a.var == node_var) { + if (candidate_var != kNoIntegerVariable && + candidate_var != r.b.var) { + candidate_var_is_unique = false; + break; + } + candidate_var = r.b.var; + } + if (r.b.var == node_var) { + if (candidate_var != kNoIntegerVariable && + candidate_var != r.a.var) { + candidate_var_is_unique = false; + break; + } + candidate_var = r.a.var; + } + } + if (candidate_var != kNoIntegerVariable && candidate_var_is_unique) { + node_variable(other_node, dimension) = candidate_var; + node_dim_pairs_to_process.push({other_node, dimension}); + } + } + } + } + + void ComputeArcRelations() { + flat_arc_dim_relations_ = + std::vector(num_arcs_ * num_dimensions_, Relation()); + int num_inconsistent_relations = 0; + for (int i = 0; i < num_arcs_; ++i) { + const int tail = tails_[i]; + const int head = heads_[i]; + if (tail == head) continue; + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsEnforcedBy( + literals_[i])) { + const auto& r = binary_relation_repository_.relation(relation_index); + if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { + continue; + } + const int dimension = dimension_by_var_[r.a.var]; + IntegerVariable& tail_var = node_variable(tail, dimension); + IntegerVariable& head_var = node_variable(head, dimension); + if (tail_var == kNoIntegerVariable || head_var == kNoIntegerVariable) { + continue; + } + if (!((tail_var == r.a.var && head_var == r.b.var) || + (tail_var == r.b.var && head_var == r.a.var))) { + 
++num_inconsistent_relations; + continue; + } + SetArcRelation(i, dimension, tail_var, r); + } + // If some relations are missing for this arc, check if we can use + // enforced relations to fill them. + for (int d = 0; d < num_dimensions_; ++d) { + if (!arc_relation(i, d).empty()) continue; + const IntegerVariable tail_var = node_variable(tail, d); + const IntegerVariable head_var = node_variable(head, d); + if (tail_var == kNoIntegerVariable || head_var == kNoIntegerVariable) { + continue; + } + for (const int relation_index : + binary_relation_repository_.IndicesOfRelationsBetween(tail_var, + head_var)) { + SetArcRelation(i, d, tail_var, + binary_relation_repository_.relation(relation_index)); + } + } + } + if (num_inconsistent_relations > 0) { + VLOG(2) << num_inconsistent_relations + << " inconsistent relations in route with " << num_nodes_ + << " nodes and " << num_arcs_ << " arcs"; + } + } + + const int num_nodes_; + const int num_arcs_; + absl::Span tails_; + absl::Span heads_; + absl::Span literals_; + const BinaryRelationRepository& binary_relation_repository_; + + int num_dimensions_; + absl::flat_hash_map dimension_by_var_; + absl::flat_hash_map num_arcs_per_literal_; + // The indices of the binary relations associated with the incoming and + // outgoing arcs of each node, per dimension. + std::vector>> adjacent_relation_indices_; + // The variable associated with node n and dimension d (or kNoIntegerVariable + // if there is none) is at index n * num_dimensions_ + d. + std::vector flat_node_dim_variables_; + // The relation associated with arc a and dimension d (or kNoRelation if + // there is none) is at index a * num_dimensions_ + d. 
+ std::vector flat_arc_dim_relations_; +}; } // namespace std::unique_ptr RouteRelationsHelper::Create( int num_nodes, absl::Span tails, absl::Span heads, absl::Span literals, const BinaryRelationRepository& binary_relation_repository) { - const int num_arcs = tails.size(); - // TODO(user): see if we can use a shared DenseConnectedComponentsFinder - // with one node per variable of the whole model instead. - ConnectedComponentsFinder cc_finder; - // The indices of the binary relations associated with the incoming and - // outgoing arcs of each node. - std::vector> adjacent_relation_indices(num_nodes); - for (int i = 0; i < num_arcs; ++i) { - if (tails[i] == heads[i]) continue; - for (const int relation_index : - binary_relation_repository.relation_indices(literals[i])) { - const auto& r = binary_relation_repository.relation(relation_index); - if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { - continue; - } - cc_finder.AddEdge(r.a.var, r.b.var); - adjacent_relation_indices[tails[i]].push_back(relation_index); - adjacent_relation_indices[heads[i]].push_back(relation_index); - } - } - const std::vector> connected_components = - cc_finder.FindConnectedComponents(); - absl::flat_hash_map component_by_var; - for (int i = 0; i < connected_components.size(); ++i) { - for (const IntegerVariable var : connected_components[i]) { - component_by_var[var] = i; - } - } - - const int num_dimensions = connected_components.size(); - std::vector flat_node_dim_variables( - num_nodes * num_dimensions, kNoIntegerVariable); - for (int n = 0; n < num_nodes; ++n) { - // If two relations on incoming or outgoing arcs of n have a unique shared - // variable, such as in the case of l <-X,Y-> n <-Y,Z-> m (i.e. a relation - // between X and Y on the (l,n) arc, and a relation between Y and Z on the - // (n,m) arc), then this variable is necessarily associated with n. 
- for (const int r1_index : adjacent_relation_indices[n]) { - const auto& r1 = binary_relation_repository.relation(r1_index); - for (const int r2_index : adjacent_relation_indices[n]) { - if (r1_index == r2_index) continue; - const auto& r2 = binary_relation_repository.relation(r2_index); - const IntegerVariable shared_var = UniqueSharedVariable(r1, r2); - if (shared_var == kNoIntegerVariable) continue; - const int dimension = component_by_var[shared_var]; - IntegerVariable& node_var = - flat_node_dim_variables[n * num_dimensions + dimension]; - if (node_var != kNoIntegerVariable && node_var != shared_var) { - VLOG(2) << "Several vars per node and dimension in route with " - << num_nodes << " nodes and " << num_arcs << " arcs"; - return nullptr; - } - node_var = shared_var; - } - } - } - - std::vector flat_arc_dim_relations(num_arcs * num_dimensions, - Relation()); - for (int i = 0; i < num_arcs; ++i) { - const int tail = tails[i]; - const int head = heads[i]; - if (tail == head) continue; - for (const int relation_index : - binary_relation_repository.relation_indices(literals[i])) { - const auto& r = binary_relation_repository.relation(relation_index); - if (r.a.var == kNoIntegerVariable || r.b.var == kNoIntegerVariable) { - continue; - } - const int dimension = component_by_var[r.a.var]; - IntegerVariable& tail_var = - flat_node_dim_variables[tail * num_dimensions + dimension]; - IntegerVariable& head_var = - flat_node_dim_variables[head * num_dimensions + dimension]; - // In a case such as l <-X,Y-> n <-Y,Z-> m the above algorithm cannot find - // that X and Z are associated with l and m, respectively. But once Y is - // associated with n, we can recover that X and Z are associated with l - // and m by looking at the two relations. 
- if (head_var == kNoIntegerVariable) { - if (tail_var == r.a.var) { - head_var = r.b.var; - } else if (tail_var == r.b.var) { - head_var = r.a.var; - } else { - continue; - } - } else if (tail_var == kNoIntegerVariable) { - if (head_var == r.a.var) { - tail_var = r.b.var; - } else if (head_var == r.b.var) { - tail_var = r.a.var; - } else { - continue; - } - } - Relation& arc_relation = - flat_arc_dim_relations[i * num_dimensions + dimension]; - if (tail_var == r.a.var) { - arc_relation.tail_coeff = r.a.coeff; - arc_relation.head_coeff = r.b.coeff; - } else { - arc_relation.tail_coeff = r.b.coeff; - arc_relation.head_coeff = r.a.coeff; - } - arc_relation.lhs = r.lhs; - arc_relation.rhs = r.rhs; - if (arc_relation.head_coeff < 0) { - arc_relation.tail_coeff = -arc_relation.tail_coeff; - arc_relation.head_coeff = -arc_relation.head_coeff; - arc_relation.lhs = -arc_relation.lhs; - arc_relation.rhs = -arc_relation.rhs; - std::swap(arc_relation.lhs, arc_relation.rhs); - } - } - } - - auto helper = std::unique_ptr(new RouteRelationsHelper( - num_dimensions, std::move(flat_node_dim_variables), - std::move(flat_arc_dim_relations))); + RouteRelationsBuilder builder(num_nodes, tails, heads, literals, + binary_relation_repository); + if (!builder.Build()) return nullptr; + std::unique_ptr helper(new RouteRelationsHelper( + builder.num_dimensions(), builder.flat_node_dim_variables(), + builder.flat_arc_dim_relations())); if (VLOG_IS_ON(2)) helper->LogStats(); return helper; } @@ -585,7 +966,9 @@ RouteRelationsHelper::RouteRelationsHelper( std::vector flat_arc_dim_relations) : num_dimensions_(num_dimensions), flat_node_dim_variables_(std::move(flat_node_dim_variables)), - flat_arc_dim_relations_(std::move(flat_arc_dim_relations)) {} + flat_arc_dim_relations_(std::move(flat_arc_dim_relations)) { + DCHECK_GE(num_dimensions_, 1); +} void RouteRelationsHelper::RemoveArcs( absl::Span sorted_arc_indices) { @@ -1037,6 +1420,15 @@ bool OutgoingCutHelper::TrySubsetCut(std::string name, 
// TODO(user): This is still not as good as the "capacity" bounds below in // some cases. Fix! we should be able to use the same relation to infer the // capacity bounds somehow. + if (route_relations_helper_ != nullptr) { + const int bound = + min_outgoing_flow_helper_.ComputeDemandBasedMinOutgoingFlow( + subset, *route_relations_helper_); + if (bound > min_outgoing_flow) { + absl::StrAppend(&name, "AutomaticDimension"); + min_outgoing_flow = bound; + } + } if (subset.size() < params_.routing_cut_subset_size_for_tight_binary_relation_bound()) { const int bound = diff --git a/ortools/sat/routing_cuts.h b/ortools/sat/routing_cuts.h index c761920d68..e30e9a45f2 100644 --- a/ortools/sat/routing_cuts.h +++ b/ortools/sat/routing_cuts.h @@ -54,15 +54,11 @@ class RouteRelationsHelper { int num_dimensions() const { return num_dimensions_; } int num_nodes() const { - return num_dimensions_ == 0 - ? 0 - : flat_node_dim_variables_.size() / num_dimensions_; + return flat_node_dim_variables_.size() / num_dimensions_; } int num_arcs() const { - return num_dimensions_ == 0 - ? 0 - : flat_arc_dim_relations_.size() / num_dimensions_; + return flat_arc_dim_relations_.size() / num_dimensions_; } // Returns the variable associated with the given node and dimension, or @@ -122,6 +118,14 @@ class MinOutgoingFlowHelper { ~MinOutgoingFlowHelper(); + // Returns the minimum flow going out of `subset`, based on a generalization + // of the CVRP "rounded capacity inequalities", by using the given helper, if + // possible (this requires all nodes to have an associated variable, and all + // relations to have +1, -1 coefficients). The complexity is O((subset.size() + // + num_arcs()) * num_dimensions()). + int ComputeDemandBasedMinOutgoingFlow(absl::Span subset, + const RouteRelationsHelper& helper); + // Returns the minimum flow going out of `subset`, based on a conservative // estimate of the maximum number of nodes of a feasible path inside this // subset. 
`subset` must not be empty and must not contain the depot (node 0). @@ -152,6 +156,103 @@ class MinOutgoingFlowHelper { private: void InitializeGraph(absl::Span subset); + // This is used to represent a "special bin packing" problem where we have + // objects that can either be items with a given demand or bins with a given + // capacity. The problem is to choose the minimum number of objects that will + // be bins, such that the other objects (items) can be packed inside. + struct ItemOrBin { + // Only one option will apply, this can either be an item with given demand + // or a bin with given capacity. + // + // Important: We support negative demands and negative capacity. We just + // need that the sum of demand <= capacity for the item in that bin. + IntegerValue demand = 0; + IntegerValue capacity = 0; + + // We described the problem where each object can be an item or a bin, but + // in practice we might have restrictions on what object can be which, and we + // use this field to indicate that. + // + // The numerical order is important as we use that in the greedy algorithm. + // See ComputeMinNumberOfBins() code. + enum { + MUST_BE_ITEM = 0, // capacity will be set at zero. + ITEM_OR_BIN = 1, + MUST_BE_BIN = 2, // demand will be set at zero. + } type = ITEM_OR_BIN; + }; + + // Given a "special bin packing" problem as described above, return a lower + // bound on the number of bins that needs to be taken. + // + // This simply sorts the objects according to a greedy criterion and minimizes + // the number of bins such that the "demands <= capacities" constraint is + // satisfied. + // + // TODO(user): Use fancier DP to derive tighter bound. Also, when there are + // many dimensions, the choice of which item goes to which bin is correlated, + // can we exploit this?
+ // + // TODO(user): As this gets more used and fancier, it is easy to write a CP-SAT + // model to solve a "special bin packing" problem and test on random problems + // that this is indeed a correct bound. + int ComputeMinNumberOfBins(absl::Span objects, bool* gcd_was_used); + + // Given a subset S to serve in a route constraint, returns a special bin + // packing problem (defined above) where the minimum number of bins will + // correspond to the minimum number of vehicles needed to serve this subset. + // + // One way to derive such a reduction is as follows. + // + // If we look at a path going through the subset, it will touch in order the + // nodes P = {n_0, ..., n_e}. It will enter S at a "start" node n_0 and leave + // at an "end" node n_e. + // + // We assume (see the RouteRelationsHelper) that each node n has an + // associated variable X_n, and that each arc t->h has an associated relation + // lhs(t,h) <= X_h - X_t. Summing all these inequalities along the path above + // we get: + // Sum_{i \in [1..e]} lhs(n_(i-1), n_i) <= X_(n_e) - X_(n_0) + // introducing: + // - d(n) = min_(i \in S) lhs(i, n) [minimum incoming weight in subset S] + // - UB = max_(i \in S) upper_bound(X_i) + // We get: + // Sum_{n \in P \ n_0} d(n) <= UB - lower_bound(n_0) + // + // Here we can see that the "starting node" n_0 is on the "capacity" side and + // will serve the role of a bin with capacity (UB - lower_bound(n_0)), whereas + // the other nodes n will be seen as "items" with demands d(n). + // + // Given that the set of paths going through S must be disjoint and serve all + // the nodes, we get exactly the special bin packing problem described above + // where the starting nodes are the bins and the other inner-nodes are the + // items. + // + // Note that if a node has no incoming arc from within S, it must be a start + // (i.e. a bin). And if a node has no incoming arcs from outside S, it cannot + // be a start and must be an inner node (i.e. an item).
We can exploit this to + // derive better bounds. + // + // We just explained the reduction using incoming arcs and starts of route, + // but we can do the same with outgoing arcs and ends of route. We can also + // change the dimension (the X_i) and variable direction used in the + // RouteRelationsHelper to exploit relations X_h - X_t <= rhs(t,h) instead. + // + // We provide a reduction for the cross product of: + // - Each possible dimension in the RouteRelationsHelper. + // - lhs or rhs (when negate_variables = true) in X - Y \in [lhs, rhs]. + // - (start and incoming arcs) or (ends and outgoing arcs). + // + // Warning: the returned Span<> is only valid until the next call to this + // function. + // + // TODO(user): Given the info for a subset, we can derive bounds for any + // smaller set included in it. We just have to ignore the MUST_BE_ITEM + // type as this might no longer be true. That might be interesting. + absl::Span RelaxIntoSpecialBinPackingProblem( + absl::Span subset, int dimension, bool negate_variables, + bool use_incoming, const RouteRelationsHelper& helper); + // Returns the minimum flow going out of a subset of size `subset_size`, // assuming that the longest feasible path inside this subset has // `longest_path_length` nodes and that there are at most `max_longest_paths` @@ -197,11 +298,14 @@ class MinOutgoingFlowHelper { std::vector> next_node_var_lower_bounds_; + std::vector tmp_bin_packing_problem_; + // Statistics.
int64_t num_full_dp_skips_ = 0; int64_t num_full_dp_calls_ = 0; int64_t num_full_dp_early_abort_ = 0; int64_t num_full_dp_work_abort_ = 0; + absl::flat_hash_map num_by_type_; }; // Given a graph with nodes in [0, num_nodes) and a set of arcs (the order is diff --git a/ortools/sat/routing_cuts_test.cc b/ortools/sat/routing_cuts_test.cc index e372e15984..a48250bd73 100644 --- a/ortools/sat/routing_cuts_test.cc +++ b/ortools/sat/routing_cuts_test.cc @@ -124,6 +124,178 @@ TEST(MinOutgoingFlowHelperTest, CapacityConstraints) { EXPECT_EQ(tight_min_flow, 2); } +class DemandBasedMinOutgoingFlowHelperTest + : public testing::TestWithParam> {}; + +TEST_P(DemandBasedMinOutgoingFlowHelperTest, BasicCapacities) { + // If true, the load variables are the load of the vehicle leaving each node, + // otherwise they are the load of the vehicle arriving at each node. + const bool use_outgoing_load = GetParam().first; + // If true, vehicles pickup items at each node, otherwise they deliver items. + const bool pickup = GetParam().second; + + Model model; + const int num_nodes = 5; + // A complete graph with num_nodes. + std::vector tails; + std::vector heads; + std::vector literals; + absl::flat_hash_map, Literal> literal_by_arc; + for (int tail = 0; tail < num_nodes; ++tail) { + for (int head = 0; head < num_nodes; ++head) { + if (tail == head) continue; + tails.push_back(tail); + heads.push_back(head); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + literal_by_arc[{tail, head}] = literals.back(); + } + } + const std::vector demands = {0, 11, 12, 13, 14}; + std::vector loads; + const int max_capacity = 49; + for (int n = 0; n < num_nodes; ++n) { + if (pickup == use_outgoing_load) { + loads.push_back(model.Add(NewIntegerVariable(demands[n], max_capacity))); + } else { + loads.push_back( + model.Add(NewIntegerVariable(0, max_capacity - demands[n]))); + } + } + // Capacity constraints. 
+ auto* repository = model.GetOrCreate(); + for (const auto& [arc, literal] : literal_by_arc) { + const auto& [tail, head] = arc; + if (tail == 0 || head == 0) continue; + if (pickup) { + // loads[head] - loads[tail] >= demand + repository->Add(literal, {loads[head], 1}, {loads[tail], -1}, + demands[use_outgoing_load ? head : tail], 1000); + } else { + // loads[tail] - loads[head] >= demand + repository->Add(literal, {loads[tail], 1}, {loads[head], -1}, + demands[use_outgoing_load ? head : tail], 1000); + } + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + // Subject under test. + MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {1, 2, 3, 4}, *route_relations_helper); + + // The total demand is 50, and the maximum capacity is 49. + EXPECT_EQ(min_flow, 2); +} + +TEST_P(DemandBasedMinOutgoingFlowHelperTest, + NodesWithoutIncomingOrOutgoingArc) { + // If true, the load variables are the load of the vehicle leaving each node, + // otherwise they are the load of the vehicle arriving at each node. + const bool use_outgoing_load = GetParam().first; + // If true, vehicles pickup items at each node, otherwise they deliver items. 
+ const bool pickup = GetParam().second; + + Model model; + // A graph with 4 nodes and 4 arcs, with 1 node without incoming arc and 1 + // node without outgoing arc: + // + // 1 --> 2 + // ^ | + // | v + // 0 --> 3 + const std::vector tails = {0, 0, 1, 2}; + const std::vector heads = {1, 3, 2, 3}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + const std::vector demands = {11, 12, 13, 14}; + std::vector loads; + const int num_nodes = 4; + const int max_capacity = 49; + for (int n = 0; n < num_nodes; ++n) { + if (pickup == use_outgoing_load) { + loads.push_back(model.Add(NewIntegerVariable(demands[n], max_capacity))); + } else { + loads.push_back( + model.Add(NewIntegerVariable(0, max_capacity - demands[n]))); + } + } + // Capacity constraints. + auto* repository = model.GetOrCreate(); + for (int i = 0; i < 4; ++i) { + const int head = heads[i]; + const int tail = tails[i]; + if (pickup) { + // loads[head] - loads[tail] >= demand + repository->Add(literals[i], {loads[head], 1}, {loads[tail], -1}, + demands[use_outgoing_load ? head : tail], 1000); + } else { + // loads[tail] - loads[head] >= demand + repository->Add(literals[i], {loads[tail], 1}, {loads[head], -1}, + demands[use_outgoing_load ? head : tail], 1000); + } + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + // Subject under test. + MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {0, 1, 2, 3}, *route_relations_helper); + + // The total demand is 50, and the maximum capacity is 49. 
+ EXPECT_EQ(min_flow, 2); +} + +INSTANTIATE_TEST_SUITE_P(AllCombinations, DemandBasedMinOutgoingFlowHelperTest, + testing::Values(std::make_pair(true, true), + std::make_pair(true, false), + std::make_pair(false, true), + std::make_pair(false, false))); + +TEST(MinOutgoingFlowHelperTest, DemandBasedMinOutgoingFlow_IsolatedNodes) { + Model model; + const int num_nodes = 5; + // A star graph with num_nodes-1 nodes and a depot. + std::vector tails; + std::vector heads; + std::vector literals; + std::vector variables; + auto* repository = model.GetOrCreate(); + // The depot variable. + variables.push_back(model.Add(NewIntegerVariable(0, 100))); + for (int head = 1; head < num_nodes; ++head) { + tails.push_back(0); + heads.push_back(head); + literals.push_back(Literal(model.Add(NewBooleanVariable()), true)); + variables.push_back(model.Add(NewIntegerVariable(0, 100))); + // Dummy relation, used only to associate a variable with each node. + repository->Add(literals.back(), {variables[head], 1}, {variables[0], -1}, + 1, 100); + } + repository->Build(); + std::unique_ptr route_relations_helper = + RouteRelationsHelper::Create(num_nodes, tails, heads, literals, + *repository); + ASSERT_NE(route_relations_helper, nullptr); + // Subject under test. 
+ MinOutgoingFlowHelper helper(num_nodes, tails, heads, literals, &model); + + const int min_flow = helper.ComputeDemandBasedMinOutgoingFlow( + {1, 2, 3, 4}, *route_relations_helper); + + EXPECT_EQ(min_flow, 4); +} + TEST(MinOutgoingFlowHelperTest, TimeWindows) { Model model; const int num_nodes = 5; @@ -480,6 +652,57 @@ TEST(RouteRelationsHelperTest, Basic) { IsEmpty())); } +TEST(RouteRelationsHelperTest, UnenforcedRelations) { + Model model; + // Graph: 0--l0-->1 + // ^\ | + // l3 | \_l4_ | l1 + // | \v + // 3<--l2--2 + // + const int num_nodes = 4; + const std::vector tails = {0, 1, 2, 3, 0}; + const std::vector heads = {1, 2, 3, 0, 2}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + // Add relations with "time" variables A, B, C, D intended to be associated + // with nodes 0, 1, 2, 3 respectively. + const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 1, 1); + repository.Add(literals[1], {c, 1}, {b, -1}, 2, 2); + repository.Add(literals[2], {d, 1}, {c, -1}, 3, 3); + repository.Add(literals[3], {a, 1}, {d, -1}, 4, 4); + // Several unenforced relations on the diagonal arc. The one with the +/-1 + // coefficients should be preferred. 
+ repository.Add(Literal(kNoLiteralIndex), {c, 3}, {a, -2}, 1, 9); + repository.Add(Literal(kNoLiteralIndex), {c, 1}, {a, -1}, 5, 5); + repository.Add(Literal(kNoLiteralIndex), {c, 2}, {a, -3}, 3, 8); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, repository); + + ASSERT_NE(helper, nullptr); + EXPECT_THAT(GetNodeVariablesByDimension(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, a), Pair(1, b), Pair(2, c), Pair(3, d)))); + // The unenforced relation is taken into account. + EXPECT_THAT( + GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, Relation{-1, 1, 1, 1}), Pair(1, Relation{-1, 1, 2, 2}), + Pair(2, Relation{-1, 1, 3, 3}), Pair(3, Relation{-1, 1, 4, 4}), + Pair(4, Relation{-1, 1, 5, 5})))); +} + TEST(RouteRelationsHelperTest, SeveralVariablesPerNode) { Model model; // A graph with 3 nodes and the following arcs: 0--l0-->1--l2-->2 @@ -538,6 +761,7 @@ TEST(RouteRelationsHelperTest, SeveralRelationsPerArc) { std::unique_ptr helper = RouteRelationsHelper::Create( num_nodes, tails, heads, literals, repository); + ASSERT_NE(helper, nullptr); EXPECT_EQ(helper->num_dimensions(), 1); EXPECT_EQ(helper->GetNodeVariable(0, 0), a); EXPECT_EQ(helper->GetNodeVariable(1, 0), b); @@ -548,6 +772,140 @@ TEST(RouteRelationsHelperTest, SeveralRelationsPerArc) { AnyOf(Eq(Relation{-1, 1, 70, 1000}), Eq(Relation{-3, 2, 100, 200}))); } +TEST(RouteRelationsHelperTest, SeveralArcsPerLiteral) { + // A graph with 3 nodes and the following arcs: 0--l0-->1--l0-->2, both + // enforced by the same literal l0. + Model model; + const int num_nodes = 3; + const std::vector tails = {0, 1}; + const std::vector heads = {1, 2}; + const Literal literal(model.Add(NewBooleanVariable()), true); + const std::vector literals = {literal, literal}; + // Add relations with "time" variables A, B, C intended to be associated with + // nodes 0, 1, 2 respectively. 
+ const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 50, 1000); + repository.Add(literals[0], {c, 1}, {b, -1}, 40, 1000); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, repository); + + // No variable should be associated with any node, since there is no unique + // way to do this ([A, B, C] or [C, B, A], for nodes [0, 1, 2] respectively). + // As a consequence, no relation should be recovered either. + EXPECT_EQ(helper, nullptr); +} + +TEST(RouteRelationsHelperTest, InconsistentRelationIsSkipped) { + // Graph: 0--l0-->1--l1-->2--l3-->3--l4-->4 + // | ^ + // | | + // l3 ------->5-------- l5 + // + Model model; + const int num_nodes = 6; + const std::vector tails = {0, 1, 2, 3, 1, 5}; + const std::vector heads = {1, 2, 3, 4, 5, 3}; + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true)}; + // Variables a, b, c, d, e, f are supposed to be associated with nodes 0, 1, + // 2, 3, 4, 5 respectively. 
+ const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable e = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable f = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 0, 0); + repository.Add(literals[1], {c, 1}, {b, -1}, 1, 1); + repository.Add(literals[2], {d, 1}, {c, -1}, 2, 2); + repository.Add(literals[3], {e, 1}, {d, -1}, 3, 3); + repository.Add(literals[4], {f, 1}, {b, -1}, 4, 4); + // Inconsistent relation for arc 5->3 (should be between f and d). + repository.Add(literals[5], {f, 2}, {b, -1}, 5, 5); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, repository); + + ASSERT_NE(helper, nullptr); + EXPECT_THAT(GetNodeVariablesByDimension(*helper), + UnorderedElementsAre( + UnorderedElementsAre(Pair(0, a), Pair(1, b), Pair(2, c), + Pair(3, d), Pair(4, e), Pair(5, f)))); + // The relation for arc 5->3 is filtered out because it is inconsistent. 
+ EXPECT_THAT( + GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, Relation{-1, 1, 0, 0}), Pair(1, Relation{-1, 1, 1, 1}), + Pair(2, Relation{-1, 1, 2, 2}), Pair(3, Relation{-1, 1, 3, 3}), + Pair(4, Relation{-1, 1, 4, 4})))); +} + +TEST(RouteRelationsHelperTest, InconsistentRelationWithMultipleArcsPerLiteral) { + // Graph: 0--l0-->1<--- + // ^ | | + // l3 l1 | + // | v l4 + // 3<--l2--2 | + // | | + // ----l4----->4 + Model model; + const int num_nodes = 5; + const std::vector tails = {0, 1, 2, 3, 4, 3}; + const std::vector heads = {1, 2, 3, 0, 1, 4}; + const Literal l4 = Literal(model.Add(NewBooleanVariable()), true); + const std::vector literals = { + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + Literal(model.Add(NewBooleanVariable()), true), + l4, + l4}; + // Variables a, b, c, d, e are supposed to be associated with nodes 0, 1, 2, + // 3, 4 respectively. + const IntegerVariable a = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable b = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable c = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable d = model.Add(NewIntegerVariable(0, 100)); + const IntegerVariable e = model.Add(NewIntegerVariable(0, 100)); + BinaryRelationRepository repository; + repository.Add(literals[0], {b, 1}, {a, -1}, 0, 0); + repository.Add(literals[1], {c, 1}, {b, -1}, 1, 1); + repository.Add(literals[2], {d, 1}, {c, -1}, 2, 2); + repository.Add(literals[3], {a, 1}, {d, -1}, 3, 3); + // Inconsistent relation for arc 4->1 (should be between e and b). Note that + // arcs 4->1 and 4->3 are enforced by the same literal. 
+ repository.Add(literals[4], {e, 1}, {d, -1}, 4, 4); + repository.Add(literals[5], {e, 1}, {d, -1}, 5, 5); + repository.Build(); + + std::unique_ptr helper = RouteRelationsHelper::Create( + num_nodes, tails, heads, literals, repository); + + ASSERT_NE(helper, nullptr); + EXPECT_THAT(GetNodeVariablesByDimension(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, a), Pair(1, b), Pair(2, c), Pair(3, d), Pair(4, e)))); + // The relation for arc 4->1 is filtered out because it is inconsistent. + EXPECT_THAT( + GetRelationByDimensionAndArc(*helper), + UnorderedElementsAre(UnorderedElementsAre( + Pair(0, Relation{-1, 1, 0, 0}), Pair(1, Relation{-1, 1, 1, 1}), + Pair(2, Relation{-1, 1, 2, 2}), Pair(3, Relation{-1, 1, 3, 3}), + Pair(5, Relation{-1, 1, 5, 5})))); +} + TEST(ExtractAllSubsetsFromForestTest, Basic) { std::vector parents = {3, 3, 1, 3, 1, 3}; diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 7b87b0b3d5..c518de8285 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -62,7 +62,57 @@ BaseEvent::BaseEvent(int t, SchedulingConstraintHelper* x_helper) x_start_max(x_helper->StartMax(t)), x_end_min(x_helper->EndMin(t)), x_end_max(x_helper->EndMax(t)), - x_size_min(x_helper->SizeMin(t)) {} + x_size_min(x_helper->SizeMin(t)), + x_size_max(x_helper->SizeMax(t)) {} + +void BaseEvent::PropagateDecomposedEnergy( + const VariablesAssignment& assignment) { + if (decomposed_energy.empty()) return; + + IntegerValue new_x_size_min = kMaxIntegerValue; + IntegerValue new_x_size_max = kMinIntegerValue; + IntegerValue new_y_size_min = kMaxIntegerValue; + IntegerValue new_y_size_max = kMinIntegerValue; + IntegerValue new_energy_min = kMaxIntegerValue; + int new_size = 0; + for (const auto [lit, fixed_size, fixed_demand] : decomposed_energy) { + // Filter out false literals and out of bounds values. 
+ if (assignment.LiteralIsFalse(lit) || fixed_size < x_size_min || + fixed_size > x_size_max || fixed_demand < y_size_min || + fixed_demand > y_size_max) { + continue; + } + + if (assignment.LiteralIsTrue(lit)) { + new_x_size_min = fixed_size; + new_x_size_max = fixed_size; + new_y_size_min = fixed_demand; + new_y_size_max = fixed_demand; + new_energy_min = fixed_size * fixed_demand; + decomposed_energy.clear(); + decomposed_energy.push_back({lit, fixed_size, fixed_demand}); + new_size = 1; + break; + } + + new_x_size_min = std::min(new_x_size_min, fixed_size); + new_x_size_max = std::max(new_x_size_max, fixed_size); + new_y_size_min = std::min(new_y_size_min, fixed_demand); + new_y_size_max = std::max(new_y_size_max, fixed_demand); + new_energy_min = std::min(new_energy_min, fixed_size * fixed_demand); + decomposed_energy[new_size++] = {lit, fixed_size, fixed_demand}; + } + decomposed_energy.resize(new_size); + CHECK(!decomposed_energy.empty()); + + // Update the event. + x_size_min = new_x_size_min; + x_size_max = new_x_size_max; + y_size_min = new_y_size_min; + y_size_max = new_y_size_max; + energy_min = new_energy_min; + use_energy = energy_min > x_size_min * y_size_min; +} struct EnergyEvent : BaseEvent { EnergyEvent(int t, SchedulingConstraintHelper* x_helper) @@ -603,6 +653,8 @@ CutGenerator CreateCumulativeEnergyCutGenerator( model](LinearConstraintManager* manager) { if (!helper->SynchronizeAndSetTimeDirection(true)) return false; if (!demands_helper->CacheAllEnergyValues()) return true; + const VariablesAssignment& assignment = + model->GetOrCreate()->Assignment(); const auto& lp_values = manager->LpValues(); std::vector events; @@ -616,8 +668,10 @@ CutGenerator CreateCumulativeEnergyCutGenerator( EnergyEvent e(i, helper); e.y_size = demands_helper->Demands()[i]; e.y_size_min = demands_helper->DemandMin(i); + e.y_size_max = demands_helper->DemandMax(i); e.decomposed_energy = demands_helper->DecomposedEnergies()[i]; e.energy_min = 
demands_helper->EnergyMin(i); + e.PropagateDecomposedEnergy(assignment); e.energy_is_quadratic = demands_helper->EnergyIsQuadratic(i); if (!helper->IsPresent(i)) { e.presence_literal_index = helper->PresenceLiteral(i).Index(); @@ -670,6 +724,7 @@ CutGenerator CreateNoOverlapEnergyCutGenerator( EnergyEvent e(i, helper); e.y_size = IntegerValue(1); e.y_size_min = IntegerValue(1); + e.y_size_max = IntegerValue(1); e.energy_min = e.x_size_min; if (!helper->IsPresent(i)) { e.presence_literal_index = helper->PresenceLiteral(i).Index(); @@ -763,13 +818,8 @@ CutGenerator CreateCumulativeTimeTableCutGenerator( // negative events appear after for the correctness of the algo below. std::stable_sort(events.begin(), events.end(), [](const TimeTableEvent& i, const TimeTableEvent& j) { - if (i.time == j.time) { - if (i.is_positive == j.is_positive) { - return i.interval_index < j.interval_index; - } - return !i.is_positive; - } - return i.time < j.time; + return std::tie(i.time, i.is_positive) < + std::tie(j.time, j.is_positive); }); double sum_of_demand_lp = 0.0; @@ -1022,8 +1072,10 @@ std::string CtEvent::DebugString() const { ", x_start_min = ", x_start_min.value(), ", x_start_max = ", x_start_max.value(), ", x_size_min = ", x_size_min.value(), + ", x_size_max = ", x_size_max.value(), ", x_end_min = ", x_end_min.value(), ", x_end_max = ", x_end_max.value(), ", x_lp_end = ", x_lp_end, ", y_size_min = ", y_size_min.value(), + ", y_size_max = ", y_size_max.value(), ", energy_min = ", energy_min.value(), ", use_energy = ", use_energy, ", lifted = ", lifted, ", decomposed_energy = [", absl::StrJoin(decomposed_energy, ", ", @@ -1272,63 +1324,35 @@ void GenerateShortCompletionTimeCutsWithExactBound( top_n_cuts.TransferToManager(manager); } +namespace { + // Returns a copy of the event with the start time increased to time. // Energy (min and decomposed) are updated accordingly. 
CtEvent CopyAndTrimEventAfter(const CtEvent& old_event, IntegerValue time, const VariablesAssignment& assignment) { CHECK_GT(time, old_event.x_start_min); + CHECK_GT(old_event.x_start_min + old_event.x_size_min, time); CtEvent event = old_event; // Copy. event.lifted = true; // Trim the decomposed energy and compute the energy min in the window. - CHECK_GT(event.x_end_min, time); const IntegerValue shift = time - event.x_start_min; CHECK_GT(shift, IntegerValue(0)); - const IntegerValue simple_size_min = - std::max(event.x_size_min - shift, IntegerValue(0)); - const IntegerValue simple_energy_min = event.y_size_min * simple_size_min; - - if (event.decomposed_energy.empty()) { - event.energy_min = simple_energy_min; - event.x_size_min = simple_size_min; - } else { - IntegerValue min_energy = kMaxIntegerValue; - IntegerValue min_size = kMaxIntegerValue; - event.decomposed_energy.clear(); - for (auto [lit, fixed_size, fixed_demand] : old_event.decomposed_energy) { - if (assignment.LiteralIsFalse(lit)) continue; - if (assignment.LiteralIsTrue(lit)) { - min_size = std::max(fixed_size - shift, IntegerValue(0)); - min_energy = min_size * fixed_demand; - CHECK(event.decomposed_energy.empty()); - event.decomposed_energy.push_back({lit, fixed_size, fixed_demand}); - break; - } - fixed_size = std::max(fixed_size - shift, IntegerValue(0)); - event.decomposed_energy.push_back({lit, fixed_size, fixed_demand}); - min_energy = std::min(min_energy, fixed_size * fixed_demand); - min_size = std::min(min_size, fixed_size); + event.x_size_min -= shift; + event.x_size_max -= shift; + event.energy_min = event.x_size_min * event.y_size_min; + if (!event.decomposed_energy.empty()) { + // Trim durations + for (auto& [literal, size, demand] : event.decomposed_energy) { + CHECK_GT(size, shift); + size -= shift; } - CHECK_NE(min_energy, kMaxIntegerValue); - CHECK_GE(min_energy, simple_energy_min) - << "old_event = " << old_event.DebugString() << ", time = " << time - << ", shift = " << shift << 
", new_event = " << event.DebugString(); - if (min_size != simple_size_min) { - LOG(INFO) << ", time = " << time << ", shift = " << shift; - LOG(INFO) << "old_event = " << old_event.DebugString(); - LOG(INFO) << ", new_event = " << event.DebugString(); - } - CHECK_GE(min_size, simple_size_min); - event.x_size_min = min_size; - event.energy_min = min_energy; - event.use_energy = event.energy_min > simple_energy_min; + event.PropagateDecomposedEnergy(assignment); } event.x_start_min = time; return event; } -namespace { - // Collects all possible demand values for the event, and adds them to the // subset sum of the reachable capacity of the cumulative constraint. void AddEventDemandsToCapacitySubsetSum( @@ -1336,7 +1360,7 @@ void AddEventDemandsToCapacitySubsetSum( IntegerValue capacity_max, std::vector& tmp_possible_demands, MaxBoundedSubsetSum& dp) { if (dp.CurrentMax() != capacity_max) { - if (event.y_size_is_fixed) { + if (event.y_size_is_fixed()) { dp.Add(event.y_size_min.value()); } else if (!event.decomposed_energy.empty()) { tmp_possible_demands.clear(); @@ -1506,23 +1530,23 @@ void GenerateCompletionTimeCutsWithEnergy(absl::string_view cut_name, } } if (min_shape_area < event.energy_min * event.x_size_min) { - LOG(INFO) << "min_shape_area: " << min_shape_area - << " energy_min: " << event.energy_min - << " x_size_min: " << event.x_size_min - << "simple_min_shape_area: " - << event.energy_min * event.x_size_min; - LOG(INFO) << " event = " << event.DebugString(); + VLOG(2) << "min_shape_area: " << min_shape_area + << " energy_min: " << event.energy_min + << " x_size_min: " << event.x_size_min + << "simple_min_shape_area: " + << event.energy_min * event.x_size_min; + VLOG(2) << " event = " << event.DebugString(); } CHECK_GE(min_shape_area, event.energy_min * event.x_size_min); if (!AddTo(min_shape_area, &sum_event_contributions)) break; if (min_shape_area > event.energy_min * event.x_size_min) { + VLOG(2) << "min_shape_area: " << min_shape_area + << " 
simple_min_shape_area: " + << event.energy_min * event.x_size_min; + VLOG(2) << " event = " << event.DebugString(); + uses_shapes = true; - VLOG(1) << "energy_min: " << event.energy_min - << " size_min: " << event.x_size_min - << " demand_min: " << event.y_size_min - << " regular_area: " << event.energy_min * event.x_size_min - << " min_shape_area: " << min_shape_area; } } if (!AddSquareTo(event.energy_min, &sum_square_energy)) break; @@ -1621,6 +1645,7 @@ CutGenerator CreateNoOverlapCompletionTimeCutGenerator( event.x_end = end_expr; event.x_lp_end = end_expr.LpValue(lp_values); event.y_size_min = IntegerValue(1); + event.y_size_max = IntegerValue(1); event.energy_min = size_min; events.push_back(event); } @@ -1666,6 +1691,8 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( capacity](bool mirror) { std::vector events; const auto& lp_values = manager->LpValues(); + const VariablesAssignment& assignment = + model->GetOrCreate()->Assignment(); for (int index = 0; index < helper->NumTasks(); ++index) { if (!helper->IsPresent(index)) continue; if (helper->SizeMin(index) > 0 && @@ -1674,11 +1701,12 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( event.x_end = helper->Ends()[index]; event.x_lp_end = event.x_end.LpValue(lp_values); event.y_size_min = demands_helper->DemandMin(index); + event.y_size_max = demands_helper->DemandMax(index); event.energy_min = demands_helper->EnergyMin(index); event.use_energy = event.energy_min > event.x_size_min * event.y_size_min; event.decomposed_energy = demands_helper->DecomposedEnergies()[index]; - event.y_size_is_fixed = demands_helper->DemandIsFixed(index); + event.PropagateDecomposedEnergy(assignment); events.push_back(event); } } diff --git a/ortools/sat/scheduling_cuts.h b/ortools/sat/scheduling_cuts.h index 5f78d2ca98..e9a6a4be9b 100644 --- a/ortools/sat/scheduling_cuts.h +++ b/ortools/sat/scheduling_cuts.h @@ -22,6 +22,7 @@ #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" #include 
"ortools/sat/model.h" +#include "ortools/sat/sat_base.h" #include "ortools/sat/scheduling_helpers.h" namespace operations_research { @@ -109,9 +110,11 @@ struct BaseEvent { IntegerValue x_end_min; IntegerValue x_end_max; IntegerValue x_size_min; + IntegerValue x_size_max; // Cache of the bounds on the y direction. IntegerValue y_size_min; + IntegerValue y_size_max; // The energy min of this event. IntegerValue energy_min; @@ -119,6 +122,16 @@ struct BaseEvent { // If non empty, a decomposed view of the energy of this event. // First value in each pair is x_size, second is y_size. std::vector decomposed_energy; + + // Indicates if the events used the optional energy information from the + // model. + bool use_energy = false; + + // If we know that the size on y is fixed, we can use some heuristic to + // compute the maximum subset sums under the capacity and use that instead + // of the full capacity. + bool y_size_is_fixed() const { return y_size_min == y_size_max; } + void PropagateDecomposedEnergy(const VariablesAssignment& assignment); }; // Stores the event for a rectangle along the two axis x and y. @@ -132,19 +145,10 @@ struct CtEvent : BaseEvent { AffineExpression x_end; double x_lp_end; - // Indicates if the events used the optional energy information from the - // model. - bool use_energy = false; - // Indicates if the cut is lifted, that is if it includes tasks that are not // strictly contained in the current time window. bool lifted = false; - // If we know that the size on y is fixed, we can use some heuristic to - // compute the maximum subset sums under the capacity and use that instead - // of the full capacity. 
- bool y_size_is_fixed = false; - std::string DebugString() const; }; diff --git a/ortools/sat/scheduling_helpers.cc b/ortools/sat/scheduling_helpers.cc index a0012e5635..bbca7a12f1 100644 --- a/ortools/sat/scheduling_helpers.cc +++ b/ortools/sat/scheduling_helpers.cc @@ -21,9 +21,11 @@ #include #include "absl/log/check.h" +#include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/base/logging.h" +#include "ortools/base/strong_vector.h" #include "ortools/sat/implied_bounds.h" #include "ortools/sat/integer.h" #include "ortools/sat/integer_base.h" @@ -781,79 +783,26 @@ bool SchedulingDemandHelper::CacheAllEnergyValues() { const int num_tasks = cached_energies_min_.size(); const bool is_at_level_zero = sat_solver_->CurrentDecisionLevel() == 0; for (int t = 0; t < num_tasks; ++t) { - if (!decomposed_energies_[t].empty()) { - // Checks the propagation of the exactly one on literals. - int num_literals_at_true = 0; - int num_unassigned_literals = 0; - for (const auto [lit, fixed_size, fixed_demand] : - decomposed_energies_[t]) { - if (assignment_.LiteralIsTrue(lit)) { - ++num_literals_at_true; - } else if (!assignment_.LiteralIsFalse(lit)) { - ++num_unassigned_literals; - } - } - if (num_literals_at_true > 1 || - (num_literals_at_true == 1 && num_unassigned_literals > 0)) { - VLOG(1) << "Exactly one on literals not satisfied"; - return false; - } - - // Checks the consistency of implied values and literal values with the - // cached bounds of the size and demand expressions. + // Try to reduce the size of the decomposed energy vector. + if (is_at_level_zero) { int new_size = 0; - IntegerValue min_size = kMaxIntegerValue; - IntegerValue max_size = kMinIntegerValue; - IntegerValue min_demand = kMaxIntegerValue; - IntegerValue max_demand = kMinIntegerValue; - for (int i = 0; i < decomposed_energies_[t].size(); ++i) { - // Try to reduce the size of the decomposed energy vector. 
- if (is_at_level_zero && - assignment_.LiteralIsFalse(decomposed_energies_[t][i].literal)) { + if (assignment_.LiteralIsFalse(decomposed_energies_[t][i].literal)) { continue; } - if (!assignment_.LiteralIsFalse(decomposed_energies_[t][i].literal)) { - const IntegerValue fixed_size = decomposed_energies_[t][i].left_value; - const IntegerValue fixed_demand = - decomposed_energies_[t][i].right_value; - min_size = std::min(min_size, fixed_size); - max_size = std::max(max_size, fixed_size); - min_demand = std::min(min_demand, fixed_demand); - max_demand = std::max(max_demand, fixed_demand); - } decomposed_energies_[t][new_size++] = decomposed_energies_[t][i]; } decomposed_energies_[t].resize(new_size); - - // Check consistency. - if (min_size != helper_->SizeMin(t) || max_size != helper_->SizeMax(t) || - min_demand != DemandMin(t) || max_demand != DemandMax(t)) { - VLOG(1) << "Consistency check failed: size=[" << min_size << ".." - << max_size << "], demand=[" << min_demand << ".." << max_demand - << "], helper size=[" << helper_->SizeMin(t) << ".." - << helper_->SizeMax(t) << "], helper demand=[" << DemandMin(t) - << ".." 
<< DemandMax(t) << "]"; - return false; - } } - if (t < override_energy_min_.size()) { - cached_energies_min_[t] = override_energy_min_[t]; - } else { - cached_energies_min_[t] = - std::max(SimpleEnergyMin(t), DecomposedEnergyMin(t)); - } + cached_energies_min_[t] = + std::max(SimpleEnergyMin(t), DecomposedEnergyMin(t)); if (cached_energies_min_[t] <= kMinIntegerValue) return false; energy_is_quadratic_[t] = decomposed_energies_[t].empty() && !demands_.empty() && !integer_trail_->IsFixed(demands_[t]) && !helper_->SizeIsFixed(t); - if (t < override_energy_max_.size()) { - cached_energies_max_[t] = override_energy_max_[t]; - } else { - cached_energies_max_[t] = - std::min(SimpleEnergyMax(t), DecomposedEnergyMax(t)); - } + cached_energies_max_[t] = + std::min(SimpleEnergyMax(t), DecomposedEnergyMax(t)); if (cached_energies_max_[t] >= kMaxIntegerValue) return false; } @@ -875,18 +824,28 @@ bool SchedulingDemandHelper::DemandIsFixed(int t) const { } bool SchedulingDemandHelper::DecreaseEnergyMax(int t, IntegerValue value) { - if (value < EnergyMin(t)) { - if (helper_->IsOptional(t)) { - return helper_->PushTaskAbsence(t); - } else { - return helper_->ReportConflict(); - } - } else if (!decomposed_energies_[t].empty()) { + if (helper_->IsAbsent(t)) return true; + if (value < EnergyMin(t)) return helper_->PushTaskAbsence(t); + + if (!decomposed_energies_[t].empty()) { for (const auto [lit, fixed_size, fixed_demand] : decomposed_energies_[t]) { if (fixed_size * fixed_demand > value) { - if (assignment_.LiteralIsTrue(lit)) return helper_->ReportConflict(); + // `lit` encodes that the energy is higher than value. So either lit + // must be false or the task must be absent. if (assignment_.LiteralIsFalse(lit)) continue; - if (!helper_->PushLiteral(lit.Negated())) return false; + if (assignment_.LiteralIsTrue(lit)) { + // Task must be absent. 
+ if (helper_->PresenceLiteral(t) != lit) { + helper_->MutableLiteralReason()->push_back(lit.Negated()); + } + return helper_->PushTaskAbsence(t); + } + if (helper_->IsPresent(t)) { + // Task is present, `lit` must be false. + DCHECK(!helper_->IsOptional(t) || helper_->PresenceLiteral(t) != lit); + helper_->AddPresenceReason(t); + if (!helper_->PushLiteral(lit.Negated())) return false; + } } } } else { diff --git a/ortools/sat/scheduling_helpers.h b/ortools/sat/scheduling_helpers.h index ea02a2f6e8..b4353868f8 100644 --- a/ortools/sat/scheduling_helpers.h +++ b/ortools/sat/scheduling_helpers.h @@ -551,12 +551,6 @@ class SchedulingDemandHelper { // Visible for testing. void OverrideDecomposedEnergies( const std::vector>& energies); - void OverrideEnergyBounds(const std::vector& energy_min, - const std::vector& energy_max) { - override_energy_min_ = energy_min; - override_energy_max_ = energy_max; - } - // Returns the decomposed energy terms compatible with the current literal // assignment. It must not be used to create reasons if not at level 0. // It returns en empty vector if the decomposed energy is not available. @@ -589,10 +583,6 @@ class SchedulingDemandHelper { // A representation of the energies as a set of alternative. // If subvector is empty, we don't have this representation. std::vector> decomposed_energies_; - - // Override energy bounds. 
- std::vector override_energy_min_; - std::vector override_energy_max_; }; // ============================================================================= diff --git a/ortools/sat/scheduling_helpers_test.cc b/ortools/sat/scheduling_helpers_test.cc index baaaf7a017..526b195d08 100644 --- a/ortools/sat/scheduling_helpers_test.cc +++ b/ortools/sat/scheduling_helpers_test.cc @@ -53,13 +53,13 @@ TEST(SchedulingDemandHelperTest, EnergyInWindow) { Model model; const AffineExpression start(model.Add(NewIntegerVariable(0, 10))); - const AffineExpression size(model.Add(NewIntegerVariable(2, 4))); + const AffineExpression size(model.Add(NewIntegerVariable(2, 10))); const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); auto* repo = model.GetOrCreate(); const IntervalVariable inter = repo->CreateInterval(start, end, size, kNoLiteralIndex, false); - const AffineExpression demand(model.Add(NewIntegerVariable(2, 4))); + const AffineExpression demand(model.Add(NewIntegerVariable(2, 10))); SchedulingConstraintHelper* helper = repo->GetOrCreateHelper({inter}); SchedulingDemandHelper demands_helper({demand}, helper, &model); @@ -85,13 +85,13 @@ TEST(SchedulingDemandHelperTest, EnergyInWindowTakeIntoAccountWindowSize) { Model model; const AffineExpression start(model.Add(NewIntegerVariable(0, 4))); - const AffineExpression size(model.Add(NewIntegerVariable(6, 8))); + const AffineExpression size(model.Add(NewIntegerVariable(6, 10))); const AffineExpression end(model.Add(NewIntegerVariable(0, 10))); auto* repo = model.GetOrCreate(); const IntervalVariable inter = repo->CreateInterval(start, end, size, kNoLiteralIndex, false); - const AffineExpression demand(model.Add(NewIntegerVariable(6, 8))); + const AffineExpression demand(model.Add(NewIntegerVariable(6, 10))); SchedulingConstraintHelper* helper = repo->GetOrCreateHelper({inter}); SchedulingDemandHelper demands_helper({demand}, helper, &model);