[CP-SAT] work on scheduling

This commit is contained in:
Laurent Perron
2023-06-14 15:13:14 +02:00
parent d564afe6f1
commit 24ea806809
16 changed files with 665 additions and 316 deletions

View File

@@ -235,6 +235,7 @@ cc_library(
hdrs = ["parameters_validation.h"],
visibility = ["//visibility:public"],
deps = [
":cp_model_search",
":sat_parameters_cc_proto",
"//ortools/base",
"@com_google_absl//absl/container:flat_hash_set",

View File

@@ -1146,6 +1146,64 @@ void LoadLinearConstraint(const ConstraintProto& ct, Model* m) {
max_sum += std::max(term_a, term_b);
}
// Load precedences.
if (!HasEnforcementLiteral(ct)) {
auto* precedences = m->GetOrCreate<PrecedenceRelations>();
// To avoid overflow in the code below, we tighten the bounds.
// Note that we detect and do not add trivial relation.
int64_t rhs_min = ct.linear().domain(0);
int64_t rhs_max = ct.linear().domain(ct.linear().domain().size() - 1);
rhs_min = std::max(rhs_min, min_sum.value());
rhs_max = std::min(rhs_max, max_sum.value());
if (vars.size() == 2) {
if (std::abs(coeffs[0]) == std::abs(coeffs[1])) {
const int64_t magnitude = std::abs(coeffs[0]);
IntegerVariable v1 = vars[0];
IntegerVariable v2 = vars[1];
if (coeffs[0] < 0) v1 = NegationOf(v1);
if (coeffs[1] > 0) v2 = NegationOf(v2);
// magnitude * v1 <= magnitude * v2 + rhs_max.
precedences->Add(v1, v2, CeilOfRatio(-rhs_max, magnitude));
// magnitude * v1 >= magnitude * v2 + rhs_min.
precedences->Add(v2, v1, CeilOfRatio(rhs_min, magnitude));
}
} else if (vars.size() == 3) {
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 3; ++j) {
if (i == j) continue;
if (std::abs(coeffs[i]) != std::abs(coeffs[j])) continue;
const int other = 3 - i - j; // i + j + other = 0 + 1 + 2.
// Make the terms magnitude * v1 - magnitude * v2 ...
const int64_t magnitude = std::abs(coeffs[i]);
IntegerVariable v1 = vars[i];
IntegerVariable v2 = vars[j];
if (coeffs[i] < 0) v1 = NegationOf(v1);
if (coeffs[j] > 0) v2 = NegationOf(v2);
// magnitude * v1 + other_lb <= magnitude * v2 + rhs_max
const int64_t coeff = coeffs[other];
const int64_t other_lb =
coeff > 0
? coeff * integer_trail->LowerBound(vars[other]).value()
: coeff * integer_trail->UpperBound(vars[other]).value();
precedences->Add(v1, v2, CeilOfRatio(other_lb - rhs_max, magnitude));
// magnitude * v1 + other_ub >= magnitude * v2 + rhs_min
const int64_t other_ub =
coeff > 0
? coeff * integer_trail->UpperBound(vars[other]).value()
: coeff * integer_trail->LowerBound(vars[other]).value();
precedences->Add(v2, v1, CeilOfRatio(rhs_min - other_ub, magnitude));
}
}
}
}
const SatParameters& params = *m->GetOrCreate<SatParameters>();
const IntegerValue domain_size_limit(
params.max_domain_size_when_encoding_eq_neq_constraints());

View File

@@ -429,17 +429,8 @@ int ValidSumSeed(int base_seed, int delta) {
return static_cast<int>(result);
}
// Note: in flatzinc setting, we know we always have a fixed search defined.
//
// Things to try:
// - Specialize for purely boolean problems
// - Disable linearization_level options for non linear problems
// - Fast restart in randomized search
// - Different propagation levels for scheduling constraints
std::vector<SatParameters> GetDiverseSetOfParameters(
const SatParameters& base_params, const CpModelProto& cp_model) {
// Defines a set of named strategies so it is easier to read in one place
// the ones that are used. See below.
absl::flat_hash_map<std::string, SatParameters> GetNamedParameters(
const SatParameters& base_params) {
absl::flat_hash_map<std::string, SatParameters> strategies;
// The "default" name can be used for the base_params unchanged.
@@ -530,6 +521,19 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
strategies["objective_lb_search_max_lp"] = new_params;
}
{
SatParameters new_params = base_params;
new_params.set_linearization_level(2);
new_params.set_use_objective_shaving_search(true);
new_params.set_cp_model_presolve(false);
new_params.set_cp_model_probing_level(0);
new_params.set_symmetry_level(0);
if (base_params.use_dual_scheduling_heuristics()) {
AddDualSchedulingHeuristics(new_params);
}
strategies["objective_shaving_search"] = new_params;
}
{
SatParameters new_params = base_params;
new_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH);
@@ -596,10 +600,27 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
}
// Add user defined ones.
// Note that this might overwrite our default ones.
for (const SatParameters& params : base_params.subsolver_params()) {
strategies[params.name()] = params;
}
return strategies;
}
// Note: in flatzinc setting, we know we always have a fixed search defined.
//
// Things to try:
// - Specialize for purely boolean problems
// - Disable linearization_level options for non linear problems
// - Fast restart in randomized search
// - Different propagation levels for scheduling constraints
std::vector<SatParameters> GetDiverseSetOfParameters(
const SatParameters& base_params, const CpModelProto& cp_model) {
// Defines a set of named strategies so it is easier to read in one place
// the ones that are used. See below.
const auto strategies = GetNamedParameters(base_params);
// We only use a "fixed search" worker if some strategy is specified or
// if we have a scheduling model.
//
@@ -615,6 +636,11 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
// like if there is no lp, or everything is already linearized at level 1.
std::vector<std::string> names;
// Starts by adding user specified ones.
for (const std::string& name : base_params.extra_subsolvers()) {
names.push_back(name);
}
const int num_workers_to_generate =
base_params.num_workers() - base_params.shared_tree_num_workers();
// We use the default if empty.
@@ -644,7 +670,7 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
names.push_back("probing_max_lp");
}
if (num_workers_to_generate >= 24) {
names.push_back("objective_lb_search_max_lp");
names.push_back("objective_shaving_search");
}
#if !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP)
if (absl::GetFlag(FLAGS_cp_model_use_max_hs)) names.push_back("max_hs");
@@ -666,11 +692,6 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
}
}
// Add subsolvers.
for (const std::string& name : base_params.extra_subsolvers()) {
names.push_back(name);
}
// Remove the names that should be ignored.
absl::flat_hash_set<std::string> to_ignore;
for (const std::string& name : base_params.ignore_subsolvers()) {
@@ -686,12 +707,6 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
// Creates the diverse set of parameters with names and seed.
std::vector<SatParameters> result;
for (const std::string& name : names) {
if (!strategies.contains(name)) {
// TODO(user): Check that at parameter validation and return nice error
// instead.
LOG(WARNING) << "Unknown parameter name '" << name << "'";
continue;
}
SatParameters params = strategies.at(name);
// Do some filtering.
@@ -717,7 +732,7 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
if (name == "less_encoding") continue;
// Disable subsolvers that do not implement the determistic mode.
// Disable subsolvers that do not implement the deterministic mode.
//
// TODO(user): Enable lb_tree_search in deterministic mode.
if (params.interleave_search() &&
@@ -730,6 +745,7 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
if (params.optimize_with_lb_tree_search()) continue;
if (params.optimize_with_core()) continue;
if (params.use_objective_lb_search()) continue;
if (params.use_objective_shaving_search()) continue;
if (params.search_branching() == SatParameters::LP_SEARCH) continue;
if (params.search_branching() == SatParameters::PSEUDO_COST_SEARCH) {
continue;

View File

@@ -16,6 +16,7 @@
#include <cstdint>
#include <functional>
#include <string>
#include <vector>
#include "ortools/base/integral_types.h"
@@ -94,6 +95,14 @@ std::function<BooleanOrIntegerLiteral()> InstrumentSearchStrategy(
const std::function<BooleanOrIntegerLiteral()>& instrumented_strategy,
Model* model);
// Returns all the named sets of parameters known to the solver. This includes our
// default strategies like "max_lp", "core", etc... It is visible here so that
// this can be reused by parameter validation.
//
// Usually, named strategies just override a few field from the base_params.
absl::flat_hash_map<std::string, SatParameters> GetNamedParameters(
const SatParameters& base_params);
// Returns up to base_params.num_workers() different parameters.
// We do not always return num_worker parameters to leave room for strategies
// like LNS that do not consume a full worker and can always be interleaved.

View File

@@ -1413,6 +1413,10 @@ void LoadBaseModel(const CpModelProto& model_proto, Model* model) {
// Fully encode variables as needed by the search strategy.
AddFullEncodingFromSearchBranching(model_proto, model);
// Reserve space for the precedence relations.
model->GetOrCreate<PrecedenceRelations>()->Resize(
model->GetOrCreate<IntegerTrail>()->NumIntegerVariables().value());
// Load the constraints.
absl::btree_set<std::string> unsupported_types;
int num_ignored_constraints = 0;
@@ -2626,6 +2630,111 @@ class FullProblemSolver : public SubSolver {
bool previous_task_is_completed_ ABSL_GUARDED_BY(mutex_) = true;
};
class ObjectiveLbSolver : public SubSolver {
public:
ObjectiveLbSolver(const std::string& name,
const SatParameters& local_parameters,
SharedClasses* shared)
: SubSolver(name, FULL_PROBLEM),
local_params_(local_parameters),
shared_(shared),
local_cp_model_(*shared->model_proto) {}
~ObjectiveLbSolver() override = default;
bool IsDone() override {
{
absl::MutexLock mutex_lock(&mutex_);
if (!previous_task_is_completed_) return false;
}
return false;
}
bool TaskIsAvailable() override {
if (shared_->SearchIsDone()) return false;
absl::MutexLock mutex_lock(&mutex_);
return previous_task_is_completed_;
return false;
}
std::function<void()> GenerateTask(int64_t /*task_id*/) override {
{
absl::MutexLock mutex_lock(&mutex_);
previous_task_is_completed_ = false;
}
return [this]() {
Model m;
*m.GetOrCreate<SatParameters>() = local_params_;
auto* local_time_limit = m.GetOrCreate<TimeLimit>();
shared_->time_limit->UpdateLocalLimit(local_time_limit);
auto* obj = local_cp_model_.mutable_objective();
const IntegerValue lb = shared_->response->GetInnerObjectiveLowerBound();
obj->clear_domain();
obj->add_domain(lb.value());
obj->add_domain(lb.value());
if (obj->vars().size() == 1 && obj->coeffs(0) == 1) {
// In this case we can reduce the domain of the variable directly.
// This is better otherwise we will not propagate proper level zero
// bound before the linear relaxation is computed.
const int obj_var = obj->vars(0);
local_cp_model_.mutable_variables(obj_var)->clear_domain();
local_cp_model_.mutable_variables(obj_var)->add_domain(lb.value());
local_cp_model_.mutable_variables(obj_var)->add_domain(lb.value());
}
// We cannot export bounds since our model is more constrained, but we can
// import them.
if (shared_->bounds != nullptr) {
RegisterVariableBoundsLevelZeroImport(local_cp_model_,
shared_->bounds.get(), &m);
}
auto* local_response_manager = m.GetOrCreate<SharedResponseManager>();
local_response_manager->InitializeObjective(local_cp_model_);
local_response_manager->SetSynchronizationMode(true);
LoadCpModel(local_cp_model_, &m);
SolveLoadedCpModel(local_cp_model_, &m);
CpSolverResponse local_response = local_response_manager->GetResponse();
if (local_response.status() == CpSolverStatus::OPTIMAL ||
local_response.status() == CpSolverStatus::FEASIBLE) {
shared_->response->NewSolution(local_response.solution(), name());
} else if (local_response.status() == CpSolverStatus::INFEASIBLE) {
shared_->response->UpdateInnerObjectiveBounds(name(), lb + 1,
kMaxIntegerValue);
}
absl::MutexLock mutex_lock(&mutex_);
previous_task_is_completed_ = true;
deterministic_time_since_last_synchronize_ +=
local_time_limit->GetElapsedDeterministicTime();
};
}
void Synchronize() override {
absl::MutexLock mutex_lock(&mutex_);
deterministic_time_ += deterministic_time_since_last_synchronize_;
shared_->time_limit->AdvanceDeterministicTime(
deterministic_time_since_last_synchronize_);
deterministic_time_since_last_synchronize_ = 0.0;
}
std::string StatisticsString() const override { return ""; }
private:
SatParameters local_params_;
SharedClasses* shared_;
CpModelProto local_cp_model_;
absl::Mutex mutex_;
double deterministic_time_since_last_synchronize_ ABSL_GUARDED_BY(mutex_) =
0.0;
bool previous_task_is_completed_ ABSL_GUARDED_BY(mutex_) = true;
};
class FeasibilityPumpSolver : public SubSolver {
public:
FeasibilityPumpSolver(const SatParameters& local_parameters,
@@ -3166,11 +3275,17 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
GetDiverseSetOfParameters(params, model_proto)) {
// TODO(user): This is currently not supported here.
if (params.optimize_with_max_hs()) continue;
++num_full_problem_solvers;
if (local_params.use_objective_shaving_search()) {
subsolvers.push_back(std::make_unique<ObjectiveLbSolver>(
local_params.name(), local_params, &shared));
continue;
}
subsolvers.push_back(std::make_unique<FullProblemSolver>(
local_params.name(), local_params,
/*split_in_chunks=*/params.interleave_search(), &shared));
num_full_problem_solvers++;
}
}
@@ -3188,8 +3303,6 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
subsolvers.push_back(std::move(unique_helper));
// By default we use the user provided parameters.
SatParameters local_params = params;
local_params.set_name("default");
// TODO(user): for now this is not deterministic so we disable it on
// interleave search. Fix.
if (!testing && params.use_rins_lns() && !params.interleave_search()) {
@@ -3202,17 +3315,16 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
incomplete_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RelaxationInducedNeighborhoodGenerator>(
helper, shared.response, nullptr, shared.lp_solutions.get(),
/*incomplete_solutions=*/nullptr,
absl::StrCat("rins_lns_", local_params.name())),
local_params, helper, &shared));
/*incomplete_solutions=*/nullptr, "rins_lns"),
params, helper, &shared));
// RENS.
incomplete_subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RelaxationInducedNeighborhoodGenerator>(
helper, /*response_manager=*/nullptr, nullptr,
shared.lp_solutions.get(), shared.incomplete_solutions.get(),
absl::StrCat("rens_lns_", local_params.name())),
local_params, helper, &shared));
"rens_lns"),
params, helper, &shared));
}
const bool feasibility_jump_possible =
@@ -3362,21 +3474,20 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
// Enqueue all the possible LNS neighborhood subsolvers.
// Each will have their own metrics.
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RelaxRandomVariablesGenerator>(
helper, absl::StrCat("rnd_var_lns_", local_params.name())),
local_params, helper, &shared));
std::make_unique<RelaxRandomVariablesGenerator>(helper, "rnd_var_lns"),
params, helper, &shared));
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RelaxRandomConstraintsGenerator>(
helper, absl::StrCat("rnd_cst_lns_", local_params.name())),
local_params, helper, &shared));
std::make_unique<RelaxRandomConstraintsGenerator>(helper,
"rnd_cst_lns"),
params, helper, &shared));
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<VariableGraphNeighborhoodGenerator>(
helper, absl::StrCat("graph_var_lns_", local_params.name())),
local_params, helper, &shared));
std::make_unique<VariableGraphNeighborhoodGenerator>(helper,
"graph_var_lns"),
params, helper, &shared));
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<ConstraintGraphNeighborhoodGenerator>(
helper, absl::StrCat("graph_cst_lns_", local_params.name())),
local_params, helper, &shared));
std::make_unique<ConstraintGraphNeighborhoodGenerator>(helper,
"graph_cst_lns"),
params, helper, &shared));
// Create the rnd_obj_lns worker if the number of terms in the objective is
// big enough, and it is no more than half the number of variables in the
@@ -3386,9 +3497,9 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
model_proto.objective().vars_size() >=
model_proto.objective().vars().size() * 2) {
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RelaxObjectiveVariablesGenerator>(
helper, absl::StrCat("rnd_obj_lns_", local_params.name())),
local_params, helper, &shared));
std::make_unique<RelaxObjectiveVariablesGenerator>(helper,
"rnd_obj_lns"),
params, helper, &shared));
}
// TODO(user): If we have a model with scheduling + routing. We create
@@ -3398,19 +3509,16 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
!helper->TypeToConstraints(ConstraintProto::kCumulative).empty()) {
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RandomIntervalSchedulingNeighborhoodGenerator>(
helper, absl::StrCat("scheduling_random_intervals_lns_",
local_params.name())),
local_params, helper, &shared));
helper, "scheduling_random_intervals_lns"),
params, helper, &shared));
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RandomPrecedenceSchedulingNeighborhoodGenerator>(
helper, absl::StrCat("scheduling_random_precedences_lns_",
local_params.name())),
local_params, helper, &shared));
helper, "scheduling_random_precedences_lns"),
params, helper, &shared));
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<SchedulingTimeWindowNeighborhoodGenerator>(
helper,
absl::StrCat("scheduling_time_window_lns_", local_params.name())),
local_params, helper, &shared));
helper, "scheduling_time_window_lns"),
params, helper, &shared));
const std::vector<std::vector<int>> intervals_in_constraints =
helper->GetUniqueIntervalSets();
@@ -3418,9 +3526,8 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<SchedulingResourceWindowsNeighborhoodGenerator>(
helper, intervals_in_constraints,
absl::StrCat("scheduling_resource_windows_lns_",
local_params.name())),
local_params, helper, &shared));
"scheduling_resource_windows_lns"),
params, helper, &shared));
}
}
@@ -3431,20 +3538,19 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
if (num_circuit + num_routes > 0) {
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RoutingRandomNeighborhoodGenerator>(
helper, absl::StrCat("routing_random_lns_", local_params.name())),
local_params, helper, &shared));
helper, "routing_random_lns"),
params, helper, &shared));
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RoutingPathNeighborhoodGenerator>(
helper, absl::StrCat("routing_path_lns_", local_params.name())),
local_params, helper, &shared));
helper, "routing_path_lns"),
params, helper, &shared));
}
if (num_routes > 0 || num_circuit > 1) {
subsolvers.push_back(std::make_unique<LnsSolver>(
std::make_unique<RoutingFullPathNeighborhoodGenerator>(
helper,
absl::StrCat("routing_full_path_lns_", local_params.name())),
local_params, helper, &shared));
helper, "routing_full_path_lns"),
params, helper, &shared));
}
}

View File

@@ -199,13 +199,15 @@ std::function<void(Model*)> Cumulative(
// TODO(user): If more than one variable are after the same set of
// intervals, we should regroup them in a single constraint rather than
// having two independent constraints doing the same propagation.
std::vector<PrecedencesPropagator::FullIntegerPrecedence>
full_precedences;
model->GetOrCreate<PrecedencesPropagator>()->ComputeFullPrecedences(
!parameters.exploit_all_precedences(), index_to_end_vars,
&full_precedences);
for (const PrecedencesPropagator::FullIntegerPrecedence& data :
full_precedences) {
std::vector<FullIntegerPrecedence> full_precedences;
if (parameters.exploit_all_precedences()) {
model->GetOrCreate<PrecedenceRelations>()->ComputeFullPrecedences(
index_to_end_vars, &full_precedences);
} else {
model->GetOrCreate<PrecedencesPropagator>()->ComputePartialPrecedences(
index_to_end_vars, &full_precedences);
}
for (const FullIntegerPrecedence& data : full_precedences) {
const int size = data.indices.size();
if (size <= 1) continue;

View File

@@ -364,6 +364,7 @@ std::function<BooleanOrIntegerLiteral()> ShaveObjectiveLb(Model* model) {
const IntegerValue obj_lb = integer_trail->LowerBound(obj_var);
const IntegerValue obj_ub = integer_trail->UpperBound(obj_var);
if (obj_lb == obj_ub) return result;
const IntegerValue mid = (obj_ub - obj_lb) / 2;
const IntegerValue new_ub =
obj_lb + absl::LogUniform<int64_t>(*random, 0, mid.value());

View File

@@ -15,7 +15,6 @@
#include <algorithm>
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -59,7 +58,7 @@ IntervalVariable IntervalsRepository::CreateInterval(AffineExpression start,
LiteralIndex is_present,
bool add_linear_relation) {
// Create the interval.
const IntervalVariable i(starts_.size());
const IntervalVariable i(static_cast<int>(starts_.size()));
starts_.push_back(start);
ends_.push_back(end);
sizes_.push_back(size);
@@ -165,7 +164,7 @@ void SchedulingConstraintHelper::SetLevel(int level) {
void SchedulingConstraintHelper::RegisterWith(GenericLiteralWatcher* watcher) {
const int id = watcher->Register(this);
const int num_tasks = starts_.size();
const int num_tasks = static_cast<int>(starts_.size());
for (int t = 0; t < num_tasks; ++t) {
watcher->WatchIntegerVariable(sizes_[t].var, id, t);
watcher->WatchIntegerVariable(starts_[t].var, id, t);
@@ -257,7 +256,7 @@ bool SchedulingConstraintHelper::ResetFromSubset(
const SchedulingConstraintHelper& other, absl::Span<const int> tasks) {
current_time_direction_ = other.current_time_direction_;
const int num_tasks = tasks.size();
const int num_tasks = static_cast<int>(tasks.size());
starts_.resize(num_tasks);
ends_.resize(num_tasks);
minus_ends_.resize(num_tasks);
@@ -279,7 +278,7 @@ bool SchedulingConstraintHelper::ResetFromSubset(
}
void SchedulingConstraintHelper::InitSortedVectors() {
const int num_tasks = starts_.size();
const int num_tasks = static_cast<int>(starts_.size());
recompute_all_cache_ = true;
recompute_cache_.resize(num_tasks, true);
@@ -595,7 +594,7 @@ void SchedulingConstraintHelper::WatchAllTasks(int id,
GenericLiteralWatcher* watcher,
bool watch_start_max,
bool watch_end_max) const {
const int num_tasks = starts_.size();
const int num_tasks = static_cast<int>(starts_.size());
for (int t = 0; t < num_tasks; ++t) {
watcher->WatchLowerBound(starts_[t], id);
watcher->WatchLowerBound(ends_[t], id);
@@ -753,7 +752,7 @@ IntegerValue SchedulingDemandHelper::DecomposedEnergyMax(int t) const {
}
void SchedulingDemandHelper::CacheAllEnergyValues() {
const int num_tasks = cached_energies_min_.size();
const int num_tasks = static_cast<int>(cached_energies_min_.size());
const bool is_at_level_zero = sat_solver_->CurrentDecisionLevel() == 0;
for (int t = 0; t < num_tasks; ++t) {
// Try to reduce the size of the decomposed energy vector.
@@ -780,18 +779,52 @@ void SchedulingDemandHelper::CacheAllEnergyValues() {
}
}
IntegerValue SchedulingDemandHelper::SimpleDemandMin(int t) const {
return integer_trail_->LowerBound(demands_[t]);
}
IntegerValue SchedulingDemandHelper::SimpleDemandMax(int t) const {
return integer_trail_->UpperBound(demands_[t]);
}
IntegerValue SchedulingDemandHelper::DecomposedDemandMin(int t) const {
if (decomposed_energies_[t].empty()) return kMinIntegerValue;
IntegerValue decomposed_min = kMaxIntegerValue;
for (const LiteralValueValue lvv : decomposed_energies_[t]) {
if (assignment_.LiteralIsTrue(lvv.literal)) {
return lvv.right_value;
}
if (assignment_.LiteralIsFalse(lvv.literal)) continue;
decomposed_min = std::min(decomposed_min, lvv.right_value);
}
return decomposed_min;
}
IntegerValue SchedulingDemandHelper::DecomposedDemandMax(int t) const {
if (decomposed_energies_[t].empty()) return kMaxIntegerValue;
IntegerValue decomposed_max = kMaxIntegerValue;
for (const LiteralValueValue lvv : decomposed_energies_[t]) {
if (assignment_.LiteralIsTrue(lvv.literal)) {
return lvv.right_value;
}
if (assignment_.LiteralIsFalse(lvv.literal)) continue;
decomposed_max = std::max(decomposed_max, lvv.right_value);
}
return decomposed_max;
}
IntegerValue SchedulingDemandHelper::DemandMin(int t) const {
DCHECK_LT(t, demands_.size());
return integer_trail_->LowerBound(demands_[t]);
return std::max(SimpleDemandMin(t), DecomposedDemandMin(t));
}
IntegerValue SchedulingDemandHelper::DemandMax(int t) const {
DCHECK_LT(t, demands_.size());
return integer_trail_->UpperBound(demands_[t]);
return std::min(SimpleDemandMax(t), DecomposedDemandMax(t));
}
bool SchedulingDemandHelper::DemandIsFixed(int t) const {
return integer_trail_->IsFixed(demands_[t]);
return DemandMin(t) == DemandMax(t);
}
bool SchedulingDemandHelper::DecreaseEnergyMax(int t, IntegerValue value) {
@@ -826,7 +859,22 @@ bool SchedulingDemandHelper::DecreaseEnergyMax(int t, IntegerValue value) {
void SchedulingDemandHelper::AddDemandMinReason(int t) {
DCHECK_LT(t, demands_.size());
if (demands_[t].var != kNoIntegerVariable) {
const IntegerValue decomposed_min = DecomposedDemandMin(t);
if (decomposed_min >= SimpleDemandMin(t)) {
auto* reason = helper_->MutableLiteralReason();
const int old_size = static_cast<int>(reason->size());
for (const auto [lit, fixed_size, fixed_demand] : decomposed_energies_[t]) {
if (assignment_.LiteralIsTrue(lit)) {
reason->resize(old_size);
reason->push_back(lit.Negated());
return;
} else if (fixed_demand < decomposed_min &&
assignment_.LiteralIsFalse(lit)) {
reason->push_back(lit);
}
}
} else if (demands_[t].var != kNoIntegerVariable &&
decomposed_energies_[t].empty()) {
helper_->MutableIntegerReason()->push_back(
integer_trail_->LowerBoundAsLiteral(demands_[t].var));
}
@@ -837,7 +885,7 @@ void SchedulingDemandHelper::AddEnergyMinReason(int t) {
const IntegerValue value = cached_energies_min_[t];
if (DecomposedEnergyMin(t) >= value) {
auto* reason = helper_->MutableLiteralReason();
const int old_size = reason->size();
const int old_size = static_cast<int>(reason->size());
for (const auto [lit, fixed_size, fixed_demand] : decomposed_energies_[t]) {
if (assignment_.LiteralIsTrue(lit)) {
reason->resize(old_size);
@@ -880,7 +928,7 @@ bool SchedulingDemandHelper::AddLinearizedDemand(
void SchedulingDemandHelper::OverrideLinearizedEnergies(
const std::vector<LinearExpression>& energies) {
const int num_tasks = energies.size();
const int num_tasks = static_cast<int>(energies.size());
DCHECK_EQ(num_tasks, helper_->NumTasks());
linearized_energies_.resize(num_tasks);
for (int t = 0; t < num_tasks; ++t) {
@@ -957,7 +1005,7 @@ void SchedulingDemandHelper::AddEnergyMinInWindowReason(
helper_->AddEndMaxReason(t, end_max);
auto* literal_reason = helper_->MutableLiteralReason();
const int old_size = literal_reason->size();
const int old_size = static_cast<int>(literal_reason->size());
DCHECK(!decomposed_energies_[t].empty());
for (const auto [lit, fixed_size, fixed_demand] : decomposed_energies_[t]) {

View File

@@ -591,6 +591,11 @@ class SchedulingDemandHelper {
IntegerValue DecomposedEnergyMin(int t) const;
IntegerValue DecomposedEnergyMax(int t) const;
IntegerValue SimpleDemandMin(int t) const;
IntegerValue SimpleDemandMax(int t) const;
IntegerValue DecomposedDemandMin(int t) const;
IntegerValue DecomposedDemandMax(int t) const;
IntegerTrail* integer_trail_;
SatSolver* sat_solver_; // To get the current propagation level.
const VariablesAssignment& assignment_;

View File

@@ -610,7 +610,7 @@ void AddRoutesCutGenerator(const ConstraintProto& ct, Model* m,
// Scan the intervals of a cumulative/no_overlap constraint, and its capacity (1
// for the no_overlap). It returns the index of the makespan interval if found,
// or -1 otherwise.
// or std::nullopt otherwise.
//
// Currently, this requires the capacity to be fixed in order to scan for a
// makespan interval.
@@ -623,15 +623,16 @@ void AddRoutesCutGenerator(const ConstraintProto& ct, Model* m,
//
// These properties ensure that all other intervals end before the start of
// the makespan interval.
int DetectMakespan(const std::vector<IntervalVariable>& intervals,
const std::vector<AffineExpression>& demands,
const AffineExpression& capacity, Model* model) {
std::optional<int> DetectMakespan(
const std::vector<IntervalVariable>& intervals,
const std::vector<AffineExpression>& demands,
const AffineExpression& capacity, Model* model) {
IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
IntervalsRepository* repository = model->GetOrCreate<IntervalsRepository>();
// TODO(user): Supports variable capacity.
if (!integer_trail->IsFixed(capacity)) {
return -1;
return std::nullopt;
}
// Detect the horizon (max of all end max of all intervals).
@@ -644,47 +645,96 @@ int DetectMakespan(const std::vector<IntervalVariable>& intervals,
const IntegerValue capacity_value = integer_trail->FixedValue(capacity);
for (int i = 0; i < intervals.size(); ++i) {
if (repository->IsAbsent(intervals[i])) continue;
if (!repository->IsPresent(intervals[i])) continue;
const AffineExpression& end = repository->End(intervals[i]);
if (integer_trail->IsFixed(demands[i]) &&
integer_trail->FixedValue(demands[i]) == capacity_value &&
integer_trail->IsFixed(end) &&
integer_trail->FixedValue(end) == horizon &&
integer_trail->LowerBound(repository->Size(intervals[i])) > 0 &&
repository->IsPresent(intervals[i])) {
integer_trail->LowerBound(repository->Size(intervals[i])) > 0) {
return i;
}
}
return -1;
return std::nullopt;
}
namespace {
std::optional<AffineExpression> DetectMakespanFromPrecedences(
const SchedulingConstraintHelper& helper, Model* model) {
if (helper.NumTasks() == 0) return {};
const std::vector<AffineExpression>& ends = helper.Ends();
std::vector<IntegerVariable> end_vars;
for (const AffineExpression& end : ends) {
// TODO(user): Deal with constant end.
if (end.var == kNoIntegerVariable) return {};
if (end.coeff != 1) return {};
end_vars.push_back(end.var);
}
std::vector<FullIntegerPrecedence> output;
auto* precedences = model->GetOrCreate<PrecedenceRelations>();
precedences->ComputeFullPrecedences(end_vars, &output);
for (const auto& p : output) {
// TODO(user): What if we have more than one candidate makespan ?
if (p.indices.size() != ends.size()) continue;
// We have a Makespan!
// We want p.var - min_delta >= max(end).
IntegerValue min_delta = kMaxIntegerValue;
for (int i = 0; i < p.indices.size(); ++i) {
// end_vars[indices[i]] + p.offset[i] <= p.var
// [interval_end - end_offset][indices[i]] + p.offset[i] <= p.var
//
// TODO(user): It is possible this min_delta becomes positive but for a
// real makespan, we could make sure it is <= 0. This can happen if we
// have an optional interval because we didn't compute the offset assuming
// this interval was present. We could potentially fix it if we know that
// presence_literal => p.var >= end.
min_delta =
std::min(min_delta, p.offsets[i] - ends[p.indices[i]].constant);
}
VLOG(2) << "Makespan detected >= ends + " << min_delta;
return AffineExpression(p.var, IntegerValue(1), -min_delta);
}
return {};
}
} // namespace
void AppendNoOverlapRelaxationAndCutGenerator(const ConstraintProto& ct,
Model* model,
LinearRelaxation* relaxation) {
if (HasEnforcementLiteral(ct)) return;
auto* mapping = model->GetOrCreate<CpModelMapping>();
std::vector<IntervalVariable> intervals =
mapping->Intervals(ct.no_overlap().intervals());
const IntegerValue one(1);
std::vector<AffineExpression> demands(intervals.size(), one);
const int makespan_index =
DetectMakespan(intervals, demands, /*capacity=*/one, model);
std::optional<AffineExpression> makespan;
IntervalsRepository* repository = model->GetOrCreate<IntervalsRepository>();
if (makespan_index != -1) {
makespan = repository->Start(intervals[makespan_index]);
std::optional<AffineExpression> makespan;
const std::optional<int> makespan_index =
DetectMakespan(intervals, demands, /*capacity=*/one, model);
if (makespan_index.has_value()) {
makespan = repository->Start(intervals[makespan_index.value()]);
demands.pop_back(); // the vector is filled with ones.
intervals.erase(intervals.begin() + makespan_index);
intervals.erase(intervals.begin() + makespan_index.value());
}
SchedulingConstraintHelper* helper = repository->GetOrCreateHelper(intervals);
if (!helper->SynchronizeAndSetTimeDirection(true)) return;
SchedulingDemandHelper* demands_helper =
new SchedulingDemandHelper(demands, helper, model);
model->TakeOwnership(demands_helper);
if (!makespan.has_value()) {
makespan = DetectMakespanFromPrecedences(*helper, model);
}
AddCumulativeRelaxation(/*capacity=*/one, helper, demands_helper, makespan,
model, relaxation);
if (model->GetOrCreate<SatParameters>()->linearization_level() > 1) {
@@ -702,15 +752,15 @@ void AppendCumulativeRelaxationAndCutGenerator(const ConstraintProto& ct,
std::vector<AffineExpression> demands =
mapping->Affines(ct.cumulative().demands());
const AffineExpression capacity = mapping->Affine(ct.cumulative().capacity());
const int makespan_index =
const std::optional<int> makespan_index =
DetectMakespan(intervals, demands, capacity, model);
std::optional<AffineExpression> makespan;
IntervalsRepository* repository = model->GetOrCreate<IntervalsRepository>();
if (makespan_index != -1) {
if (makespan_index.has_value()) {
// We remove the makespan data from the intervals the demands vector.
makespan = repository->Start(intervals[makespan_index]);
demands.erase(demands.begin() + makespan_index);
intervals.erase(intervals.begin() + makespan_index);
makespan = repository->Start(intervals[makespan_index.value()]);
demands.erase(demands.begin() + makespan_index.value());
intervals.erase(intervals.begin() + makespan_index.value());
}
// We try to linearize the energy of each task (size * demand).
@@ -720,6 +770,10 @@ void AppendCumulativeRelaxationAndCutGenerator(const ConstraintProto& ct,
new SchedulingDemandHelper(demands, helper, model);
model->TakeOwnership(demands_helper);
if (!makespan.has_value()) {
makespan = DetectMakespanFromPrecedences(*helper, model);
}
// We can now add the relaxation and the cut generators.
AddCumulativeRelaxation(capacity, helper, demands_helper, makespan, model,
relaxation);

View File

@@ -22,6 +22,7 @@
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "ortools/sat/cp_model_search.h"
#include "ortools/sat/sat_parameters.pb.h"
namespace operations_research {
@@ -160,53 +161,27 @@ std::string ValidateParameters(const SatParameters& params) {
return "Enumerating all solutions does not work with interleaved search";
}
absl::flat_hash_set<std::string> valid_subsolvers({
"auto",
"core_default_lp",
"core_max_lp",
"core_or_no_lp",
"core",
"default_lp",
"default",
"fixed",
"lb_tree_search",
"less_encoding",
"max_hs",
"max_lp",
"no_lp",
"objective_lb_search_max_lp",
"objective_lb_search_no_lp",
"objective_lb_search",
"probing_max_lp",
"probing_no_lp",
"probing",
"pseudo_costs",
"quick_restart_max_lp",
"quick_restart_no_lp",
"quick_restart",
"reduced_costs",
});
for (const SatParameters& new_subsolver : params.subsolver_params()) {
if (new_subsolver.name().empty()) {
return "New subsolver parameter defined without a name";
}
valid_subsolvers.insert(new_subsolver.name());
}
const auto strategies = GetNamedParameters(params);
for (const std::string& subsolver : params.subsolvers()) {
if (!valid_subsolvers.contains(subsolver)) {
if (!strategies.contains(subsolver)) {
return absl::StrCat("subsolver \'", subsolver, "\' is not valid");
}
}
for (const std::string& subsolver : params.extra_subsolvers()) {
if (!valid_subsolvers.contains(subsolver)) {
if (!strategies.contains(subsolver)) {
return absl::StrCat("subsolver \'", subsolver, "\' is not valid");
}
}
for (const std::string& subsolver : params.ignore_subsolvers()) {
if (!valid_subsolvers.contains(subsolver)) {
if (!strategies.contains(subsolver)) {
return absl::StrCat("subsolver \'", subsolver, "\' is not valid");
}
}

View File

@@ -47,6 +47,158 @@
namespace operations_research {
namespace sat {
void PrecedenceRelations::Add(IntegerVariable tail, IntegerVariable head,
                              IntegerValue offset) {
  // Once the graph has been frozen by the first ComputeFullPrecedences()
  // call, later additions are ignored: some linear constraints are
  // (re-)loaded as part of the linear relaxation after that point.
  if (is_built_) return;

  // Skip relations that can never propagate: if even the largest value of
  // tail plus the offset fits below the smallest value of head, then
  // "tail + offset <= head" already holds at the current bounds.
  const bool trivially_true = integer_trail_->UpperBound(tail) + offset <=
                              integer_trail_->LowerBound(head);
  if (trivially_true) return;

  // A relation between a variable and itself (or its negation) is ignored.
  if (PositiveVariable(tail) == PositiveVariable(head)) return;

  // Negative offsets could create cycles in the precedence graph.
  // TODO(user): Remove once we support non-DAG.
  if (offset < 0) return;

  // Record the relation and its contrapositive, keeping arc_offset_ parallel
  // to the arc insertion order:
  //   tail + offset <= head  <=>  -head + offset <= -tail.
  graph_.AddArc(tail.value(), head.value());
  arc_offset_.push_back(offset);
  graph_.AddArc(NegationOf(head).value(), NegationOf(tail).value());
  arc_offset_.push_back(offset);
}
void PrecedenceRelations::Build() {
  CHECK(!is_built_);
  is_built_ = true;

  // Finalize the static graph. Building it permutes the internal arc
  // indices, so the parallel offset vector must follow the same permutation.
  std::vector<int> arc_permutation;
  graph_.Build(&arc_permutation);
  util::Permute(arc_permutation, &arc_offset_);

  // Try to compute a topological order over every arc currently present.
  // This only succeeds if the precedence graph is a DAG.
  //
  // TODO(user): This can fail if we don't have a DAG. We could just skip Bad
  // edges instead, and have a sub-DAG as an heuristic. Or analyze the arc
  // weight and make sure cycle are not an issue. We can also start with arcs
  // with strictly positive weight.
  //
  // TODO(user): Only explore the sub-graph reachable from "vars".
  const int num_nodes = graph_.num_nodes();
  DenseIntStableTopologicalSorter sorter(num_nodes);
  for (int arc = 0; arc < graph_.num_arcs(); ++arc) {
    sorter.AddEdge(graph_.Tail(arc), graph_.Head(arc));
  }

  topological_order_.clear();
  bool cycle_detected = false;
  int node;
  while (sorter.GetNext(&node, &cycle_detected, nullptr)) {
    topological_order_.push_back(IntegerVariable(node));
    if (cycle_detected) {
      // Not a DAG: leave is_dag_ false so ComputeFullPrecedences() aborts.
      is_dag_ = false;
      return;
    }
  }
  is_dag_ = !cycle_detected;
}
// Sweeps the precedence DAG in topological order and, for every node that is
// "interesting", outputs the set of input vars known to precede it together
// with the best (largest) offsets. Aborts (empty output) if the graph is not
// a DAG.
void PrecedenceRelations::ComputeFullPrecedences(
    const std::vector<IntegerVariable>& vars,
    std::vector<FullIntegerPrecedence>* output) {
  output->clear();
  // Lazily freeze the graph on first use; after this, Add() is a no-op.
  if (!is_built_) Build();
  if (!is_dag_) return;

  VLOG(2) << "num_nodes: " << graph_.num_nodes()
          << " num_arcs: " << graph_.num_arcs() << " is_dag: " << is_dag_;

  // Compute all precedences.
  // We loop over the nodes in topological order, and we maintain for all
  // variables we encounter, the list of "to_consider" variables that are
  // before, each with the largest known offset.
  //
  // TODO(user): use vector of fixed size.
  absl::flat_hash_set<IntegerVariable> is_interesting;
  absl::flat_hash_set<IntegerVariable> to_consider(vars.begin(), vars.end());

  // For each reached node: map {predecessor var -> best offset to here}.
  absl::flat_hash_map<IntegerVariable,
                      absl::flat_hash_map<IntegerVariable, IntegerValue>>
      vars_before_with_offset;
  absl::flat_hash_map<IntegerVariable, IntegerValue> tail_map;
  for (const IntegerVariable tail_var : topological_order_) {
    // Skip nodes that are neither inputs nor reachable from an input.
    if (!to_consider.contains(tail_var) &&
        !vars_before_with_offset.contains(tail_var)) {
      continue;
    }

    // We copy the data for tail_var here, because the pointer is not stable:
    // inserting into vars_before_with_offset below may rehash the map.
    // TODO(user): optimize when needed.
    tail_map.clear();
    {
      const auto it = vars_before_with_offset.find(tail_var);
      if (it != vars_before_with_offset.end()) {
        tail_map = it->second;
      }
    }

    // Propagate tail_var's predecessor set to each of its successors.
    for (const int arc : graph_.OutgoingArcs(tail_var.value())) {
      CHECK_EQ(tail_var.value(), graph_.Tail(arc));
      const IntegerVariable head_var = IntegerVariable(graph_.Head(arc));
      const IntegerValue arc_offset = arc_offset_[arc];

      // No need to create an empty entry in this case.
      if (tail_map.empty() && !to_consider.contains(tail_var)) continue;

      auto& to_update = vars_before_with_offset[head_var];
      // Relax: every var before tail is also before head, through this arc.
      // Keep the max offset when several paths reach the same predecessor.
      for (const auto& [var_before, offset] : tail_map) {
        if (!to_update.contains(var_before)) {
          to_update[var_before] = arc_offset + offset;
        } else {
          to_update[var_before] =
              std::max(arc_offset + offset, to_update[var_before]);
        }
      }
      // tail_var itself precedes head_var if it is one of the input vars.
      if (to_consider.contains(tail_var)) {
        if (!to_update.contains(tail_var)) {
          to_update[tail_var] = arc_offset;
        } else {
          to_update[tail_var] = std::max(arc_offset, to_update[tail_var]);
        }
      }

      // Small filtering heuristic: if we have (before) < tail, and tail < head,
      // we really do not need to list (before, tail) < head. We only need that
      // if the list of variable before head contains some variable that are not
      // already before tail.
      if (to_update.size() > tail_map.size() + 1) {
        is_interesting.insert(head_var);
      } else {
        is_interesting.erase(head_var);
      }
    }

    // Extract the output for tail_var. Because of the topological ordering, the
    // data for tail_var is already final now.
    //
    // TODO(user): Release the memory right away.
    if (!is_interesting.contains(tail_var)) continue;
    // A single predecessor is just the arc relation itself; not worth listing.
    if (tail_map.size() == 1) continue;

    FullIntegerPrecedence data;
    data.var = tail_var;
    // NOTE(review): min_offset is computed but never stored in the output —
    // presumably kept for a future normalization step; confirm before relying
    // on it.
    IntegerValue min_offset = kMaxIntegerValue;
    for (int i = 0; i < vars.size(); ++i) {
      const auto offset_it = tail_map.find(vars[i]);
      if (offset_it == tail_map.end()) continue;
      data.indices.push_back(i);
      data.offsets.push_back(offset_it->second);
      min_offset = std::min(data.offsets.back(), min_offset);
    }
    output->push_back(std::move(data));
  }
}
namespace {
void AppendLowerBoundReasonIfValid(IntegerVariable var,
@@ -241,146 +393,25 @@ void PrecedencesPropagator::ComputePrecedences(
}
}
void PrecedencesPropagator::ComputeFullPrecedences(
bool call_compute_precedences, const std::vector<IntegerVariable>& vars,
void PrecedencesPropagator::ComputePartialPrecedences(
const std::vector<IntegerVariable>& vars,
std::vector<FullIntegerPrecedence>* output) {
output->clear();
DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
if (call_compute_precedences) {
std::vector<PrecedencesPropagator::IntegerPrecedences> before;
ComputePrecedences(vars, &before);
// Convert format.
const int size = before.size();
for (int i = 0; i < size;) {
FullIntegerPrecedence data;
data.var = before[i].var;
const IntegerVariable var = before[i].var;
DCHECK_NE(var, kNoIntegerVariable);
for (; i < size && before[i].var == var; ++i) {
data.indices.push_back(before[i].index);
data.offsets.push_back(before[i].offset);
}
output->push_back(std::move(data));
}
return;
}
// Get a topological order of the DAG formed by all the arcs that are present.
//
// TODO(user): This can fail if we don't have a DAG. We could just skip Bad
// edges instead, and have a sub-DAG as an heuristic. Or analyze the arc
// weight and make sure cycle are not an issue. We can also start with arcs
// with strictly positive weight.
//
// TODO(user): Only explore the sub-graph reachable from "vars".
const int num_nodes = integer_trail_->NumIntegerVariables().value();
DenseIntStableTopologicalSorter sorter(num_nodes);
for (const auto& arcs : impacted_arcs_) {
for (const ArcIndex arc_index : arcs) {
const ArcInfo& arc = arcs_[arc_index];
if (arc.tail_var == arc.head_var) continue;
if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue;
sorter.AddEdge(arc.tail_var.value(), arc.head_var.value());
}
}
int next;
bool graph_has_cycle = false;
std::vector<IntegerVariable> topological_order;
while (sorter.GetNext(&next, &graph_has_cycle, nullptr)) {
topological_order.push_back(IntegerVariable(next));
if (graph_has_cycle) return;
}
// Compute all precedences.
// We loop over the node in topological order, and we maintain for all
// variable we encounter, the list of "to_consider" variables that are before.
//
// TODO(user): use vector of fixed size.
absl::flat_hash_set<IntegerVariable> is_interesting;
absl::flat_hash_set<IntegerVariable> to_consider(vars.begin(), vars.end());
absl::flat_hash_map<IntegerVariable,
absl::flat_hash_map<IntegerVariable, IntegerValue>>
vars_before_with_offset;
absl::flat_hash_map<IntegerVariable, IntegerValue> tail_map;
for (const IntegerVariable tail_var : topological_order) {
if (!to_consider.contains(tail_var) &&
!vars_before_with_offset.contains(tail_var)) {
continue;
}
// Update the relation for the variable that are after.
if (tail_var >= impacted_arcs_.size()) continue;
// We copy the data for tail_var here, because the pointer is not stable.
// TODO(user): optimize when needed.
tail_map.clear();
{
const auto it = vars_before_with_offset.find(tail_var);
if (it != vars_before_with_offset.end()) {
tail_map = it->second;
}
}
for (const ArcIndex arc_index : impacted_arcs_[tail_var]) {
const ArcInfo& arc = arcs_[arc_index];
if (arc.tail_var == arc.head_var) continue;
if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue;
CHECK_EQ(arc.tail_var, tail_var);
// No need to create an empty entry in this case.
if (tail_map.empty() && !to_consider.contains(tail_var)) continue;
IntegerValue arc_offset = arc.offset;
if (arc.offset_var != kNoIntegerVariable) {
arc_offset += integer_trail_->LowerBound(arc.offset_var);
}
auto& to_update = vars_before_with_offset[arc.head_var];
for (const auto& [var_before, offset] : tail_map) {
if (!to_update.contains(var_before)) {
to_update[var_before] = arc_offset + offset;
} else {
to_update[var_before] =
std::max(arc_offset + offset, to_update[var_before]);
}
}
if (to_consider.contains(tail_var)) {
if (!to_update.contains(tail_var)) {
to_update[tail_var] = arc_offset;
} else {
to_update[tail_var] = std::max(arc_offset, to_update[tail_var]);
}
}
// Small filtering heuristic: if we have (before) < tail, and tail < head,
// we really do not need to list (before, tail) < head. We only need that
// if the list of variable before head contains some variable that are not
// already before tail.
if (to_update.size() > tail_map.size() + 1) {
is_interesting.insert(arc.head_var);
} else {
is_interesting.erase(arc.head_var);
}
}
// Extract the output for tail_var. Because of the topological ordering, the
// data for tail_var is already final now.
//
// TODO(user): Release the memory right away.
if (!is_interesting.contains(tail_var)) continue;
if (tail_map.size() == 1) continue;
std::vector<PrecedencesPropagator::IntegerPrecedences> before;
ComputePrecedences(vars, &before);
// Convert format.
const int size = before.size();
for (int i = 0; i < size;) {
FullIntegerPrecedence data;
data.var = tail_var;
IntegerValue min_offset = kMaxIntegerValue;
for (int i = 0; i < vars.size(); ++i) {
const auto offset_it = tail_map.find(vars[i]);
if (offset_it == tail_map.end()) continue;
data.indices.push_back(i);
data.offsets.push_back(offset_it->second);
min_offset = std::min(data.offsets.back(), min_offset);
data.var = before[i].var;
const IntegerVariable var = before[i].var;
DCHECK_NE(var, kNoIntegerVariable);
for (; i < size && before[i].var == var; ++i) {
data.indices.push_back(before[i].index);
data.offsets.push_back(before[i].offset);
}
output->push_back(std::move(data));
}

View File

@@ -25,6 +25,7 @@
#include "ortools/base/integral_types.h"
#include "ortools/base/macros.h"
#include "ortools/base/strong_vector.h"
#include "ortools/graph/graph.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/model.h"
#include "ortools/sat/sat_base.h"
@@ -36,6 +37,61 @@
namespace operations_research {
namespace sat {
// One precedence relation of the form:
//   var >= max_i(vars[indices[i]] + offsets[i])
// where indices index into the "vars" argument of ComputeFullPrecedences().
// indices and offsets are parallel vectors of the same size.
struct FullIntegerPrecedence {
  IntegerVariable var;
  std::vector<int> indices;
  std::vector<IntegerValue> offsets;
};
// Stores all the precedences relation of the form "tail_X + offset <= head_X"
// that we could extract from the linear constraint of the model. These are
// stored in a directed graph.
//
// TODO(user): Support conditional relation.
// TODO(user): Support non-DAG like graph.
// TODO(user): Support variable offset that can be updated as search progress.
class PrecedenceRelations {
 public:
  explicit PrecedenceRelations(Model* model)
      : integer_trail_(model->GetOrCreate<IntegerTrail>()) {}

  // Reserves room for num_variables graph nodes. AddNode(num_variables - 1)
  // makes sure nodes [0, num_variables) all exist in the graph.
  void Resize(int num_variables) {
    graph_.ReserveNodes(num_variables);
    graph_.AddNode(num_variables - 1);
  }

  // Add a relation tail + offset <= head.
  // Ignored after the first ComputeFullPrecedences() call.
  void Add(IntegerVariable tail, IntegerVariable head, IntegerValue offset);

  // Returns a set of relations var >= max_i(vars[index[i]] + offsets[i]).
  //
  // This currently only works if the precedence relation form a DAG.
  // If not we will just abort. TODO(user): generalize.
  //
  // TODO(user): Put some work limit in place, as this can be slow. Complexity
  // is in O(vars.size()) * num_arcs.
  //
  // TODO(user): Since we don't need ALL precedences, we could just work on a
  // sub-DAG of the full precedence graph instead of aborting. Or we can just
  // support the general non-DAG cases.
  //
  // TODO(user): Many relations can be redundant. Filter them.
  void ComputeFullPrecedences(const std::vector<IntegerVariable>& vars,
                              std::vector<FullIntegerPrecedence>* output);

 private:
  // Freezes the graph, permutes arc_offset_ accordingly and computes
  // topological_order_ / is_dag_. Called lazily by ComputeFullPrecedences().
  void Build();

  IntegerTrail* integer_trail_;

  // Precedence graph; arcs encode "tail + arc_offset_[arc] <= head".
  // arc_offset_ is parallel to the graph's arc indices.
  util::StaticGraph<> graph_;
  std::vector<IntegerValue> arc_offset_;

  // Set by Build(); the topological order is only meaningful when is_dag_.
  bool is_built_ = false;
  bool is_dag_ = false;
  std::vector<IntegerVariable> topological_order_;
};
// This class implement a propagator on simple inequalities between integer
// variables of the form (i1 + offset <= i2). The offset can be constant or
// given by the value of a third integer variable. Offsets can also be negative.
@@ -126,35 +182,15 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface {
std::vector<Literal>* literal_reason,
std::vector<IntegerLiteral>* integer_reason) const;
// Similar to ComputePrecedences() but this uses a "slow algorithm". It is not
// meant to be called often and explore the full precedences graph.
// This just wrap ComputePrecedences() above and convert its output format to
// the same format as PrecedenceRelations::ComputeFullPrecedences(). This is
// less efficient but more convenient to use.
//
// If call_compute_precedence is true, this just wrap ComputePrecedences() and
// convert its output to this function format, which is less efficient but
// more convenient to use.
//
// Important: This currently assumes the full precedence graph form a DAG.
// Otherwise it will just fail and returns no precedence. By contrast
// ComputePrecedences() still work.
//
// TODO(user): Put some work limit in place, as this can be slow. Complexity
// is in O(vars.size()) * num_arcs.
//
// TODO(user): Since we don't need ALL precedences, we could just work on a
// sub-DAG of the full precedence graph instead of aborting.
//
// TODO(user): Many relations can be redundant. Filter them.
//
// Returns a bunch of precedences relations:
// An IntegerVariable >= to vars[indices[i]] + offset[i], for i in indices.
struct FullIntegerPrecedence {
IntegerVariable var;
std::vector<int> indices;
std::vector<IntegerValue> offsets;
};
void ComputeFullPrecedences(bool call_compute_precedences,
const std::vector<IntegerVariable>& vars,
std::vector<FullIntegerPrecedence>* output);
void ComputePartialPrecedences(const std::vector<IntegerVariable>& vars,
std::vector<FullIntegerPrecedence>* output);
// Advanced usage. To be called once all the constraints have been added to
// the model. This will loop over all "node" in this class, and if one of its
@@ -385,7 +421,8 @@ inline std::function<void(Model*)> LowerOrEqualWithOffset(IntegerVariable a,
IntegerVariable b,
int64_t offset) {
return [=](Model* model) {
return model->GetOrCreate<PrecedencesPropagator>()->AddPrecedenceWithOffset(
model->GetOrCreate<PrecedenceRelations>()->Add(a, b, IntegerValue(offset));
model->GetOrCreate<PrecedencesPropagator>()->AddPrecedenceWithOffset(
a, b, IntegerValue(offset));
};
}

View File

@@ -23,7 +23,7 @@ option csharp_namespace = "Google.OrTools.Sat";
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 253
// NEXT TAG: 254
message SatParameters {
// In some context, like in a portfolio of search, it makes sense to name a
// given parameters set for logging purpose.
@@ -610,8 +610,7 @@ message SatParameters {
repeated string subsolvers = 207;
// A convenient way to add more worker types.
// These will be added at the beginning of the list.
repeated string extra_subsolvers = 219;
// Rather than fully specifying subsolvers, it is often convenient to just
@@ -1036,6 +1035,11 @@ message SatParameters {
// minimizing) starting from the lower bound of the objective.
optional bool use_objective_lb_search = 228 [default = false];
// This search differs from the previous search as it will not use assumptions
// to bound the objective, and it will recreate a full model with the
// hardcoded objective value.
optional bool use_objective_shaving_search = 253 [default = false];
// The solver ignores the pseudo costs of variables with number of recordings
// less than this threshold.
optional int64 pseudo_cost_reliability_threshold = 123 [default = 100];

View File

@@ -274,25 +274,20 @@ std::vector<int64_t> FindPossibleDemands(const EnergyEvent& event,
return possible_demands;
}
// This generates the actual cut for [window_start, window_end) into
// *temp_builder and compares its LP activity against available_energy_lp.
//
// Returns true when the summed energy contribution of all events (evaluated
// at the current LP values) exceeds the available energy by at least
// kMinCutViolation, i.e. the cut is violated enough to be worth adding.
// Returns false if any event cannot be encoded (AddOneEvent() fails) or the
// cut is not efficient.
//
// Note: the span previously contained both the pre- and post-refactoring
// versions interleaved (duplicate parameter list and statements referencing
// the removed locals tmp_energy / energy_from_events_lp), which could not
// compile; this is the reconstructed post-refactoring version that reuses the
// caller-provided builder instead of allocating one per call.
bool CutIsEfficient(
    const std::vector<EnergyEvent>& events, IntegerValue window_start,
    IntegerValue window_end, double available_energy_lp,
    const absl::StrongVector<IntegerVariable, double>& lp_values,
    LinearConstraintBuilder* temp_builder) {
  temp_builder->Clear();
  for (const EnergyEvent& event : events) {
    if (!AddOneEvent(event, window_start, window_end, temp_builder)) {
      return false;
    }
  }
  return temp_builder->BuildExpression().LpValue(lp_values) >=
         available_energy_lp * (1.0 + kMinCutViolation);
}
@@ -398,6 +393,7 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
const double capacity_lp = ToDouble(capacity);
const double makespan_lp = makespan.LpValue(lp_values);
const double makespan_min_lp = ToDouble(makespan_min);
LinearConstraintBuilder temp_builder(model);
for (int i = 0; i + 1 < num_time_points; ++i) {
// Checks the time limit if the problem is too big.
if (events.size() > 50 && time_limit->LimitReached()) return;
@@ -455,7 +451,7 @@ void GenerateCumulativeEnergeticCutsWithMakespanAndFixedCapacity(
? max_energy_up_to_makespan_lp
: ToDouble(cumulated_max_energy);
if (CutIsEfficient(events, window_start, window_end, available_energy_lp,
lp_values, model)) {
lp_values, &temp_builder)) {
OverloadedTimeWindowWithMakespan w;
w.start = window_start;
w.end = window_end;
@@ -555,6 +551,7 @@ void GenerateCumulativeEnergeticCuts(
time_points_set.end());
const int num_time_points = time_points.size();
LinearConstraintBuilder temp_builder(model);
for (int i = 0; i + 1 < num_time_points; ++i) {
// Checks the time limit if the problem is too big.
if (events.size() > 50 && time_limit->LimitReached()) return;
@@ -569,7 +566,7 @@ void GenerateCumulativeEnergeticCuts(
ToDouble(window_end - window_start) * capacity_lp;
if (available_energy_lp >= max_possible_energy_lp) break;
if (CutIsEfficient(events, window_start, window_end, available_energy_lp,
lp_values, model)) {
lp_values, &temp_builder)) {
overloaded_time_windows.push_back({window_start, window_end});
}
}

View File

@@ -284,7 +284,12 @@ bool TimeTableEdgeFinding::TimeTableEdgeFindingPass() {
// interval are entirely contained in it.
// TODO(user): check that we should not fail if the interval is
// overloaded, i.e., available_energy < 0.
if (max_task == -1) continue;
//
// We also defensively abort if the demand_min is 0.
// This may happen along a energy_min > 0 if the literals in the
// decomposed_energy have been fixed, and not yet propagated to the demand
// affine expression.
if (max_task == -1 || demands_->DemandMin(max_task) == 0) continue;
// Compute the amount of energy available to schedule max_task.
const IntegerValue window_energy =
@@ -321,7 +326,7 @@ bool TimeTableEdgeFinding::TimeTableEdgeFindingPass() {
//
// TODO(user): Because this use updated bounds, it might be more than what
// we accounted for in the precomputation. This is correct but could be
// improved uppon.
// improved upon.
const IntegerValue mandatory_size_in_window =
std::max(IntegerValue(0),
std::min(window_max, helper_->EndMin(max_task)) -