[CP-SAT] revisit search heuristics, lns workers search heuristics; scheduling propagation

This commit is contained in:
Laurent Perron
2023-10-02 19:52:11 +02:00
parent 4255e18c56
commit d07127d40a
17 changed files with 355 additions and 184 deletions

View File

@@ -23,6 +23,7 @@
#include "absl/flags/flag.h"
#include "absl/log/check.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "google/protobuf/text_format.h"
#include "google/protobuf/wrappers.pb.h"
#include "ortools/base/init_google.h"
@@ -197,7 +198,7 @@ struct AlternativeTaskData {
// main task of the job.
void CreateAlternativeTasks(
const JsspInputProblem& problem,
const std::vector<std::vector<JobTaskData>>& job_to_tasks, int64_t horizon,
absl::Span<const std::vector<JobTaskData>> job_to_tasks, int64_t horizon,
std::vector<std::vector<std::vector<AlternativeTaskData>>>&
job_task_to_alternatives,
CpModelBuilder& cp_model) {
@@ -296,7 +297,7 @@ struct MachineTaskData {
std::vector<std::vector<MachineTaskData>> GetDataPerMachine(
const JsspInputProblem& problem,
const std::vector<std::vector<std::vector<AlternativeTaskData>>>&
absl::Span<const std::vector<std::vector<AlternativeTaskData>>>
job_task_to_alternatives) {
const int num_jobs = problem.jobs_size();
const int num_machines = problem.machines_size();
@@ -442,8 +443,8 @@ void CreateMachines(
// Collect all objective terms and add them to the model.
void CreateObjective(
const JsspInputProblem& problem,
const std::vector<std::vector<JobTaskData>>& job_to_tasks,
const std::vector<std::vector<std::vector<AlternativeTaskData>>>&
absl::Span<const std::vector<JobTaskData>> job_to_tasks,
absl::Span<const std::vector<std::vector<AlternativeTaskData>>>
job_task_to_alternatives,
int64_t horizon, IntVar makespan, CpModelBuilder& cp_model) {
LinearExpr objective;
@@ -512,7 +513,7 @@ void CreateObjective(
// and not the alternate copies.
void AddCumulativeRelaxation(
const JsspInputProblem& problem,
const std::vector<std::vector<JobTaskData>>& job_to_tasks,
absl::Span<const std::vector<JobTaskData>> job_to_tasks,
IntervalVar makespan_interval, CpModelBuilder& cp_model) {
const int num_jobs = problem.jobs_size();
const int num_machines = problem.machines_size();
@@ -579,33 +580,13 @@ void AddCumulativeRelaxation(
cumul.AddDemand(makespan_interval, component.size());
}
}
// Add a global cumulative that contains all main intervals.
//
// On most benchmarks, this is the only cumulative constraint created as the
// graph of connected interval has only one component.
//
// Even on benchmarks with cliques, it still helps, as it allows a global
// energetic reasoning that uses the makespan.
LOG(INFO) << "Add global cumulative with " << num_tasks << " intervals and "
<< num_machines << " machines";
CumulativeConstraint global_cumul = cp_model.AddCumulative(num_machines);
for (int j = 0; j < num_jobs; ++j) {
const int num_tasks_in_job = problem.jobs(j).tasks_size();
for (int t = 0; t < num_tasks_in_job; ++t) {
global_cumul.AddDemand(job_to_tasks[j][t].interval, 1);
}
}
if (absl::GetFlag(FLAGS_use_interval_makespan)) {
global_cumul.AddDemand(makespan_interval, num_machines);
}
}
// This redundant linear constraint states that the sum of durations of all
// tasks is a lower bound of the makespan * number of machines.
void AddMakespanRedundantConstraints(
const JsspInputProblem& problem,
const std::vector<std::vector<JobTaskData>>& job_to_tasks, IntVar makespan,
absl::Span<const std::vector<JobTaskData>> job_to_tasks, IntVar makespan,
CpModelBuilder& cp_model) {
const int num_machines = problem.machines_size();
@@ -621,8 +602,8 @@ void AddMakespanRedundantConstraints(
void DisplayJobStatistics(
const JsspInputProblem& problem, int64_t horizon,
const std::vector<std::vector<JobTaskData>>& job_to_tasks,
const std::vector<std::vector<std::vector<AlternativeTaskData>>>&
absl::Span<const std::vector<JobTaskData>> job_to_tasks,
absl::Span<const std::vector<std::vector<AlternativeTaskData>>>
job_task_to_alternatives) {
const int num_jobs = job_to_tasks.size();
int num_tasks = 0;
@@ -747,7 +728,7 @@ void Solve(const JsspInputProblem& problem) {
// CP-SAT now has a default strategy for scheduling problems that works best.
if (absl::GetFlag(FLAGS_display_sat_model)) {
LOG(INFO) << cp_model.Proto().DebugString();
LOG(INFO) << cp_model.Proto();
}
// Setup parameters.
@@ -766,6 +747,9 @@ void Solve(const JsspInputProblem& problem) {
parameters.add_extra_subsolvers("objective_shaving_search");
}
// Tells the solver we have a makespan objective.
parameters.set_push_all_tasks_toward_start(true);
const CpSolverResponse response =
SolveWithParameters(cp_model.Build(), parameters);

View File

@@ -22,6 +22,7 @@
#include <vector>
#include "absl/flags/flag.h"
#include "absl/types/span.h"
#include "google/protobuf/text_format.h"
#include "ortools/base/commandlineflags.h"
#include "ortools/base/init_google.h"
@@ -43,7 +44,7 @@ namespace sat {
void CheckAndPrint2DSolution(
const CpSolverResponse& response,
const packing::MultipleDimensionsBinPackingProblem& problem,
const std::vector<std::vector<IntervalVar>>& interval_by_item_dimension,
absl::Span<const std::vector<IntervalVar>> interval_by_item_dimension,
std::string* solution_in_ascii_form) {
const int num_items = problem.items_size();

View File

@@ -21,6 +21,7 @@
#include "absl/flags/parse.h"
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "ortools/base/logging.h"
#include "ortools/sat/cp_model.h"
#include "ortools/sat/cp_model.pb.h"
@@ -29,9 +30,9 @@
namespace operations_research {
namespace sat {
void PrintSolution(const std::vector<std::vector<int> >& data,
const std::vector<std::vector<bool> >& h_arcs,
const std::vector<std::vector<bool> >& v_arcs) {
void PrintSolution(absl::Span<const std::vector<int>> data,
absl::Span<const std::vector<bool>> h_arcs,
absl::Span<const std::vector<bool>> v_arcs) {
const int num_rows = data.size();
const int num_columns = data[0].size();
@@ -65,7 +66,7 @@ void PrintSolution(const std::vector<std::vector<int> >& data,
std::cout << last_line << std::endl;
}
void SlitherLink(const std::vector<std::vector<int> >& data) {
void SlitherLink(const std::vector<std::vector<int>>& data) {
const int num_rows = data.size();
const int num_columns = data[0].size();
@@ -201,7 +202,7 @@ void SlitherLink(const std::vector<std::vector<int> >& data) {
const CpSolverResponse response = Solve(builder.Build());
std::vector<std::vector<bool> > h_arcs(num_rows + 1);
std::vector<std::vector<bool>> h_arcs(num_rows + 1);
for (int y = 0; y < num_rows + 1; ++y) {
for (int x = 0; x < num_columns; ++x) {
const int arc = undirected_horizontal_arc(x, y);
@@ -211,7 +212,7 @@ void SlitherLink(const std::vector<std::vector<int> >& data) {
}
}
std::vector<std::vector<bool> > v_arcs(num_columns + 1);
std::vector<std::vector<bool>> v_arcs(num_columns + 1);
for (int y = 0; y < num_rows; ++y) {
for (int x = 0; x < num_columns + 1; ++x) {
const int arc = undirected_vertical_arc(x, y);
@@ -230,18 +231,18 @@ void SlitherLink(const std::vector<std::vector<int> >& data) {
int main(int argc, char** argv) {
absl::ParseCommandLine(argc, argv);
const std::vector<std::vector<int> > tiny = {{3, 3, 1}};
const std::vector<std::vector<int>> tiny = {{3, 3, 1}};
const std::vector<std::vector<int> > small = {
const std::vector<std::vector<int>> small = {
{3, 2, -1, 3}, {-1, -1, -1, 2}, {3, -1, -1, -1}, {3, -1, 3, 1}};
const std::vector<std::vector<int> > medium = {
const std::vector<std::vector<int>> medium = {
{-1, 0, -1, 1, -1, -1, 1, -1}, {-1, 3, -1, -1, 2, 3, -1, 2},
{-1, -1, 0, -1, -1, -1, -1, 0}, {-1, 3, -1, -1, 0, -1, -1, -1},
{-1, -1, -1, 3, -1, -1, 0, -1}, {1, -1, -1, -1, -1, 3, -1, -1},
{3, -1, 1, 3, -1, -1, 3, -1}, {-1, 0, -1, -1, 3, -1, 3, -1}};
const std::vector<std::vector<int> > big = {
const std::vector<std::vector<int>> big = {
{3, -1, -1, -1, 2, -1, 1, -1, 1, 2},
{1, -1, 0, -1, 3, -1, 2, 0, -1, -1},
{-1, 3, -1, -1, -1, -1, -1, -1, 3, -1},

View File

@@ -20,6 +20,7 @@
#include "absl/flags/flag.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/types/span.h"
#include "ortools/base/init_google.h"
#include "ortools/base/logging.h"
#include "ortools/sat/cp_model.h"
@@ -36,9 +37,9 @@ namespace operations_research {
namespace sat {
// Solve a single machine problem with weighted tardiness cost.
void Solve(const std::vector<int64_t>& durations,
const std::vector<int64_t>& due_dates,
const std::vector<int64_t>& weights) {
void Solve(absl::Span<const int64_t> durations,
absl::Span<const int64_t> due_dates,
absl::Span<const int64_t> weights) {
const int num_tasks = durations.size();
CHECK_EQ(due_dates.size(), num_tasks);
CHECK_EQ(weights.size(), num_tasks);
@@ -163,7 +164,8 @@ void Solve(const std::vector<int64_t>& durations,
Model model;
model.Add(NewSatParameters(absl::GetFlag(FLAGS_params)));
model.GetOrCreate<SatParameters>()->set_log_search_progress(true);
model.Add(NewFeasibleSolutionObserver([&](const CpSolverResponse& r) {
model.Add(NewFeasibleSolutionObserver([&, due_dates, durations,
weights](const CpSolverResponse& r) {
// Note that we compute the "real" cost here and do not use the tardiness
// variables. This is because in the core based approach, the tardiness
// variable might be fixed before the end date, and we just have a >=

View File

@@ -530,24 +530,32 @@ def SolveRcpsp(
# Solve model.
solver = cp_model.CpSolver()
if not _USE_INTERVAL_MAKESPAN.value:
solver.parameters.exploit_all_precedences = True
solver.parameters.use_hard_precedences_in_cumulative = True
# Parse user specified parameters.
if params:
text_format.Parse(params, solver.parameters)
# Favor objective_shaving_search over objective_lb_search.
if solver.parameters.num_workers >= 16 and solver.parameters.num_workers < 24:
solver.parameters.ignore_subsolvers.append("objective_lb_search")
solver.parameters.extra_subsolvers.append("objective_shaving_search")
# Experimental: Specify the fact that the objective is a makespan
solver.parameters.push_all_tasks_toward_start = True
# Enable logging in the main solve.
if in_main_solve:
solver.parameters.log_search_progress = True
#
status = solver.Solve(model)
if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
assignment = rcpsp_pb2.RcpspAssignment()
for t, _ in enumerate(problem.tasks):
if t in task_starts:
assignment.start_of_task.append(solver.Value(task_starts[t]))
for r in range(len(task_to_presence_literals[t])):
if solver.BooleanValue(task_to_presence_literals[t][r]):
for r, recipe_literal in enumerate(task_to_presence_literals[t]):
if solver.BooleanValue(recipe_literal):
assignment.selected_recipe_of_task.append(r)
break
else: # t is not an active task.

View File

@@ -418,23 +418,22 @@ bool NeighborhoodGeneratorHelper::IntervalIsActive(
return false;
}
void NeighborhoodGeneratorHelper::FilterInactiveIntervals(
std::vector<int> NeighborhoodGeneratorHelper::KeepActiveIntervals(
absl::Span<const int> unfiltered_intervals,
const CpSolverResponse& initial_solution,
std::vector<int>* filtered_intervals) const {
filtered_intervals->clear();
const CpSolverResponse& initial_solution) const {
std::vector<int> filtered_intervals;
filtered_intervals.reserve(unfiltered_intervals.size());
absl::ReaderMutexLock lock(&domain_mutex_);
for (const int i : unfiltered_intervals) {
if (IntervalIsActive(i, initial_solution)) filtered_intervals->push_back(i);
if (IntervalIsActive(i, initial_solution)) filtered_intervals.push_back(i);
}
return filtered_intervals;
}
std::vector<int> NeighborhoodGeneratorHelper::GetActiveIntervals(
const CpSolverResponse& initial_solution) const {
std::vector<int> result;
FilterInactiveIntervals(TypeToConstraints(ConstraintProto::kInterval),
initial_solution, &result);
return result;
return KeepActiveIntervals(TypeToConstraints(ConstraintProto::kInterval),
initial_solution);
}
std::vector<std::pair<int, int>>
@@ -2024,6 +2023,19 @@ Neighborhood RandomPrecedenceSchedulingNeighborhoodGenerator::Generate(
precedences, initial_solution, helper_);
}
namespace {
void AppendVarsFromAllIntervalIndices(absl::Span<const int> indices,
absl::Span<const int> all_intervals,
const CpModelProto& model_proto,
std::vector<int>* variables) {
for (const int index : indices) {
const std::vector<int> vars =
UsedVariables(model_proto.constraints(all_intervals[index]));
variables->insert(variables->end(), vars.begin(), vars.end());
}
}
} // namespace
Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate(
const CpSolverResponse& initial_solution, double difficulty,
absl::BitGenRef random) {
@@ -2038,21 +2050,17 @@ Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate(
std::vector<int> intervals_to_relax;
intervals_to_relax.reserve(partition.selected_indices.size());
std::vector<int> variables_to_fix;
for (const int index : partition.selected_indices) {
intervals_to_relax.push_back(active_intervals[index]);
}
intervals_to_relax.insert(intervals_to_relax.end(),
partition.selected_indices.begin(),
partition.selected_indices.end());
if (helper_.Parameters().push_all_tasks_toward_start()) {
for (const int index : partition.indices_before_selected) {
const int interval = active_intervals[index];
// Remove the interval so that we do not use it for precedences.
intervals_to_relax.push_back(interval);
// Fix all variables.
const std::vector<int> vars =
UsedVariables(helper_.ModelProto().constraints(interval));
variables_to_fix.insert(variables_to_fix.end(), vars.begin(), vars.end());
}
intervals_to_relax.insert(intervals_to_relax.end(),
partition.indices_before_selected.begin(),
partition.indices_before_selected.end());
AppendVarsFromAllIntervalIndices(partition.indices_before_selected,
active_intervals, helper_.ModelProto(),
&variables_to_fix);
}
gtl::STLSortAndRemoveDuplicates(&intervals_to_relax);
@@ -2068,25 +2076,21 @@ Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate(
std::vector<int> variables_to_fix;
std::vector<int> active_intervals;
for (const std::vector<int>& intervals : intervals_in_constraints_) {
helper_.FilterInactiveIntervals(intervals, initial_solution,
&active_intervals);
active_intervals = helper_.KeepActiveIntervals(intervals, initial_solution);
const TimePartition partition = PartitionIndicesAroundRandomTimeWindow(
active_intervals, helper_.ModelProto(), initial_solution, difficulty,
random);
for (const int index : partition.selected_indices) {
intervals_to_relax.push_back(intervals[index]);
}
intervals_to_relax.insert(intervals_to_relax.end(),
partition.selected_indices.begin(),
partition.selected_indices.end());
if (helper_.Parameters().push_all_tasks_toward_start()) {
for (const int index : partition.indices_before_selected) {
const int interval = intervals[index];
// Remove the interval so that we do not use it for precedences.
intervals_to_relax.push_back(interval);
const std::vector<int> vars =
UsedVariables(helper_.ModelProto().constraints(interval));
variables_to_fix.insert(variables_to_fix.end(), vars.begin(),
vars.end());
}
intervals_to_relax.insert(intervals_to_relax.end(),
partition.indices_before_selected.begin(),
partition.indices_before_selected.end());
AppendVarsFromAllIntervalIndices(partition.indices_before_selected,
active_intervals, helper_.ModelProto(),
&variables_to_fix);
}
}

View File

@@ -204,18 +204,11 @@ class NeighborhoodGeneratorHelper : public SubSolver {
return absl::MakeSpan(type_to_constraints_[type]);
}
// Checks if an interval is active w.r.t. the initial_solution.
// An interval is inactive if and only if it is either unperformed in the
// solution or constant in the model.
bool IntervalIsActive(int index,
const CpSolverResponse& initial_solution) const
ABSL_SHARED_LOCKS_REQUIRED(domain_mutex_);
// Filters a vector of intervals against the initial_solution, and returns
// only the active intervals.
void FilterInactiveIntervals(absl::Span<const int> unfiltered_intervals,
const CpSolverResponse& initial_solution,
std::vector<int>* filtered_intervals) const;
std::vector<int> KeepActiveIntervals(
absl::Span<const int> unfiltered_intervals,
const CpSolverResponse& initial_solution) const;
// Returns the list of indices of active interval constraints according
// to the initial_solution and the parameter lns_focus_on_performed_intervals.
@@ -291,6 +284,13 @@ class NeighborhoodGeneratorHelper : public SubSolver {
bool ObjectiveDomainIsConstraining() const
ABSL_SHARED_LOCKS_REQUIRED(domain_mutex_);
// Checks if an interval is active w.r.t. the initial_solution.
// An interval is inactive if and only if it is either unperformed in the
// solution or constant in the model.
bool IntervalIsActive(int index,
const CpSolverResponse& initial_solution) const
ABSL_SHARED_LOCKS_REQUIRED(domain_mutex_);
const SatParameters& parameters_;
const CpModelProto& model_proto_;
int shared_bounds_id_;

View File

@@ -573,7 +573,6 @@ absl::flat_hash_map<std::string, SatParameters> GetNamedParameters(
{
SatParameters new_params = base_params;
new_params.set_linearization_level(2);
new_params.set_use_objective_shaving_search(true);
new_params.set_cp_model_presolve(true);
new_params.set_cp_model_probing_level(0);
@@ -581,7 +580,14 @@ absl::flat_hash_map<std::string, SatParameters> GetNamedParameters(
if (base_params.use_dual_scheduling_heuristics()) {
AddDualSchedulingHeuristics(new_params);
}
strategies["objective_shaving_search"] = new_params;
new_params.set_linearization_level(0);
strategies["objective_shaving_search_no_lp"] = new_params;
new_params.set_linearization_level(2);
strategies["objective_shaving_search_max_lp"] = new_params;
}
{
@@ -696,44 +702,27 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
names.push_back(name);
}
const int num_workers_to_generate =
base_params.num_workers() - base_params.shared_tree_num_workers();
// We use the default if empty.
if (base_params.subsolvers().empty()) {
// Note that the order is important as the list can be truncated.
names.push_back("default_lp");
names.push_back("fixed");
names.push_back("core");
names.push_back("less_encoding");
names.push_back("no_lp");
names.push_back("max_lp");
names.push_back("reduced_costs");
names.push_back("pseudo_costs");
names.push_back("quick_restart");
names.push_back("reduced_costs");
names.push_back("quick_restart_no_lp");
names.push_back("pseudo_costs");
names.push_back("lb_tree_search");
names.push_back("probing");
names.push_back("objective_lb_search");
names.push_back("objective_shaving_search_no_lp");
names.push_back("objective_shaving_search_max_lp");
names.push_back("probing_max_lp");
names.push_back("objective_lb_search_no_lp");
names.push_back("objective_lb_search_max_lp");
// Do not add objective_lb_search if core is active and num_workers <= 16.
if (cp_model.has_objective() &&
(cp_model.objective().vars().size() == 1 || // core is not active
num_workers_to_generate >= 18)) {
names.push_back("objective_lb_search");
}
if (num_workers_to_generate >= 20) {
names.push_back("objective_lb_search_no_lp");
}
if (num_workers_to_generate >= 22) {
names.push_back("objective_shaving_search");
}
if (num_workers_to_generate >= 24) {
names.push_back("probing_max_lp");
}
if (num_workers_to_generate >= 28) {
names.push_back("objective_lb_search_max_lp");
}
#if !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP)
if (absl::GetFlag(FLAGS_cp_model_use_max_hs)) names.push_back("max_hs");
#endif // !defined(__PORTABLE_PLATFORM__) && defined(USE_SCIP)
@@ -824,34 +813,40 @@ std::vector<SatParameters> GetDiverseSetOfParameters(
result.push_back(params);
}
// In interleaved mode, we run all of them
// TODO(user): Actually make sure the gap num_workers <-> num_heuristics is
// contained.
if (base_params.interleave_search()) return result;
const int num_non_shared_workers = std::max(
0, base_params.num_workers() - base_params.shared_tree_num_workers());
if (cp_model.has_objective() && !cp_model.objective().vars().empty()) {
const auto heuristic_num_workers = [](int num_workers) {
DCHECK_GE(num_workers, 0);
if (num_workers == 1) return 1;
if (num_workers <= 4) return num_workers - 1;
if (num_workers <= 8) return num_workers - 2;
if (num_workers <= 16) return num_workers - (num_workers / 4 + 1);
return num_workers - (num_workers / 2 - 3);
};
const int target = std::min<int>(
heuristic_num_workers(num_non_shared_workers), result.size());
// If there is an objective, the extra workers will use LNS.
// Make sure we have at least min_num_lns_workers() of them.
const int target =
std::max(base_params.shared_tree_num_workers() > 0 ? 0 : 1,
num_workers_to_generate - base_params.min_num_lns_workers());
if (!base_params.interleave_search() && result.size() > target) {
result.resize(target);
}
} else {
if (result.size() > target) result.resize(target);
} else { // No objective.
// If strategies that do not require a full worker are present, leave a
// few workers for them.
const bool need_extra_workers =
!base_params.interleave_search() &&
(base_params.use_rins_lns() || base_params.use_feasibility_pump());
int target = num_workers_to_generate;
if (need_extra_workers && target > 4) {
if (target <= 8) {
target -= 1;
} else if (target == 9) {
target -= 2;
} else {
target -= 3;
}
}
if (!base_params.interleave_search() && result.size() > target) {
result.resize(target);
}
// Currently, we have 8 SAT search heuristics. So
const int num_extra_workers =
num_non_shared_workers <= 4 ? 0 : 1 + need_extra_workers;
const int target = std::min<int>(num_non_shared_workers - num_extra_workers,
result.size());
if (result.size() > target) result.resize(target);
}
return result;
}

View File

@@ -126,11 +126,6 @@ ABSL_FLAG(bool, cp_model_dump_models, false,
"format to 'FLAGS_cp_model_dump_prefix'{model|presolved_model|"
"mapping_model}.pb.txt.");
ABSL_FLAG(bool, cp_model_export_model, false,
"DEBUG ONLY. When set to true, SolveCpModel() will dump the model "
"proto of the original model format to "
"'FLAGS_cp_model_dump_prefix'{name}.pb.txt.");
ABSL_FLAG(bool, cp_model_dump_text_proto, true,
"DEBUG ONLY, dump models in text proto instead of binary proto.");
@@ -1537,6 +1532,7 @@ void LoadBaseModel(const CpModelProto& model_proto, Model* model) {
model->GetOrCreate<ProductDetector>()->ProcessImplicationGraph(
model->GetOrCreate<BinaryImplicationGraph>());
model->GetOrCreate<PrecedenceRelations>()->Build();
}
void LoadFeasibilityPump(const CpModelProto& model_proto, Model* model) {
@@ -3051,18 +3047,6 @@ class LnsSolver : public SubSolver {
if (!neighborhood.is_generated) return;
const int64_t num_calls = std::max(int64_t{1}, generator_->num_calls());
const double fully_solved_proportion =
static_cast<double>(generator_->num_fully_solved_calls()) /
static_cast<double>(num_calls);
std::string source_info =
neighborhood.source_info.empty() ? name() : neighborhood.source_info;
const std::string lns_info =
absl::StrFormat("%s(d=%0.2f s=%i t=%0.2f p=%0.2f stall=%d)",
source_info, data.difficulty, task_id,
data.deterministic_limit, fully_solved_proportion,
generator_->num_consecutive_non_improving_calls());
SatParameters local_params(parameters_);
local_params.set_max_deterministic_time(data.deterministic_limit);
local_params.set_stop_after_first_solution(false);
@@ -3072,8 +3056,35 @@ class LnsSolver : public SubSolver {
local_params.set_symmetry_level(0);
local_params.set_find_big_linear_overlap(false);
local_params.set_solution_pool_size(1); // Keep the best solution found.
local_params.set_search_branching(SatParameters::PORTFOLIO_SEARCH);
local_params.set_search_random_variable_pool_size(5);
// TODO(user): Tune these.
// TODO(user): This could be a good candidate for bandits.
const int64_t stall = generator_->num_consecutive_non_improving_calls();
const int search_index = stall < 10 ? 0 : task_id % 2;
absl::string_view search_info;
switch (search_index) {
case 0:
local_params.set_search_branching(SatParameters::AUTOMATIC_SEARCH);
local_params.set_linearization_level(0);
search_info = "auto_l0";
break;
default:
local_params.set_search_branching(SatParameters::PORTFOLIO_SEARCH);
local_params.set_search_random_variable_pool_size(5);
search_info = "folio_rnd";
break;
}
std::string source_info =
neighborhood.source_info.empty() ? name() : neighborhood.source_info;
const int64_t num_calls = std::max(int64_t{1}, generator_->num_calls());
const double fully_solved_proportion =
static_cast<double>(generator_->num_fully_solved_calls()) /
static_cast<double>(num_calls);
const std::string lns_info = absl::StrFormat(
"%s(d=%0.2f s=%i t=%0.2f p=%0.2f stall=%d h=%s)", source_info,
data.difficulty, task_id, data.deterministic_limit,
fully_solved_proportion, stall, search_info);
Model local_model(lns_info);
*(local_model.GetOrCreate<SatParameters>()) = local_params;
@@ -3432,7 +3443,7 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
int num_full_problem_solvers = 0;
if (testing) {
// Register something to find a first solution. Note that this is mainly
// used for experimentation, and using no LP ususally result in a faster
// used for experimentation, and using no_lp usually result in a faster
// first solution.
//
// TODO(user): merge code with standard solver. Just make sure that all
@@ -3531,7 +3542,7 @@ void SolveCpModelParallel(const CpModelProto& model_proto,
if (!model_proto.has_objective() || model_proto.objective().vars().empty() ||
!params.interleave_search() || params.test_feasibility_jump()) {
const int max_num_incomplete_solvers_running_before_the_first_solution =
params.num_workers() <= 8 ? 1 : (params.num_workers() <= 16 ? 2 : 3);
params.num_workers() <= 16 ? 1 : 2;
const int num_reserved_incomplete_solvers =
params.test_feasibility_jump()
? 0
@@ -3980,10 +3991,6 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) {
if (absl::GetFlag(FLAGS_cp_model_dump_models)) {
DumpModelProto(model_proto, "model");
}
if (absl::GetFlag(FLAGS_cp_model_export_model)) {
DumpModelProto(model_proto,
model_proto.name().empty() ? "model" : model_proto.name());
}
#endif // __PORTABLE_PLATFORM__
#if !defined(__PORTABLE_PLATFORM__)

View File

@@ -32,6 +32,7 @@
#include "ortools/sat/intervals.h"
#include "ortools/sat/linear_constraint.h"
#include "ortools/sat/model.h"
#include "ortools/sat/precedences.h"
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/sat/timetable.h"
@@ -282,8 +283,12 @@ NonOverlappingRectanglesDisjunctivePropagator::
x_(x->NumTasks(), model),
watcher_(model->GetOrCreate<GenericLiteralWatcher>()),
overload_checker_(&x_),
forward_detectable_precedences_(true, &x_),
backward_detectable_precedences_(false, &x_),
forward_detectable_precedences_(
true, model->GetOrCreate<PrecedenceRelations>(),
model->GetOrCreate<PrecedencesPropagator>(), &x_),
backward_detectable_precedences_(
false, model->GetOrCreate<PrecedenceRelations>(),
model->GetOrCreate<PrecedencesPropagator>(), &x_),
forward_not_last_(true, &x_),
backward_not_last_(false, &x_),
forward_edge_finding_(true, &x_),

View File

@@ -19,6 +19,7 @@
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "ortools/base/logging.h"
#include "ortools/sat/all_different.h"
#include "ortools/sat/integer.h"
#include "ortools/sat/integer_expr.h"
@@ -102,7 +103,9 @@ std::function<void(Model*)> Disjunctive(
}
for (const bool time_direction : {true, false}) {
DisjunctiveDetectablePrecedences* detectable_precedences =
new DisjunctiveDetectablePrecedences(time_direction, helper);
new DisjunctiveDetectablePrecedences(
time_direction, model->GetOrCreate<PrecedenceRelations>(),
model->GetOrCreate<PrecedencesPropagator>(), helper);
const int id = detectable_precedences->RegisterWith(watcher);
watcher->SetPropagatorPriority(id, 2);
model->TakeOwnership(detectable_precedences);
@@ -867,25 +870,71 @@ bool DisjunctiveDetectablePrecedences::PropagateSubwindow() {
// We need:
// - StartMax(ct) < EndMin(t) for the detectable precedence.
// - StartMin(ct) >= window_start for the value of task_set_end_min.
const AffineExpression after = helper_->Starts()[t];
const IntegerValue end_min_if_present =
helper_->ShiftedStartMin(t) + helper_->SizeMin(t);
const IntegerValue window_start =
sorted_tasks[critical_index].start_min;
IntegerValue min_slack = kMaxIntegerValue;
bool all_statically_before = true;
for (int i = critical_index; i < sorted_tasks.size(); ++i) {
const int ct = sorted_tasks[i].task;
DCHECK_NE(ct, t);
helper_->AddPresenceReason(ct);
helper_->AddEnergyAfterReason(ct, sorted_tasks[i].size_min,
window_start);
helper_->AddStartMaxReason(ct, end_min_if_present - 1);
// We only need the reason for being before if we don't already have
// a static precedence between the tasks.
const AffineExpression before = helper_->Ends()[ct];
const IntegerValue needed = before.constant - after.constant;
const IntegerValue known =
precedence_relations_->GetOffset(before.var, after.var);
if (known < needed || before.coeff != 1 || after.coeff != 1) {
all_statically_before = false;
helper_->AddStartMaxReason(ct, end_min_if_present - 1);
} else {
min_slack = std::min(min_slack, known - needed);
}
}
// Add the reason for t (we only need the end-min).
helper_->AddEndMinReason(t, end_min_if_present);
// We only need the end-min of t if not all the task are already known
// to be before.
IntegerValue new_start_min = task_set_end_min;
if (all_statically_before) {
// We can actually push further!
new_start_min += min_slack;
} else {
helper_->AddEndMinReason(t, end_min_if_present);
}
// If we detect precedences at level zero, let's add them to the
// precedence propagator. The hope is that this leads to faster
// propagation with better reason if they ever trigger.
if (helper_->CurrentDecisionLevel() == 0 && helper_->IsPresent(t)) {
if (after.coeff == 1 && after.var != kNoIntegerVariable) {
for (int i = critical_index; i < sorted_tasks.size(); ++i) {
const AffineExpression before =
helper_->Ends()[sorted_tasks[i].task];
if (before.coeff == 1 && before.var != kNoIntegerVariable) {
const IntegerValue offset = before.constant - after.constant;
precedence_relations_->UpdateOffset(before.var, after.var,
offset);
if (precedences_->AddPrecedenceWithOffsetIfNew(
before.var, after.var, offset)) {
VLOG(2) << t << " after #"
<< sorted_tasks.size() - critical_index << " "
<< before.DebugString() << " " << after.DebugString()
<< " " << offset;
}
}
}
}
}
// This augments the start-min of t. Note that t is not in the task set
// yet, so we will use this updated start if we ever add it there.
if (!helper_->IncreaseStartMin(t, task_set_end_min)) {
if (!helper_->IncreaseStartMin(t, new_start_min)) {
return false;
}

View File

@@ -159,8 +159,12 @@ class DisjunctiveOverloadChecker : public PropagatorInterface {
class DisjunctiveDetectablePrecedences : public PropagatorInterface {
public:
DisjunctiveDetectablePrecedences(bool time_direction,
PrecedenceRelations* relations,
PrecedencesPropagator* precedences,
SchedulingConstraintHelper* helper)
: time_direction_(time_direction),
precedence_relations_(relations),
precedences_(precedences),
helper_(helper),
task_set_(helper->NumTasks()) {}
bool Propagate() final;
@@ -176,6 +180,8 @@ class DisjunctiveDetectablePrecedences : public PropagatorInterface {
std::vector<int> to_propagate_;
const bool time_direction_;
PrecedenceRelations* precedence_relations_;
PrecedencesPropagator* precedences_;
SchedulingConstraintHelper* helper_;
TaskSet task_set_;
};

View File

@@ -433,6 +433,8 @@ class SchedulingConstraintHelper : public PropagatorInterface,
// not handle this correctly.
bool InPropagationLoop() const { return integer_trail_->InPropagationLoop(); }
int CurrentDecisionLevel() const { return trail_->CurrentDecisionLevel(); }
private:
// Generic reason for a <= upper_bound, given that a = b + c in case the
// current upper bound of a is not good enough.

View File

@@ -71,7 +71,7 @@ void PrecedenceRelations::Add(IntegerVariable tail, IntegerVariable head,
}
void PrecedenceRelations::Build() {
CHECK(!is_built_);
if (is_built_) return;
is_built_ = true;
std::vector<int> permutation;
@@ -103,6 +103,49 @@ void PrecedenceRelations::Build() {
}
}
is_dag_ = !graph_has_cycle;
// Let's build full precedences if we don't have too many of them.
// TODO(user): Also do that if we don't have a DAG?
if (!is_dag_) return;
int work = 0;
const int kWorkLimit = 1e6;
absl::StrongVector<IntegerVariable, std::vector<IntegerVariable>> before(
graph_.num_nodes());
const auto add = [&before, this](IntegerVariable a, IntegerVariable b,
IntegerValue offset) {
const auto [it, inserted] = all_relations_.insert({{a, b}, offset});
if (inserted) {
before[b].push_back(a);
} else {
it->second = std::max(it->second, offset);
}
};
// TODO(user): We probably do not need to do both var and its negation.
for (const IntegerVariable tail_var : topological_order_) {
if (++work > kWorkLimit) break;
for (const int arc : graph_.OutgoingArcs(tail_var.value())) {
CHECK_EQ(tail_var.value(), graph_.Tail(arc));
const IntegerVariable head_var = IntegerVariable(graph_.Head(arc));
const IntegerValue arc_offset = arc_offset_[arc];
if (++work > kWorkLimit) break;
add(tail_var, head_var, arc_offset);
add(NegationOf(head_var), NegationOf(tail_var), -arc_offset);
for (const IntegerVariable before_var : before[tail_var]) {
if (++work > kWorkLimit) break;
const IntegerValue offset =
all_relations_.at({before_var, tail_var}) + arc_offset;
add(before_var, head_var, offset);
add(NegationOf(head_var), NegationOf(before_var), -offset);
}
}
}
VLOG(2) << "Full precedences. Work=" << work
<< " Relations=" << all_relations_.size();
}
void PrecedenceRelations::ComputeFullPrecedences(
@@ -589,6 +632,29 @@ void PrecedencesPropagator::AddArc(
}
}
// Adds the precedence i1 + offset <= i2 unless an arc between these two
// variables with an offset at least as large is already present.
//
// Returns true iff a new (or strictly stronger) precedence was added.
// Must only be called at the root level of the search.
bool PrecedencesPropagator::AddPrecedenceWithOffsetIfNew(IntegerVariable i1,
                                                         IntegerVariable i2,
                                                         IntegerValue offset) {
  DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  const bool both_tracked =
      i1 < impacted_arcs_.size() && i2 < impacted_arcs_.size();
  if (both_tracked) {
    // Scan the arcs leaving i1 for one that already reaches i2.
    for (const ArcIndex arc_index : impacted_arcs_[i1]) {
      const ArcInfo& info = arcs_[arc_index];
      if (info.head_var != i2) continue;
      // An arc already exists; only proceed if the new offset is stronger.
      if (offset <= ArcOffset(info)) return false;
      // TODO(user): Modify arc in place!
      break;
    }
  }
  AddPrecedenceWithOffset(i1, i2, offset);
  return true;
}
// TODO(user): On jobshop problems with a lot of tasks per machine (500), this
// takes up a big chunk of the running time even before we find a solution.
// This is because, for each lower bound changed, we inspect 500 arcs even

View File

@@ -78,9 +78,40 @@ class PrecedenceRelations {
void ComputeFullPrecedences(const std::vector<IntegerVariable>& vars,
std::vector<FullIntegerPrecedence>* output);
private:
// If we don't have too many variables, we compute the full transitive closure
// and can query in O(1) if there is a relation between two variables.
// This can be used to optimize some scheduling propagation and reasons.
//
// Warning: if there are too many relations, this will NOT contain all of them.
//
// Returns kMinIntegerValue if there is no known relation.
// Otherwise a + offset <= b.
//
// Const-correct: this is a pure lookup and does not modify any state.
IntegerValue GetOffset(IntegerVariable a, IntegerVariable b) const {
  const auto it = all_relations_.find({a, b});
  return it == all_relations_.end() ? kMinIntegerValue : it->second;
}
// Records the relation a + offset <= b in the hash table of precedences.
// The equivalent negated form is stored as well, so both directions can be
// queried later.
void UpdateOffset(IntegerVariable a, IntegerVariable b, IntegerValue offset) {
  // a + offset <= b is the same relation as (-b) + offset <= (-a).
  const IntegerVariable negated_a = NegationOf(a);
  const IntegerVariable negated_b = NegationOf(b);
  InternalUpdate(a, b, offset);
  InternalUpdate(negated_b, negated_a, -offset);
}
// The current code requires the internal data to be processed once all
// relations are loaded.
//
// TODO(user): Be more dynamic as we start to add relations during search.
void Build();
private:
// Inserts the relation a + offset <= b into the hash table, keeping the
// strongest (largest) offset when the pair is already present.
void InternalUpdate(IntegerVariable a, IntegerVariable b,
                    IntegerValue offset) {
  auto insertion = all_relations_.insert({{a, b}, offset});
  if (!insertion.second) {
    // Pair already present: keep the tighter of the two offsets.
    IntegerValue& stored_offset = insertion.first->second;
    stored_offset = std::max(stored_offset, offset);
  }
}
IntegerTrail* integer_trail_;
util::StaticGraph<> graph_;
@@ -89,6 +120,9 @@ class PrecedenceRelations {
bool is_built_ = false;
bool is_dag_ = false;
std::vector<IntegerVariable> topological_order_;
absl::flat_hash_map<std::pair<IntegerVariable, IntegerVariable>, IntegerValue>
all_relations_;
};
// This class implement a propagator on simple inequalities between integer
@@ -159,6 +193,10 @@ class PrecedencesPropagator : public SatPropagator, PropagatorInterface {
IntegerVariable offset_var,
absl::Span<const Literal> presence_literals);
// This version checks the currently known precedences first. It is however "slow".
bool AddPrecedenceWithOffsetIfNew(IntegerVariable i1, IntegerVariable i2,
IntegerValue offset);
// Finds all the IntegerVariable that are "after" at least two of the
// IntegerVariable in vars. Returns a vector of these precedences relation
// sorted by IntegerPrecedences.var so that it is efficient to find all the

View File

@@ -567,11 +567,7 @@ message SatParameters {
optional int32 num_workers = 206 [default = 0];
optional int32 num_search_workers = 100 [default = 0];
// If there is an objective and we are not in interleave mode, we will reserve
// at least this number of worker for LNS thread.
//
// TODO(user): Also define like for subsolvers the list of "active" type of
// neighborhood used.
// Obsolete parameter. No-op.
optional int32 min_num_lns_workers = 211 [default = 2];
// In multi-thread, the solver can be mainly seen as a portfolio of solvers
@@ -582,7 +578,6 @@ message SatParameters {
// left empty) that looks like:
// - default_lp (linearization_level:1)
// - fixed (only if fixed search specified or scheduling)
// - less_encoding (only if no objective)
// - no_lp (linearization_level:0)
// - max_lp (linearization_level:2)
// - pseudo_costs (only if objective, change search heuristic)
@@ -618,6 +613,9 @@ message SatParameters {
// possible to overwrite the default names above.
repeated SatParameters subsolver_params = 210;
// TODO(user): Also define like for subsolvers the list of "active"
// type of neighborhood used.
// Experimental. If this is true, then we interleave all our major search
// strategy and distribute the work amongst num_workers.
//
@@ -1278,7 +1276,7 @@ message SatParameters {
// Experimental code: specify if the objective pushes all tasks toward the
// start of the schedule.
optional bool push_all_tasks_toward_start = 262 [default = true];
optional bool push_all_tasks_toward_start = 262 [default = false];
// If true, we automatically detect variables whose constraint are always
// enforced by the same literal and we mark them as optional. This allows

View File

@@ -28,6 +28,7 @@
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "ortools/base/logging.h"
#include "ortools/base/timer.h"
#if !defined(__PORTABLE_PLATFORM__)
#include "ortools/base/threadpool.h"
#endif // __PORTABLE_PLATFORM__
@@ -88,7 +89,11 @@ void SequentialLoop(std::vector<std::unique_ptr<SubSolver>>& subsolvers) {
const int best = NextSubsolverToSchedule(subsolvers, num_generated_tasks);
if (best == -1) break;
num_generated_tasks[best]++;
WallTimer timer;
timer.Start();
subsolvers[best]->GenerateTask(task_id++)();
subsolvers[best]->AddTaskDuration(timer.Get());
}
}