reduce memory usage in glop preprocessor

This commit is contained in:
Laurent Perron
2021-03-11 21:05:52 +01:00
parent c3acf7bff4
commit e14760e4b2
3 changed files with 326 additions and 143 deletions

View File

@@ -555,6 +555,11 @@ void LPSolver::RunRevisedSimplexIfNeeded(ProblemSolution* solution,
TimeLimit* time_limit) {
// Note that the transpose matrix is no longer needed at this point.
// This helps reduce the peak memory usage of the solver.
//
// TODO(user): actually, once the linear_program is loaded into the internal
// glop memory, there is no point keeping it around. Add a more complex
// Load/Solve API to RevisedSimplex so we can completely reclaim its memory
// right away.
current_linear_program_.ClearTransposeMatrix();
if (solution->status != ProblemStatus::INIT) return;
if (revised_simplex_ == nullptr) {

View File

@@ -189,6 +189,31 @@ void MainLpPreprocessor::RecoverSolution(ProblemSolution* solution) const {
// ColumnDeletionHelper
// --------------------------------------------------------
// Saves a copy of the given column. CHECK-fails if this column index was
// already saved: each column may be saved at most once through this method.
void ColumnsSaver::SaveColumn(ColIndex col, const SparseColumn& column) {
  const int new_index = static_cast<int>(saved_columns_.size());
  const bool was_inserted = saved_columns_index_.insert({col, new_index}).second;
  CHECK(was_inserted);
  saved_columns_.push_back(column);
}
// Saves a copy of the given column unless this column index was already
// saved, in which case the first saved copy is kept and this is a no-op.
void ColumnsSaver::SaveColumnIfNotAlreadyDone(ColIndex col,
                                              const SparseColumn& column) {
  const int next_index = static_cast<int>(saved_columns_.size());
  if (saved_columns_index_.insert({col, next_index}).second) {
    saved_columns_.push_back(column);
  }
}
// Returns the column previously saved under the given index.
// CHECK-fails if no column was saved for this index.
const SparseColumn& ColumnsSaver::SavedColumn(ColIndex col) const {
  const auto lookup = saved_columns_index_.find(col);
  CHECK(lookup != saved_columns_index_.end());
  return saved_columns_[lookup->second];
}
// Returns the column previously saved under the given index, or a reference
// to a shared empty column if nothing was saved for this index.
const SparseColumn& ColumnsSaver::SavedOrEmptyColumn(ColIndex col) const {
  const auto lookup = saved_columns_index_.find(col);
  if (lookup == saved_columns_index_.end()) return empty_column_;
  return saved_columns_[lookup->second];
}
void ColumnDeletionHelper::Clear() {
is_column_deleted_.clear();
stored_value_.clear();
@@ -408,11 +433,13 @@ namespace {
void SubtractColumnMultipleFromConstraintBound(ColIndex col,
Fractional multiple,
LinearProgram* lp) {
DenseColumn* lbs = lp->mutable_constraint_lower_bounds();
DenseColumn* ubs = lp->mutable_constraint_upper_bounds();
for (const SparseColumn::Entry e : lp->GetSparseColumn(col)) {
const RowIndex row = e.row();
const Fractional delta = multiple * e.coefficient();
lp->SetConstraintBounds(row, lp->constraint_lower_bounds()[row] - delta,
lp->constraint_upper_bounds()[row] - delta);
(*lbs)[row] -= delta;
(*ubs)[row] -= delta;
}
// While not needed for correctness, this allows the presolved problem to
// have the same objective value as the original one.
@@ -1157,7 +1184,6 @@ bool ForcingAndImpliedFreeConstraintPreprocessor::Run(LinearProgram* lp) {
if (num_forcing_constraints > 0) {
VLOG(1) << num_forcing_constraints << " forcing constraints.";
lp_is_maximization_problem_ = lp->IsMaximizationProblem();
deleted_columns_.PopulateFromZero(num_rows, num_cols);
costs_.resize(num_cols, 0.0);
for (ColIndex col(0); col < num_cols; ++col) {
const SparseColumn& column = lp->GetSparseColumn(col);
@@ -1214,7 +1240,7 @@ bool ForcingAndImpliedFreeConstraintPreprocessor::Run(LinearProgram* lp) {
column_deletion_helper_.MarkColumnForDeletionWithState(
col, target_bound,
ComputeVariableStatus(target_bound, lower, upper));
deleted_columns_.mutable_column(col)->PopulateFromSparseVector(column);
columns_saver_.SaveColumn(col, column);
costs_[col] = lp->objective_coefficients()[col];
}
}
@@ -1243,19 +1269,39 @@ void ForcingAndImpliedFreeConstraintPreprocessor::RecoverSolution(
column_deletion_helper_.RestoreDeletedColumns(solution);
row_deletion_helper_.RestoreDeletedRows(solution);
struct DeletionEntry {
RowIndex row;
ColIndex col;
Fractional coefficient;
};
std::vector<DeletionEntry> entries;
// Compute, for each deleted column, the last deleted row in which it appears.
const ColIndex num_cols = deleted_columns_.num_cols();
ColToRowMapping last_deleted_row(num_cols, kInvalidRow);
for (ColIndex col(0); col < num_cols; ++col) {
const ColIndex size = column_deletion_helper_.GetMarkedColumns().size();
for (ColIndex col(0); col < size; ++col) {
if (!column_deletion_helper_.IsColumnMarked(col)) continue;
for (const SparseColumn::Entry e : deleted_columns_.column(col)) {
RowIndex last_row = kInvalidRow;
Fractional last_coefficient;
for (const SparseColumn::Entry e : columns_saver_.SavedColumn(col)) {
const RowIndex row = e.row();
if (row_deletion_helper_.IsRowMarked(row)) {
last_deleted_row[col] = row;
last_row = row;
last_coefficient = e.coefficient();
}
}
if (last_row != kInvalidRow) {
entries.push_back({last_row, col, last_coefficient});
}
}
// Sort by row first and then col.
std::sort(entries.begin(), entries.end(),
[](const DeletionEntry& a, const DeletionEntry& b) {
if (a.row == b.row) return a.col < b.col;
return a.row < b.row;
});
// For each deleted row (in order), compute a bound on the dual values so
// that all the deleted columns for which this row is the last deleted row are
// dual-feasible. Note that for the other columns, it will always be possible
@@ -1266,39 +1312,39 @@ void ForcingAndImpliedFreeConstraintPreprocessor::RecoverSolution(
// reduced cost of 0.0. This column becomes VariableStatus::BASIC, and the
// constraint status is changed to ConstraintStatus::AT_LOWER_BOUND,
// ConstraintStatus::AT_UPPER_BOUND or ConstraintStatus::FIXED_VALUE.
SparseMatrix transpose;
transpose.PopulateFromTranspose(deleted_columns_);
const RowIndex num_rows = solution->dual_values.size();
for (RowIndex row(0); row < num_rows; ++row) {
if (row_deletion_helper_.IsRowMarked(row)) {
Fractional new_dual_value = 0.0;
ColIndex new_basic_column = kInvalidCol;
for (const SparseColumn::Entry e : transpose.column(RowToColIndex(row))) {
const ColIndex col = RowToColIndex(e.row());
if (last_deleted_row[col] != row) continue;
const Fractional scalar_product =
ScalarProduct(solution->dual_values, deleted_columns_.column(col));
const Fractional reduced_cost = costs_[col] - scalar_product;
const Fractional bound = reduced_cost / e.coefficient();
if (is_forcing_up_[row] == !lp_is_maximization_problem_) {
if (bound < new_dual_value) {
new_dual_value = bound;
new_basic_column = col;
}
} else {
if (bound > new_dual_value) {
new_dual_value = bound;
new_basic_column = col;
}
for (int i = 0; i < entries.size();) {
const RowIndex row = entries[i].row;
DCHECK(row_deletion_helper_.IsRowMarked(row));
// Process column with this last deleted row.
Fractional new_dual_value = 0.0;
ColIndex new_basic_column = kInvalidCol;
for (; i < entries.size(); ++i) {
if (entries[i].row != row) break;
const ColIndex col = entries[i].col;
const Fractional scalar_product =
ScalarProduct(solution->dual_values, columns_saver_.SavedColumn(col));
const Fractional reduced_cost = costs_[col] - scalar_product;
const Fractional bound = reduced_cost / entries[i].coefficient;
if (is_forcing_up_[row] == !lp_is_maximization_problem_) {
if (bound < new_dual_value) {
new_dual_value = bound;
new_basic_column = col;
}
} else {
if (bound > new_dual_value) {
new_dual_value = bound;
new_basic_column = col;
}
}
if (new_basic_column != kInvalidCol) {
solution->dual_values[row] = new_dual_value;
solution->variable_statuses[new_basic_column] = VariableStatus::BASIC;
solution->constraint_statuses[row] =
is_forcing_up_[row] ? ConstraintStatus::AT_UPPER_BOUND
: ConstraintStatus::AT_LOWER_BOUND;
}
}
if (new_basic_column != kInvalidCol) {
solution->dual_values[row] = new_dual_value;
solution->variable_statuses[new_basic_column] = VariableStatus::BASIC;
solution->constraint_statuses[row] =
is_forcing_up_[row] ? ConstraintStatus::AT_UPPER_BOUND
: ConstraintStatus::AT_LOWER_BOUND;
}
}
}
@@ -1721,12 +1767,7 @@ bool IsConstraintBlockingVariable(const LinearProgram& lp, Fractional direction,
void UnconstrainedVariablePreprocessor::RemoveZeroCostUnconstrainedVariable(
ColIndex col, Fractional target_bound, LinearProgram* lp) {
DCHECK_EQ(0.0, lp->objective_coefficients()[col]);
if (deleted_rows_as_column_.IsEmpty()) {
deleted_columns_.PopulateFromZero(lp->num_constraints(),
lp->num_variables());
deleted_rows_as_column_.PopulateFromZero(
ColToRowIndex(lp->num_variables()),
RowToColIndex(lp->num_constraints()));
if (rhs_.empty()) {
rhs_.resize(lp->num_constraints(), 0.0);
activity_sign_correction_.resize(lp->num_constraints(), 1.0);
is_unbounded_.resize(lp->num_variables(), false);
@@ -1737,10 +1778,9 @@ void UnconstrainedVariablePreprocessor::RemoveZeroCostUnconstrainedVariable(
const RowIndex row = e.row();
if (!row_deletion_helper_.IsRowMarked(row)) {
row_deletion_helper_.MarkRowForDeletion(row);
const ColIndex row_as_col = RowToColIndex(row);
deleted_rows_as_column_.mutable_column(row_as_col)
->PopulateFromSparseVector(
lp->GetTransposeSparseMatrix().column(row_as_col));
rows_saver_.SaveColumn(
RowToColIndex(row),
lp->GetTransposeSparseMatrix().column(RowToColIndex(row)));
}
const bool is_constraint_upper_bound_relevant =
e.coefficient() > 0.0 ? !is_unbounded_up : is_unbounded_up;
@@ -1757,7 +1797,6 @@ void UnconstrainedVariablePreprocessor::RemoveZeroCostUnconstrainedVariable(
is_unbounded_[col] = true;
Fractional initial_feasible_value = MinInMagnitudeOrZeroIfInfinite(
lp->variable_lower_bounds()[col], lp->variable_upper_bounds()[col]);
deleted_columns_.mutable_column(col)->PopulateFromSparseVector(column);
column_deletion_helper_.MarkColumnForDeletionWithState(
col, initial_feasible_value,
ComputeVariableStatus(initial_feasible_value,
@@ -2000,37 +2039,62 @@ void UnconstrainedVariablePreprocessor::RecoverSolution(
column_deletion_helper_.RestoreDeletedColumns(solution);
row_deletion_helper_.RestoreDeletedRows(solution);
struct DeletionEntry {
RowIndex row;
ColIndex col;
Fractional coefficient;
};
std::vector<DeletionEntry> entries;
// Compute the last deleted column index for each deleted row.
const RowIndex num_rows = solution->dual_values.size();
RowToColMapping last_deleted_column(num_rows, kInvalidCol);
for (RowIndex row(0); row < num_rows; ++row) {
if (row_deletion_helper_.IsRowMarked(row)) {
for (const SparseColumn::Entry e :
deleted_rows_as_column_.column(RowToColIndex(row))) {
const ColIndex col = RowToColIndex(e.row());
if (is_unbounded_[col]) {
last_deleted_column[row] = col;
}
if (!row_deletion_helper_.IsRowMarked(row)) continue;
ColIndex last_col = kInvalidCol;
Fractional last_coefficient;
for (const SparseColumn::Entry e :
rows_saver_.SavedColumn(RowToColIndex(row))) {
const ColIndex col = RowToColIndex(e.row());
if (is_unbounded_[col]) {
last_col = col;
last_coefficient = e.coefficient();
}
}
if (last_col != kInvalidCol) {
entries.push_back({row, last_col, last_coefficient});
}
}
// Sort by col first and then row.
std::sort(entries.begin(), entries.end(),
[](const DeletionEntry& a, const DeletionEntry& b) {
if (a.col == b.col) return a.row < b.row;
return a.col < b.col;
});
// Note that this will be empty if there were no deleted rows.
const ColIndex num_cols = is_unbounded_.size();
for (ColIndex col(0); col < num_cols; ++col) {
if (!is_unbounded_[col]) continue;
for (int i = 0; i < entries.size();) {
const ColIndex col = entries[i].col;
CHECK(is_unbounded_[col]);
Fractional primal_value_shift = 0.0;
RowIndex row_at_bound = kInvalidRow;
for (const SparseColumn::Entry e : deleted_columns_.column(col)) {
const RowIndex row = e.row();
// The second condition is for VariableStatus::FREE rows.
for (; i < entries.size(); ++i) {
if (entries[i].col != col) break;
const RowIndex row = entries[i].row;
// This is for VariableStatus::FREE rows.
//
// TODO(user): In presence of free rows, we must move them to 0.
// Note that currently VariableStatus::FREE rows should be removed before
// this is called.
DCHECK(IsFinite(rhs_[row]));
if (last_deleted_column[row] != col || !IsFinite(rhs_[row])) continue;
if (!IsFinite(rhs_[row])) continue;
const SparseColumn& row_as_column =
deleted_rows_as_column_.column(RowToColIndex(row));
rows_saver_.SavedColumn(RowToColIndex(row));
const Fractional activity =
rhs_[row] - ScalarProduct(solution->primal_values, row_as_column);
@@ -2039,7 +2103,7 @@ void UnconstrainedVariablePreprocessor::RecoverSolution(
// Note that by construction, the variable value will move towards its
// unbounded direction.
if (activity * activity_sign_correction_[row] < 0.0) {
const Fractional bound = activity / e.coefficient();
const Fractional bound = activity / entries[i].coefficient;
if (std::abs(bound) > std::abs(primal_value_shift)) {
primal_value_shift = bound;
row_at_bound = row;
@@ -2148,18 +2212,18 @@ SingletonUndo::SingletonUndo(OperationType type, const LinearProgram& lp,
constraint_status_(status) {}
void SingletonUndo::Undo(const GlopParameters& parameters,
const SparseMatrix& deleted_columns,
const SparseMatrix& deleted_rows,
const SparseColumn& saved_column,
const SparseColumn& saved_row,
ProblemSolution* solution) const {
switch (type_) {
case SINGLETON_ROW:
SingletonRowUndo(deleted_columns, solution);
SingletonRowUndo(saved_column, solution);
break;
case ZERO_COST_SINGLETON_COLUMN:
ZeroCostSingletonColumnUndo(parameters, deleted_rows, solution);
ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
break;
case SINGLETON_COLUMN_IN_EQUALITY:
SingletonColumnInEqualityUndo(parameters, deleted_rows, solution);
SingletonColumnInEqualityUndo(parameters, saved_row, solution);
break;
case MAKE_CONSTRAINT_AN_EQUALITY:
MakeConstraintAnEqualityUndo(solution);
@@ -2213,16 +2277,13 @@ void SingletonPreprocessor::DeleteSingletonRow(MatrixEntry e,
row_deletion_helper_.MarkRowForDeletion(e.row);
undo_stack_.push_back(SingletonUndo(SingletonUndo::SINGLETON_ROW, *lp, e,
ConstraintStatus::FREE));
if (deleted_columns_.column(e.col).IsEmpty()) {
deleted_columns_.mutable_column(e.col)->PopulateFromSparseVector(
lp->GetSparseColumn(e.col));
}
columns_saver_.SaveColumnIfNotAlreadyDone(e.col, lp->GetSparseColumn(e.col));
lp->SetVariableBounds(e.col, new_lower_bound, new_upper_bound);
}
// The dual value of the row needs to be corrected to stay at the optimal.
void SingletonUndo::SingletonRowUndo(const SparseMatrix& deleted_columns,
void SingletonUndo::SingletonRowUndo(const SparseColumn& saved_column,
ProblemSolution* solution) const {
DCHECK_EQ(0, solution->dual_values[e_.row]);
@@ -2248,8 +2309,7 @@ void SingletonUndo::SingletonRowUndo(const SparseMatrix& deleted_columns,
// This is the reduced cost of the variable before the singleton constraint is
// added back.
const Fractional reduced_cost =
cost_ -
ScalarProduct(solution->dual_values, deleted_columns.column(e_.col));
cost_ - ScalarProduct(solution->dual_values, saved_column);
const Fractional reduced_cost_for_minimization =
is_maximization_ ? -reduced_cost : reduced_cost;
@@ -2345,20 +2405,17 @@ bool SingletonPreprocessor::IntegerSingletonColumnIsRemovable(
void SingletonPreprocessor::DeleteZeroCostSingletonColumn(
const SparseMatrix& transpose, MatrixEntry e, LinearProgram* lp) {
const ColIndex transpose_col = RowToColIndex(e.row);
const SparseColumn& column = transpose.column(transpose_col);
undo_stack_.push_back(SingletonUndo(SingletonUndo::ZERO_COST_SINGLETON_COLUMN,
*lp, e, ConstraintStatus::FREE));
if (deleted_rows_.column(transpose_col).IsEmpty()) {
deleted_rows_.mutable_column(transpose_col)
->PopulateFromSparseVector(column);
}
const SparseColumn& row_as_col = transpose.column(transpose_col);
rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_col);
UpdateConstraintBoundsWithVariableBounds(e, lp);
column_deletion_helper_.MarkColumnForDeletion(e.col);
}
// We need to restore the variable value in order to satisfy the constraint.
void SingletonUndo::ZeroCostSingletonColumnUndo(
const GlopParameters& parameters, const SparseMatrix& deleted_rows,
const GlopParameters& parameters, const SparseColumn& saved_row,
ProblemSolution* solution) const {
// If the variable was fixed, this is easy. Note that this is the only
// possible case if the current constraint status is FIXED.
@@ -2390,9 +2447,7 @@ void SingletonUndo::ZeroCostSingletonColumnUndo(
// This is the activity of the constraint before the singleton variable is
// added back to it.
const ColIndex row_as_col = RowToColIndex(e_.row);
const Fractional activity =
ScalarProduct(solution->primal_values, deleted_rows.column(row_as_col));
const Fractional activity = ScalarProduct(solution->primal_values, saved_row);
// First we try to fix the variable at its lower or upper bound and leave the
// constraint VariableStatus::BASIC. Note that we use the same logic as in
@@ -2478,8 +2533,7 @@ void SingletonPreprocessor::DeleteSingletonColumnInEquality(
undo_stack_.push_back(
SingletonUndo(SingletonUndo::SINGLETON_COLUMN_IN_EQUALITY, *lp, e,
ConstraintStatus::FREE));
deleted_rows_.mutable_column(transpose_col)
->PopulateFromSparseVector(row_as_column);
rows_saver_.SaveColumnIfNotAlreadyDone(RowToColIndex(e.row), row_as_column);
// Update the objective function using the equality constraint. We have
// v_col*coeff + expression = rhs,
@@ -2514,10 +2568,10 @@ void SingletonPreprocessor::DeleteSingletonColumnInEquality(
}
void SingletonUndo::SingletonColumnInEqualityUndo(
const GlopParameters& parameters, const SparseMatrix& deleted_rows,
const GlopParameters& parameters, const SparseColumn& saved_row,
ProblemSolution* solution) const {
// First do the same as a zero-cost singleton column.
ZeroCostSingletonColumnUndo(parameters, deleted_rows, solution);
ZeroCostSingletonColumnUndo(parameters, saved_row, solution);
// Then, restore the dual optimal value taking into account the cost
// modification.
@@ -2695,9 +2749,6 @@ bool SingletonPreprocessor::Run(LinearProgram* lp) {
// Initialize column_to_process with the current singleton columns.
ColIndex num_cols(matrix.num_cols());
RowIndex num_rows(matrix.num_rows());
deleted_columns_.PopulateFromZero(num_rows, num_cols);
deleted_rows_.PopulateFromZero(ColToRowIndex(num_cols),
RowToColIndex(num_rows));
StrictITIVector<ColIndex, EntryIndex> column_degree(num_cols, EntryIndex(0));
std::vector<ColIndex> column_to_process;
for (ColIndex col(0); col < num_cols; ++col) {
@@ -2780,7 +2831,11 @@ void SingletonPreprocessor::RecoverSolution(ProblemSolution* solution) const {
// It is important to undo the operations in the correct order, i.e. in the
// reverse order in which they were done.
for (int i = undo_stack_.size() - 1; i >= 0; --i) {
undo_stack_[i].Undo(parameters_, deleted_columns_, deleted_rows_, solution);
const SparseColumn& saved_col =
columns_saver_.SavedOrEmptyColumn(undo_stack_[i].Entry().col);
const SparseColumn& saved_row = rows_saver_.SavedOrEmptyColumn(
RowToColIndex(undo_stack_[i].Entry().row));
undo_stack_[i].Undo(parameters_, saved_col, saved_row, solution);
}
}
@@ -2950,13 +3005,18 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp) {
saved_row_lower_bounds_ = lp->constraint_lower_bounds();
saved_row_upper_bounds_ = lp->constraint_upper_bounds();
// This is needed for postsolving dual.
saved_objective_ = lp->objective_coefficients();
// Note that we don't update the transpose during this preprocessor run.
const SparseMatrix& original_transpose = lp->GetTransposeSparseMatrix();
// Iterate over the rows that were already doubletons before this preprocessor
// run, and whose items don't belong to a column that we deleted during this
// run. This implies that the rows are only ever touched once per run, because
// we only modify rows that have an item on a deleted column.
// Heuristic: We try to substitute sparse columns first to avoid a complexity
// explosion. Note that if we do a long chain of substitutions, we can still
// end up with a complexity of O(num_rows x num_cols) instead of O(num_entries).
//
// TODO(user): There is probably some more robust ways.
std::vector<std::pair<int64_t, RowIndex>> sorted_rows;
const RowIndex num_rows(lp->num_constraints());
for (RowIndex row(0); row < num_rows; ++row) {
const SparseColumn& original_row =
@@ -2966,6 +3026,23 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp) {
lp->constraint_upper_bounds()[row]) {
continue;
}
int64_t score = 0;
for (const SparseColumn::Entry e : original_row) {
const ColIndex col = RowToColIndex(e.row());
score += lp->GetSparseColumn(col).num_entries().value();
}
sorted_rows.push_back({score, row});
}
std::sort(sorted_rows.begin(), sorted_rows.end());
// Iterate over the rows that were already doubletons before this preprocessor
// run, and whose items don't belong to a column that we deleted during this
// run. This implies that the rows are only ever touched once per run, because
// we only modify rows that have an item on a deleted column.
for (const auto p : sorted_rows) {
const RowIndex row = p.second;
const SparseColumn& original_row =
original_transpose.column(RowToColIndex(row));
// Collect the two row items. Skip the ones involving a deleted column.
// Note: we filled r.col[] and r.coeff[] by item order, and currently we
@@ -2997,7 +3074,6 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp) {
r.lb[col_choice] = lp->variable_lower_bounds()[col];
r.ub[col_choice] = lp->variable_upper_bounds()[col];
r.objective_coefficient[col_choice] = lp->objective_coefficients()[col];
r.column[col_choice].PopulateFromSparseVector(lp->GetSparseColumn(col));
}
// 2) One of the columns is fixed: don't bother, it will be treated
@@ -3085,9 +3161,19 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp) {
status_ = ProblemStatus::ABNORMAL;
break;
}
r.column[DELETED].AddMultipleToSparseVectorAndDeleteCommonIndex(
substitution_factor, row, parameters_.drop_tolerance(),
lp->GetMutableSparseColumn(r.col[MODIFIED]));
// Note that we do not save an already-saved column again, so that we only
// save columns from the initial LP. This is important to limit the memory
// usage, though it makes the postsolve a bit more complex.
for (const int col_choice : {DELETED, MODIFIED}) {
const ColIndex col = r.col[col_choice];
columns_saver_.SaveColumnIfNotAlreadyDone(col, lp->GetSparseColumn(col));
}
lp->GetSparseColumn(r.col[DELETED])
.AddMultipleToSparseVectorAndDeleteCommonIndex(
substitution_factor, r.row, parameters_.drop_tolerance(),
lp->GetMutableSparseColumn(r.col[MODIFIED]));
// Apply similar operations on the objective coefficients.
// Note that the offset is being updated by
@@ -3109,13 +3195,20 @@ bool DoubletonEqualityRowPreprocessor::Run(LinearProgram* lp) {
SubtractColumnMultipleFromConstraintBound(r.col[DELETED],
constant_offset_factor, lp);
// If we keep substituting the same "dense" columns over and over, we can use
// memory in O(num_rows * num_cols), which can be orders of magnitude larger
// than the original problem. It is important to reclaim the memory of the
// deleted column right away.
lp->GetMutableSparseColumn(r.col[DELETED])->ClearAndRelease();
// Mark the column and the row for deletion.
column_deletion_helper_.MarkColumnForDeletion(r.col[DELETED]);
row_deletion_helper_.MarkRowForDeletion(row);
row_deletion_helper_.MarkRowForDeletion(r.row);
}
if (status_ != ProblemStatus::INIT) return false;
lp->DeleteColumns(column_deletion_helper_.GetMarkedColumns());
lp->DeleteRows(row_deletion_helper_.GetMarkedRows());
return !column_deletion_helper_.IsEmpty();
}
@@ -3125,6 +3218,10 @@ void DoubletonEqualityRowPreprocessor::RecoverSolution(
RETURN_IF_NULL(solution);
column_deletion_helper_.RestoreDeletedColumns(solution);
row_deletion_helper_.RestoreDeletedRows(solution);
const ColIndex num_cols = solution->variable_statuses.size();
StrictITIVector<ColIndex, bool> new_basic_columns(num_cols, false);
for (const RestoreInfo& r : Reverse(restore_stack_)) {
switch (solution->variable_statuses[r.col[MODIFIED]]) {
case VariableStatus::FIXED_VALUE:
@@ -3140,6 +3237,7 @@ void DoubletonEqualityRowPreprocessor::RecoverSolution(
// Several code paths set the deleted column as basic. The code that
// sets its value in that case is below, after the switch() block.
solution->variable_statuses[r.col[DELETED]] = VariableStatus::BASIC;
new_basic_columns[r.col[DELETED]] = true;
break;
case VariableStatus::AT_LOWER_BOUND:
ABSL_FALLTHROUGH_INTENDED;
@@ -3158,10 +3256,10 @@ void DoubletonEqualityRowPreprocessor::RecoverSolution(
solution->variable_statuses[bounded_var] = bound_backtracking.status;
solution->primal_values[bounded_var] = bound_backtracking.value;
solution->variable_statuses[basic_var] = VariableStatus::BASIC;
new_basic_columns[basic_var] = true;
// If the modified column is VariableStatus::BASIC, then its value is
// already set
// correctly. If it's the deleted column that is basic, its value is
// set below the switch() block.
// already set correctly. If it's the deleted column that is basic, its
// value is set below the switch() block.
}
}
@@ -3175,23 +3273,69 @@ void DoubletonEqualityRowPreprocessor::RecoverSolution(
// Make the deleted constraint status FIXED.
solution->constraint_statuses[r.row] = ConstraintStatus::FIXED_VALUE;
}
// Adjust the dual value of the deleted constraint so that the
// VariableStatus::BASIC column have a reduced cost of zero (if the two
// are VariableStatus::BASIC, just pick one).
// The reduced cost of the other variable will then automatically be
// correct: zero if it's VariableStatus::BASIC, and with the correct sign if
// it's bounded.
const ColChoice col_choice =
solution->variable_statuses[r.col[DELETED]] == VariableStatus::BASIC
? DELETED
: MODIFIED;
// To compute the reduced cost properly (i.e. without the restored row).
solution->dual_values[r.row] = 0.0;
// Now we need to reconstruct the dual values. This is a bit tricky and is
// basically the same as inverting a really structured and easy-to-invert
// matrix. For n doubleton rows, looking only at the new_basic_columns, there
// are exactly n of them by construction (one per row). We consider only this
// n x n matrix, and we must choose dual row values so that we make the
// reduced costs zero on all these columns.
//
// There is always an order that makes this matrix triangular. We start with a
// singleton column, which fixes its corresponding row, and then work on the
// square submatrix that is left. We can always start and continue because, if
// we take the first substituted row of the current submatrix, either its
// deleted column is in the submatrix and gives us a singleton column, or it is
// outside, leaving 2n - 1 entries for a matrix with n columns, so one column
// must be a singleton.
//
// Note(user): Another advantage of working on the "original" matrix before
// this presolve is an increased precision.
//
// TODO(user): We can probably use something better than a vector of set,
// but the number of entries is really small. And the size of a std::set<int>
// is 24 bytes, the same as a std::vector<int>.
StrictITIVector<ColIndex, std::set<int>> col_to_index(num_cols);
for (int i = 0; i < restore_stack_.size(); ++i) {
const RestoreInfo& r = restore_stack_[i];
col_to_index[r.col[MODIFIED]].insert(i);
col_to_index[r.col[DELETED]].insert(i);
}
std::vector<ColIndex> singleton_col;
for (ColIndex col(0); col < num_cols; ++col) {
if (!new_basic_columns[col]) continue;
if (col_to_index[col].size() == 1) singleton_col.push_back(col);
}
while (!singleton_col.empty()) {
const ColIndex col = singleton_col.back();
singleton_col.pop_back();
if (!new_basic_columns[col]) continue;
if (col_to_index[col].empty()) continue;
CHECK_EQ(col_to_index[col].size(), 1);
const int index = *col_to_index[col].begin();
const RestoreInfo& r = restore_stack_[index];
const ColChoice col_choice = r.col[MODIFIED] == col ? MODIFIED : DELETED;
// Adjust the dual value of the deleted constraint so that col have a
// reduced costs of zero.
CHECK_EQ(solution->dual_values[r.row], 0.0);
const SparseColumn& saved_col =
columns_saver_.SavedColumn(r.col[col_choice]);
const Fractional current_reduced_cost =
r.objective_coefficient[col_choice] -
PreciseScalarProduct(solution->dual_values, r.column[col_choice]);
saved_objective_[r.col[col_choice]] -
PreciseScalarProduct(solution->dual_values, saved_col);
solution->dual_values[r.row] = current_reduced_cost / r.coeff[col_choice];
// Update singleton
col_to_index[r.col[DELETED]].erase(index);
col_to_index[r.col[MODIFIED]].erase(index);
if (col_to_index[r.col[DELETED]].size() == 1) {
singleton_col.push_back(r.col[DELETED]);
}
if (col_to_index[r.col[MODIFIED]].size() == 1) {
singleton_col.push_back(r.col[MODIFIED]);
}
}
// Fix potential bad ConstraintStatus::FIXED_VALUE statuses.
@@ -3228,7 +3372,6 @@ void DoubletonEqualityRowPreprocessor::
swap(r->coeff[DELETED], r->coeff[MODIFIED]);
swap(r->lb[DELETED], r->lb[MODIFIED]);
swap(r->ub[DELETED], r->ub[MODIFIED]);
swap(r->column[DELETED], r->column[MODIFIED]);
swap(r->objective_coefficient[DELETED], r->objective_coefficient[MODIFIED]);
}

View File

@@ -134,6 +134,33 @@ class MainLpPreprocessor : public Preprocessor {
// --------------------------------------------------------
// ColumnDeletionHelper
// --------------------------------------------------------
// Some preprocessors need to save columns/rows of the matrix for the postsolve.
// This class helps them do that.
//
// Note that we used to simply use a SparseMatrix, which is like a vector of
// SparseColumn. However, on a large problem with 10+ million columns, each
// empty SparseColumn takes 48 bytes, so if we run some 10 presolve steps that
// each save as little as one column, we are already at 4GB of memory for
// nothing!
class ColumnsSaver {
 public:
  // Saves a copy of the given column under `col`. SaveColumn() CHECKs that
  // `col` was not already saved; SaveColumnIfNotAlreadyDone() silently keeps
  // the first saved copy instead.
  void SaveColumn(ColIndex col, const SparseColumn& column);
  void SaveColumnIfNotAlreadyDone(ColIndex col, const SparseColumn& column);

  // Returns the column saved under `col`. SavedColumn() CHECKs that it was
  // saved; SavedOrEmptyColumn() returns a reference to an empty column
  // instead of failing.
  const SparseColumn& SavedColumn(ColIndex col) const;
  const SparseColumn& SavedOrEmptyColumn(ColIndex col) const;

 private:
  // Returned by SavedOrEmptyColumn() for never-saved indices.
  SparseColumn empty_column_;
  // Maps a saved ColIndex to its position in saved_columns_.
  absl::flat_hash_map<ColIndex, int> saved_columns_index_;
  // TODO(user): We could optimize further since all these are read only, we
  // could use a CompactSparseMatrix instead.
  std::deque<SparseColumn> saved_columns_;
};
// Help preprocessors deal with column deletion.
class ColumnDeletionHelper {
public:
@@ -354,24 +381,25 @@ class SingletonUndo {
SingletonUndo(OperationType type, const LinearProgram& lp, MatrixEntry e,
ConstraintStatus status);
// Undo the operation saved in this class, taking into account the deleted
// columns and rows passed by the calling instance of SingletonPreprocessor.
// Note that the operations must be undone in the reverse order of the one
// in which they were applied.
void Undo(const GlopParameters& parameters,
const SparseMatrix& deleted_columns,
const SparseMatrix& deleted_rows, ProblemSolution* solution) const;
// Undo the operation saved in this class, taking into account the saved
// column and row (at the row/col given by Entry()) passed by the calling
// instance of SingletonPreprocessor. Note that the operations must be undone
// in the reverse order of the one in which they were applied.
void Undo(const GlopParameters& parameters, const SparseColumn& saved_column,
const SparseColumn& saved_row, ProblemSolution* solution) const;
const MatrixEntry& Entry() const { return e_; }
private:
// Actual undo functions for each OperationType.
// Undo() just calls the correct one.
void SingletonRowUndo(const SparseMatrix& deleted_columns,
void SingletonRowUndo(const SparseColumn& saved_column,
ProblemSolution* solution) const;
void ZeroCostSingletonColumnUndo(const GlopParameters& parameters,
const SparseMatrix& deleted_rows,
const SparseColumn& saved_row,
ProblemSolution* solution) const;
void SingletonColumnInEqualityUndo(const GlopParameters& parameters,
const SparseMatrix& deleted_rows,
const SparseColumn& saved_row,
ProblemSolution* solution) const;
void MakeConstraintAnEqualityUndo(ProblemSolution* solution) const;
@@ -471,11 +499,12 @@ class SingletonPreprocessor : public Preprocessor {
absl::StrongVector<RowIndex, SumWithPositiveInfiniteAndOneMissing>
row_ub_sum_;
// The columns that are deleted by this preprocessor.
SparseMatrix deleted_columns_;
// The transpose of the rows that are deleted by this preprocessor.
// TODO(user): implement a RowMajorSparseMatrix class to simplify the code.
SparseMatrix deleted_rows_;
// TODO(user): It is annoying that we need to store a part of the matrix that
// is not deleted here. This extra memory usage might show the limit of our
// presolve architecture that does not require a new matrix factorization on
// the original problem to reconstruct the solution.
ColumnsSaver columns_saver_;
ColumnsSaver rows_saver_;
};
// --------------------------------------------------------
@@ -532,11 +561,11 @@ class ForcingAndImpliedFreeConstraintPreprocessor : public Preprocessor {
private:
bool lp_is_maximization_problem_;
SparseMatrix deleted_columns_;
DenseRow costs_;
DenseBooleanColumn is_forcing_up_;
ColumnDeletionHelper column_deletion_helper_;
RowDeletionHelper row_deletion_helper_;
ColumnsSaver columns_saver_;
};
// --------------------------------------------------------
@@ -702,12 +731,10 @@ class UnconstrainedVariablePreprocessor : public Preprocessor {
ColumnDeletionHelper column_deletion_helper_;
RowDeletionHelper row_deletion_helper_;
ColumnsSaver rows_saver_;
DenseColumn rhs_;
DenseColumn activity_sign_correction_;
DenseBooleanRow is_unbounded_;
SparseMatrix deleted_columns_;
SparseMatrix deleted_rows_as_column_;
};
// --------------------------------------------------------
@@ -839,7 +866,6 @@ class DoubletonEqualityRowPreprocessor : public Preprocessor {
Fractional coeff[NUM_DOUBLETON_COLS];
Fractional lb[NUM_DOUBLETON_COLS];
Fractional ub[NUM_DOUBLETON_COLS];
SparseColumn column[NUM_DOUBLETON_COLS];
Fractional objective_coefficient[NUM_DOUBLETON_COLS];
// If the modified variable has status AT_[LOWER,UPPER]_BOUND, then we'll
@@ -863,6 +889,9 @@ class DoubletonEqualityRowPreprocessor : public Preprocessor {
std::vector<RestoreInfo> restore_stack_;
DenseColumn saved_row_lower_bounds_;
DenseColumn saved_row_upper_bounds_;
ColumnsSaver columns_saver_;
DenseRow saved_objective_;
};
// Because of numerical imprecision, a preprocessor like
@@ -1025,6 +1054,12 @@ class ToMinimizationPreprocessor : public Preprocessor {
// As a consequence, the matrix of the linear program always has full row rank
// after this preprocessor. Note that the slack variables are always added last,
// so that the rightmost square sub-matrix is always the identity matrix.
//
// TODO(user): Do not require this step to talk to the revised simplex. On large
// LPs like supportcase11.mps, this step alone can add 1.5 GB to the solver peak
// memory for no good reason. The internal matrix representation used in glop is
// a lot more efficient, and there is no point keeping the slacks in
// LinearProgram. It is also bad for incrementally modifying the LP.
class AddSlackVariablesPreprocessor : public Preprocessor {
public:
explicit AddSlackVariablesPreprocessor(const GlopParameters* parameters)