export from google3

* Add base/callback.h
* Add algorithms/duplicate_remover.h
* Add algorithms/weighted_set_covering.h

* export algorithms unit tests (not running them yet)
* Few cleanup here and there
This commit is contained in:
Corentin Le Molgat
2023-04-27 08:04:23 +02:00
parent 152513a3ff
commit 28c908f392
25 changed files with 4677 additions and 5 deletions

View File

@@ -31,8 +31,8 @@ portable software suite for solving combinatorial optimization problems.
The suite contains:
* A constraint programming solver;
* Two linear programming solvers;
* Two constraint programming solvers (CP* and CP-SAT);
* Two linear programming solvers (Glop and PDLP);
* Wrappers around commercial and other open source solvers, including mixed
integer solvers;
* Bin packing and knapsack algorithms;
@@ -40,8 +40,7 @@ The suite contains:
* Graph algorithms (shortest paths, min cost flow, max flow, linear sum
assignment).
We wrote OR-Tools in C++, but also provide wrappers in Python, C# and
Java.
We wrote OR-Tools in C++, but provide wrappers in Python, C# and Java.
## Codemap

View File

@@ -13,8 +13,18 @@
# Gather every header and source in this directory as library sources.
file(GLOB _SRCS "*.h" "*.cc")
# Exclude the unit tests from the library sources; they are not part of the
# built library (and are not compiled as test targets here yet).
list(REMOVE_ITEM _SRCS
${CMAKE_CURRENT_SOURCE_DIR}/binary_indexed_tree_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/binary_search_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/dense_doubly_linked_list_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/duplicate_remover_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/dynamic_partition_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/dynamic_permutation_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/find_graph_symmetries_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/hungarian_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/hungarian_test.h
${CMAKE_CURRENT_SOURCE_DIR}/knapsack_solver_for_cuts_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/knapsack_solver_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/sparse_permutation_test.cc
${CMAKE_CURRENT_SOURCE_DIR}/weighted_set_covering_test.cc
)
# Target name for the algorithms library.
set(NAME ${PROJECT_NAME}_algorithms)

View File

@@ -0,0 +1,71 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_ALGORITHMS_BINARY_INDEXED_TREE_H_
#define OR_TOOLS_ALGORITHMS_BINARY_INDEXED_TREE_H_
#include <vector>
#include "absl/log/check.h"
namespace operations_research {
// A binary indexed tree is a data structure that represents an array of
// numbers and supports two operations:
// 1) add a number to i-th element of the array.
// 2) find the sum of a prefix of the array (elements from 0-th to j-th).
// See http://en.wikipedia.org/wiki/Fenwick_tree.
template <typename T>
class BinaryIndexedTree {
 public:
  // Initializes the storage for a binary indexed tree of a given size. The
  // tree contains all zeros initially. Internal slot 0 is unused: internal
  // indices are 1-based, hence the n + 1 storage.
  explicit BinaryIndexedTree(int n) : tree_(n + 1, 0) {}

  // Adds `value` to the `index`-th number. `index` must be in [0, size()).
  void AddItem(int index, T value) {
    DCHECK_GE(index, 0);
    // Compare against size() (int) rather than tree_.size() (size_t) to
    // avoid signed/unsigned comparisons; same bound as index < size().
    DCHECK_LT(index, size());
    // Internal indices of BinaryIndexedTree are 1 based.
    ++index;
    while (index <= size()) {
      tree_[index] += value;
      // Jump to the next node covering `index`: add the lowest set bit.
      index += index & -index;
    }
  }

  // Finds the sum of the prefix [0, index]. Passing index == -1 is allowed,
  // and returns 0 in that case.
  T GetPrefixSum(int index) const {
    DCHECK_GE(index, -1);
    DCHECK_LT(index, size());
    // Internal indices of BinaryIndexedTree are 1 based.
    ++index;
    T prefix_sum = 0;
    while (index > 0) {
      prefix_sum += tree_[index];
      // Strip the lowest set bit to move to the parent range.
      index -= index & -index;
    }
    return prefix_sum;
  }

  // Returns the size of the represented array. The cast makes the narrowing
  // from size_t explicit (the constructor takes an int, so it always fits).
  int size() const { return static_cast<int>(tree_.size()) - 1; }

 private:
  std::vector<T> tree_;
};
} // namespace operations_research
#endif // OR_TOOLS_ALGORITHMS_BINARY_INDEXED_TREE_H_

View File

@@ -0,0 +1,63 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/binary_indexed_tree.h"
#include "gtest/gtest.h"
namespace operations_research {
namespace {
// Fills `prefix_sums` with tree.GetPrefixSum(i) for every i in
// [-1, tree.size()), i.e. tree.size() + 1 values, starting with the sum of
// the empty prefix.
template <typename T>
void GetAllPrefixSums(const BinaryIndexedTree<T>& tree,
                      std::vector<T>* prefix_sums) {
  prefix_sums->clear();
  const int size = tree.size();
  prefix_sums->reserve(size + 1);
  for (int i = 0; i <= size; ++i) {
    prefix_sums->push_back(tree.GetPrefixSum(i - 1));
  }
}
// Typed fixture: the same scenario runs for an integral and a floating-point
// element type.
template <typename T>
class BinaryIndexedTreeTest : public ::testing::Test {};
typedef ::testing::Types<int, float> TestTypes;
TYPED_TEST_SUITE(BinaryIndexedTreeTest, TestTypes);
TYPED_TEST(BinaryIndexedTreeTest, BinaryIndexedTree) {
  // Tree over 5 elements, initially all zero. After each AddItem, the inline
  // comment shows the represented array, and the expectation lists the
  // 6 prefix sums GetPrefixSum(-1) .. GetPrefixSum(4).
  BinaryIndexedTree<TypeParam> tree(5);
  std::vector<TypeParam> prefix_sum;
  tree.AddItem(1, 1);
  // {0, 1, 0, 0, 0}
  GetAllPrefixSums(tree, &prefix_sum);
  EXPECT_EQ((std::vector<TypeParam>{0, 0, 1, 1, 1, 1}), prefix_sum);
  tree.AddItem(0, 2);
  // {2, 1, 0, 0, 0}
  GetAllPrefixSums(tree, &prefix_sum);
  EXPECT_EQ((std::vector<TypeParam>{0, 2, 3, 3, 3, 3}), prefix_sum);
  tree.AddItem(2, 3);
  // {2, 1, 3, 0, 0}
  GetAllPrefixSums(tree, &prefix_sum);
  EXPECT_EQ((std::vector<TypeParam>{0, 2, 3, 6, 6, 6}), prefix_sum);
  tree.AddItem(4, 4);
  // {2, 1, 3, 0, 4}
  GetAllPrefixSums(tree, &prefix_sum);
  EXPECT_EQ((std::vector<TypeParam>{0, 2, 3, 6, 6, 10}), prefix_sum);
  tree.AddItem(3, 5);
  // {2, 1, 3, 5, 4}
  GetAllPrefixSums(tree, &prefix_sum);
  EXPECT_EQ((std::vector<TypeParam>{0, 2, 3, 6, 11, 15}), prefix_sum);
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,381 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/binary_search.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>
#include "absl/numeric/int128.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/time/time.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/util/testing_utils.h"
#include "testing/base/public/benchmark.h"
namespace operations_research {
// Correctly picking the midpoint of two integers in all cases isn't trivial!
template <>
int BinarySearchMidpoint(int lo, int hi) {
  if (lo > hi) std::swap(lo, hi);
  // Same sign: hi - lo cannot overflow, so offset from lo by half the gap.
  // Mixed signs (lo < 0 <= hi): lo + hi cannot overflow; halve the sum.
  const bool same_sign = lo >= 0 || hi < 0;
  return same_sign ? lo + (hi - lo) / 2 : (lo + hi) / 2;
}
namespace {
TEST(BinarySearchTest, DoubleExample) {
  // Searches [0, pi/2] for the crossing point of cos(x) and 2*sin(x); the
  // predicate holds at x_true=0 (cos 0 = 1 > 0 = 2 sin 0) and fails at pi/2.
  const double x =
      BinarySearch<double>(/*x_true=*/0.0, /*x_false=*/M_PI / 2,
                           [](double x) { return cos(x) >= 2 * sin(x); });
  EXPECT_GE(x, 0);
  EXPECT_LE(x, M_PI / 2);
  // NOTE(review): exact floating-point equality — relies on the search
  // converging to a double where both sides compare equal; confirm intended.
  EXPECT_EQ(cos(x), 2 * sin(x)) << x;
}
// Typed test suite covering all supported integer types, including unsigned
// and 128-bit ones, where midpoint computation is the most error-prone.
template <typename T>
class BinarySearchIntTest : public ::testing::Test {};
TYPED_TEST_SUITE_P(BinarySearchIntTest);
TYPED_TEST_P(BinarySearchIntTest, IntExampleWithReversedIntervalOrder) {
  // x_true > x_false is allowed: the search runs "downwards" and returns the
  // value closest to x_false where the predicate still holds.
  EXPECT_EQ(
      BinarySearch<TypeParam>(/*x_true=*/67, /*x_false=*/23,
                              [](TypeParam x) { return x > TypeParam{42}; }),
      43);
}
TYPED_TEST_P(BinarySearchIntTest, IntOverflowStressTest) {
  // Bounds clustered at the extremes of the type's range, to exercise
  // midpoint computations that could overflow or truncate.
  const TypeParam kBounds[] = {std::numeric_limits<TypeParam>::min(),
                               std::numeric_limits<TypeParam>::min() + 1,
                               std::numeric_limits<TypeParam>::min() + 2,
                               std::numeric_limits<TypeParam>::min() + 3,
                               0,
                               1,
                               2,
                               3,
                               std::numeric_limits<TypeParam>::max() - 3,
                               std::numeric_limits<TypeParam>::max() - 2,
                               std::numeric_limits<TypeParam>::max() - 1,
                               std::numeric_limits<TypeParam>::max()};
  for (TypeParam x : kBounds) {
    for (TypeParam y : kBounds) {
      if (x == y) continue;
      // The predicate is true exactly at x = x_true, so the search must
      // return x from any (x, y) pair.
      ASSERT_EQ(BinarySearch<TypeParam>(/*x_true=*/x, /*x_false=*/y,
                                        [x](TypeParam t) { return t == x; }),
                x)
          << "x=" << x << ", y=" << y;
    }
  }
}
REGISTER_TYPED_TEST_SUITE_P(BinarySearchIntTest,
                            IntExampleWithReversedIntervalOrder,
                            IntOverflowStressTest);
using MyTypes = ::testing::Types<int, unsigned, int64_t, uint64_t, absl::int128,
                                 absl::uint128>;
INSTANTIATE_TYPED_TEST_SUITE_P(My, BinarySearchIntTest, MyTypes);
TEST(BinarySearchTest, LargeInt128SearchDomain) {
  absl::int128 target = -1234567890123456789;
  target <<= 50;  // Make sure it does need more than 64 or even 96 bits.
  // Searching the full int128 range for the largest value < target must find
  // exactly target - 1.
  EXPECT_EQ(BinarySearch<absl::int128>(
                /*x_true=*/std::numeric_limits<absl::int128>::min(),
                /*x_false=*/std::numeric_limits<absl::int128>::max(),
                [target](absl::int128 x) { return x < target; }),
            target - 1);
}
TEST(BinarySearchTest, VeryLongDoubleSearchDomain) {
  // Binary search for the smallest possible long double that is > 0,
  // starting with interval [0, numeric_limit::max()]. This is probably close
  // to the longest possible binary search on a widely-available numerical
  // type.
  EXPECT_EQ(BinarySearch<long double>(
                /*x_true=*/std::numeric_limits<long double>::max(),
                /*x_false=*/0.0, [](long double x) { return x > 0; }),
            std::numeric_limits<long double>::denorm_min());
}
TEST(BinarySearchTest, InfinityCornerCases) {
  constexpr double kInfinity = std::numeric_limits<double>::infinity();
  // An infinite x_true bound is returned as-is when the predicate holds all
  // the way to it.
  EXPECT_THAT(BinarySearch<double>(
                  /*x_true=*/-kInfinity,
                  /*x_false=*/kInfinity, [](double x) { return x < 0; }),
              -kInfinity);
  // A finite x_true with an infinite x_false still converges.
  EXPECT_EQ(BinarySearch<double>(
                /*x_true=*/-1,
                /*x_false=*/kInfinity, [](double x) { return x < 0; }),
            -1);
  EXPECT_THAT(BinarySearch<double>(
                  /*x_true=*/kInfinity,
                  /*x_false=*/0, [](double x) { return x > 0; }),
              kInfinity);
}
TEST(BinarySearchTest, NanCornerCases) {
  // A NaN x_true bound satisfies !(x == 0) and is returned unchanged.
  EXPECT_THAT(BinarySearch<double>(
                  /*x_true=*/std::numeric_limits<double>::quiet_NaN(),
                  /*x_false=*/0, [](double x) { return !(x == 0); }),
              testing::IsNan());
  // A NaN x_false bound does not prevent convergence to the true side.
  EXPECT_EQ(BinarySearch<double>(
                /*x_true=*/0,
                /*x_false=*/std::numeric_limits<double>::quiet_NaN(),
                [](double x) { return x == 0; }),
            0);
}
TEST(BinarySearchTest, WithAbslDuration) {
  // absl::Duration supports the arithmetic BinarySearch needs out of the box.
  EXPECT_THAT(BinarySearch<absl::Duration>(
                  /*x_true=*/absl::Hours(100000),
                  /*x_false=*/absl::ZeroDuration(),
                  [](absl::Duration x) { return x > absl::ZeroDuration(); }),
              // Smallest non-zero absl::Duration.
              absl::Nanoseconds(0.25));
  EXPECT_EQ(BinarySearch<absl::Duration>(
                /*x_true=*/absl::InfiniteDuration(),
                /*x_false=*/-absl::Seconds(100),
                [](absl::Duration t) { return t > absl::Seconds(1); }),
            absl::InfiniteDuration());
}
TEST(BinarySearchDeathTest, DiesIfEitherBoundaryConditionViolatedInFastbuild) {
  // The boundary DCHECKs (f(x_true) must be true, f(x_false) must be false)
  // only fire in debug builds.
  if (!DEBUG_MODE) GTEST_SKIP();
  EXPECT_DEATH(BinarySearch<int>(/*x_true=*/0, /*x_false=*/42,
                                 [](int x) { return x < 999; }),
               "");
  EXPECT_DEATH(BinarySearch<int>(/*x_true=*/0, /*x_false=*/42,
                                 [](int x) { return x < 0; }),
               "");
  EXPECT_DEATH(BinarySearch<int>(/*x_true=*/0, /*x_false=*/42,
                                 [](int x) { return x > 20; }),
               "");
}
} // namespace
// Examples of cases where one needs to override BinarySearchMidpoint() to get
// correct results.
// Note that template specializations must be exactly in the same namespace,
// hence the presence of these tests outside the unnamed namespace.
template <>
absl::Time BinarySearchMidpoint(absl::Time lhs, absl::Time rhs) {
  // Advance from lhs by half the signed distance between the two points.
  const absl::Duration half_gap = (rhs - lhs) / 2;
  return lhs + half_gap;
}
TEST(BinarySearchTest, WithAbslTime) {
  const absl::Time t0 = absl::Now();
  // With the midpoint overload above, the search resolves down to the
  // smallest representable absl::Time increment past t0.
  EXPECT_EQ(BinarySearch<absl::Time>(
                /*x_true=*/t0 + absl::Hours(1),
                /*x_false=*/t0, [t0](absl::Time x) { return x > t0; }),
            t0 + absl::Nanoseconds(0.25));
  // An infinite x_true bound is returned unchanged when the predicate holds
  // there.
  EXPECT_EQ(BinarySearch<absl::Time>(
                /*x_true=*/absl::InfinitePast(),
                /*x_false=*/absl::Now() + absl::Seconds(100),
                [](absl::Time x) { return x < absl::Now(); }),
            absl::InfinitePast());
}
// Even with a non-monotonic (but deterministic) predicate, the search must
// land on a local "inflexion" point: a value where f is true and the very
// next double towards x_false yields false.
TEST(BinarySearchTest, NonMonoticPredicateReachesLocalInflexionPoint_Double) {
  absl::BitGen random;
  auto generate_random_double = [&random]() {
    // We generate the sign, mantissa and exponent separately.
    return (absl::Bernoulli(random, 0.5) ? 1 : -1) *
           scalbn(absl::Uniform<double>(random, 1, 2),
                  absl::Uniform<int>(random, -1023, 1023));
  };
  constexpr double kEps = std::numeric_limits<double>::epsilon();
  const int kNumAttempts = 100000;
  for (int attempt = 0; attempt < kNumAttempts; ++attempt) {
    // A seeded hash of the bit pattern of x: deterministic for a given x,
    // but with no monotonic structure whatsoever.
    const uint64_t hash_seed = random();
    std::function<bool(double)> non_monotonic_predicate =
        [hash_seed](double x) {
          return util_hash::CityHash64WithSeed(
                     reinterpret_cast<const char*>(&x), sizeof(x), hash_seed) &
                 1;
        };
    // Pick a random [x_true, x_false] interval which verifies f(x_true) = true
    // and f(x_false) = false.
    double x_true, x_false;
    do {
      x_true = generate_random_double();
    } while (!non_monotonic_predicate(x_true));
    // x_false will either be set to a another random double, or to a small
    // perturbation from x_true.
    if (absl::Bernoulli(random, 0.5)) {
      // random double.
      do {
        x_false = generate_random_double();
      } while (non_monotonic_predicate(x_false));
    } else {
      // small perturbation from x_true.
      do {
        const double eps = absl::LogUniform(random, 1, 1000) * kEps;
        x_false = x_true * (1 + (absl::Bernoulli(random, 0.5) ? eps : -eps));
      } while (non_monotonic_predicate(x_false));
    }
    ASSERT_NE(x_true, x_false);
    // Verify that our predicate is deterministic.
    for (int i = 0; i < 20; ++i) {
      ASSERT_TRUE(non_monotonic_predicate(x_true));
    }
    for (int i = 0; i < 20; ++i) {
      ASSERT_FALSE(non_monotonic_predicate(x_false));
    }
    // Perform the binary search.
    const double solution =
        BinarySearch(x_true, x_false, non_monotonic_predicate);
    SCOPED_TRACE(absl::StrFormat("x_true=%.16g, x_false=%.16g, solution=%.16g",
                                 x_true, x_false, solution));
    // Verify that the solution is in [x_true, x_false[.
    if (x_true < x_false) {
      ASSERT_GE(solution, x_true);
      ASSERT_LT(solution, x_false);
    } else {
      ASSERT_LE(solution, x_true);
      ASSERT_GT(solution, x_false);
    }
    // Verify that f(solution')=false, where solution' is the smallest double
    // "after" solution (in the x_true->x_false direction).
    ASSERT_FALSE(non_monotonic_predicate(std::nextafter(solution, x_false)));
  }
}
// With a predicate that answers randomly, the search cannot find anything
// meaningful, but it must still terminate with a value inside the interval.
TEST(BinarySearchTest, NonDeterministicPredicateStillConverges) {
  if (DEBUG_MODE) {
    GTEST_SKIP() << "DCHECKs catch f(x_true)=false or f(x_false)=true.";
  }
  absl::BitGen random;
  std::function<bool(int)> random_predicate = [&random](int) {
    return absl::Bernoulli(random, 0.5);
  };
  const int kNumAttempts = 100000;
  for (int attempt = 0; attempt < kNumAttempts; ++attempt) {
    // Half of the time, make x_false a small offset from x_true to exercise
    // very short search intervals (including the empty one).
    const int x_true = random();
    const int x_false = absl::Bernoulli(random, 0.5)
                            ? x_true + (absl::Bernoulli(random, 0.5) ? 1 : -1) *
                                           absl::LogUniform(random, 0, 1000)
                            : random();
    const int solution = BinarySearch(x_true, x_false, random_predicate);
    SCOPED_TRACE(absl::StrFormat("x_true=%d, x_false=%d, solution=%d", x_true,
                                 x_false, solution));
    if (x_false == x_true) {
      ASSERT_EQ(solution, x_true);
    } else if (x_true < x_false) {
      ASSERT_GE(solution, x_true);
      ASSERT_LT(solution, x_false);
    } else {
      ASSERT_LE(solution, x_true);
      ASSERT_GT(solution, x_false);
    }
  }
}
// Benchmarks a full-range search: the predicate flips at max/2, so the search
// performs close to its maximal number of iterations for the type.
template <typename T>
void BM_BinarySearch(benchmark::State& state) {
  auto functor = [](T x) { return x > std::numeric_limits<T>::max() / 2; };
  for (const auto s : state) {
    // Prevent the compiler from folding the search away.
    testing::DoNotOptimize(functor);
    auto result = BinarySearch<T>(std::numeric_limits<T>::max(),
                                  std::numeric_limits<T>::min(), functor);
    testing::DoNotOptimize(result);
  }
}
BENCHMARK(BM_BinarySearch<float>);
BENCHMARK(BM_BinarySearch<double>);
BENCHMARK(BM_BinarySearch<int>);
BENCHMARK(BM_BinarySearch<unsigned>);
BENCHMARK(BM_BinarySearch<int64_t>);
BENCHMARK(BM_BinarySearch<uint64_t>);
BENCHMARK(BM_BinarySearch<absl::int128>);
TEST(ConvexMinimumTest, ExhaustiveTest) {
  // Enumerates every V-shaped (piecewise linear, convex) array on n points
  // whose flat minimum is the plateau [b1, b2] with value 0, and checks that
  // ConvexMinimum finds a zero inside the plateau. Also tracks how many
  // evaluations it performs.
  const int n = 99;
  std::vector<int> points(n);
  std::vector<int> values(n);
  for (int i = 0; i < n; ++i) points[i] = i;
  int total_num_queries = 0;
  int max_num_queries = 0;
  for (int b1 = 0; b1 < n; ++b1) {
    // Descending slope down to b1...
    for (int i = b1; i >= 0; --i) values[i] = b1 - i;
    for (int b2 = b1; b2 < n; ++b2) {
      // ... and ascending slope from b2 onwards.
      for (int i = b2; i < n; ++i) values[i] = i - b2;
      int num_queries = 0;
      const auto [point, value] = ConvexMinimum<int, int>(points, [&](int p) {
        ++num_queries;
        return values[p];
      });
      total_num_queries += num_queries;
      max_num_queries = std::max(max_num_queries, num_queries);
      ASSERT_EQ(value, 0);
      ASSERT_GE(point, b1);
      ASSERT_LE(point, b2);
    }
  }
  // TODO(user): we can probably do better.
  EXPECT_EQ(total_num_queries, 19376);
  EXPECT_EQ(max_num_queries, 12);
}
// NOTE(review): in the three tests below, `values` is a vector<double> while
// ConvexMinimum is instantiated as <int, int>, so the lambda's double return
// is narrowed to int. The values used (0.0, 1.0) are unaffected, but this is
// presumably meant to be <double, int> or vector<int> — confirm.
TEST(ConvexMinimumTest, OneQueryIfSizeOne) {
  // A single point must be returned after exactly one evaluation.
  std::vector<int> points{0};
  std::vector<double> values{0.0};
  int num_queries = 0;
  const auto [point, value] = ConvexMinimum<int, int>(points, [&](int p) {
    ++num_queries;
    return values[p];
  });
  EXPECT_EQ(point, 0);
  EXPECT_EQ(value, 0.0);
  EXPECT_EQ(num_queries, 1);
}
TEST(ConvexMinimumTest, TwoQueriesIfSizeTwo) {
  // Two points require both evaluations; minimum is at index 0.
  std::vector<int> points{0, 1};
  std::vector<double> values{0.0, 1.0};
  int num_queries = 0;
  const auto [point, value] = ConvexMinimum<int, int>(points, [&](int p) {
    ++num_queries;
    return values[p];
  });
  EXPECT_EQ(point, 0);
  EXPECT_EQ(value, 0.0);
  EXPECT_EQ(num_queries, 2);
}
TEST(ConvexMinimumTest, TwoQueriesIfSizeTwoReversed) {
  // Same as above with the minimum at index 1.
  std::vector<int> points{0, 1};
  std::vector<double> values{1.0, 0.0};
  int num_queries = 0;
  const auto [point, value] = ConvexMinimum<int, int>(points, [&](int p) {
    ++num_queries;
    return values[p];
  });
  EXPECT_EQ(point, 1);
  EXPECT_EQ(value, 0.0);
  EXPECT_EQ(num_queries, 2);
}
} // namespace operations_research

View File

@@ -0,0 +1,41 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/dense_doubly_linked_list.h"
#include <vector>
#include "gtest/gtest.h"
namespace operations_research {
namespace {
TEST(DenseDoublyLinkedListTest, EndToEnd) {
  // The constructor argument lists the elements in list order:
  // 3, 6, 4, 5, 2, 1, 0.
  DenseDoublyLinkedList list(std::vector<int>({3, 6, 4, 5, 2, 1, 0}));
  list.Remove(2);
  list.Remove(1);
  list.Remove(3);
  list.Remove(0);
  // The list that remains is: 6, 4, 5.
  // Per the expectations below, Prev()/Next() return -1 at the ends.
  EXPECT_EQ(-1, list.Prev(6));
  EXPECT_EQ(6, list.Prev(4));
  EXPECT_EQ(4, list.Prev(5));
  EXPECT_EQ(4, list.Next(6));
  EXPECT_EQ(5, list.Next(4));
  EXPECT_EQ(-1, list.Next(5));
}
// TODO(user): test the DCHECKs with death tests.
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,59 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/duplicate_remover.h"
#include <cstddef>
#include <cstdint>
#include "absl/log/check.h"
#include "absl/types/span.h"
namespace operations_research {
size_t DenseIntDuplicateRemover::RemoveDuplicatesInternal(
    absl::Span<int> span) {
  // tmp_mask_ is a vector<uint8_t>-backed bit set rather than a vector<bool>:
  // writing a vector<bool> element involves a read+write, whereas here we
  // write bytes directly.
  //
  // Marks x as seen in the bit mask. Returns true if it was already marked.
  // Bit #i of the mask = bit #(i modulo 8) of byte #(i / 8).
  const auto already_seen = [this](int x) {
    DCHECK_GE(x, 0);
    DCHECK_LT(x, tmp_mask_.size() * 8);
    const uint8_t bit = 1u << (x & 7);
    uint8_t& byte = tmp_mask_[x >> 3];
    if (byte & bit) return true;
    byte |= bit;
    return false;
  };
  // Fast track: scan the leading duplicate-free portion, where elements stay
  // in place and nothing needs to be copied.
  size_t num_kept = 0;
  while (num_kept < span.size() && !already_seen(span[num_kept])) ++num_kept;
  // Past the first duplicate, each element we keep must be shifted left to
  // its new position, which is slightly slower.
  for (size_t i = num_kept + 1; i < span.size(); ++i) {
    const int x = span[i];
    if (already_seen(x)) continue;
    span[num_kept++] = x;
  }
  span.remove_suffix(span.size() - num_kept);
  // Clear the bit mask: the kept elements are exactly the marked values.
  for (const int x : span) tmp_mask_[x >> 3] = 0;
  return num_kept;
}
} // namespace operations_research

View File

@@ -0,0 +1,140 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef OR_TOOLS_ALGORITHMS_DUPLICATE_REMOVER_H_
#define OR_TOOLS_ALGORITHMS_DUPLICATE_REMOVER_H_
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/log/check.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/types/span.h"
#include "google/protobuf/repeated_field.h"
namespace operations_research {
// This class offers an alternative to gtl::linked_hash_set<> which is:
// - stateless: it works directly on a vector<int> or any similar container,
// without storing extra data anywhere;
// - faster when the number of unique values is 5K or above.
//
// The memory usage can be O(num_distinct_values) at any time if you use
// AppendAndLazilyRemoveDuplicates(). In fact, unit tests verify that the
// average number of elements kept is ≤ 1.5 * num_distinct_values, making
// it comparable to a flat_hash_set<int> (whose overhead factor is ~1.68).
//
// Usage pattern:
//
// // One instance of this can handle many sets on the same [0, n) domain.
// int N = 100'000;
// DenseIntDuplicateRemover deduper(N); // Uses N/8 bytes of memory.
// std::vector<int> values; // Your container. Could be RepeatedField<int>.
// for (int x : ...) {
// deduper.AppendAndLazilyRemoveDuplicates(x, &values); // O(1) amortized.
// }
// deduper.RemoveDuplicates(&values); // O(values.size())
//
class DenseIntDuplicateRemover {
 public:
  // `n` is the exclusive upper bound of the values: all values handled must
  // be in [0, n). Allocates the (n+7)/8-byte bit mask once, up front.
  explicit DenseIntDuplicateRemover(int n)
      : n_(n),
        tmp_mask_storage_((n + 7) / 8, 0),
        tmp_mask_(tmp_mask_storage_) {}

  // Removes duplicates in-place, keeping the first occurrence of each value
  // in its original relative order.
  template <class IntContainer>
  void RemoveDuplicates(IntContainer* container);

  // Appends x, and occasionally deduplicates so that the container size stays
  // O(#distinct values), at O(1) amortized cost per call.
  template <class IntContainer>
  void AppendAndLazilyRemoveDuplicates(int x, IntContainer* container);

 private:
  // Container-specific primitives, specialized below for std::vector<int>
  // and google::protobuf::RepeatedField<int>.
  template <class IntContainer>
  void Append(int x, IntContainer* container);
  template <class IntContainer>
  void Truncate(size_t new_size, IntContainer* container);

  // Deduplicates `span` in-place and returns the number of kept elements.
  // Uses, then clears, the shared bit mask below.
  size_t RemoveDuplicatesInternal(absl::Span<int> span);

  absl::BitGen random_;  // Drives the randomized lazy-cleanup schedule.
  const int n_;          // Values are in [0, n_).
  std::vector<uint8_t> tmp_mask_storage_;  // Backing bytes of the bit mask.
  const absl::Span<uint8_t> tmp_mask_;     // One bit per value in [0, n_).
};
// _____________________________________________________________________________
// Implementation of the templates.
// Deduplicates in-place: the non-template internal routine does the O(size)
// work on a span view, then the container is shrunk to the kept prefix.
template <class IntContainer>
void DenseIntDuplicateRemover::RemoveDuplicates(IntContainer* container) {
  Truncate(RemoveDuplicatesInternal(absl::MakeSpan(*container)), container);
}
template <class IntContainer>
void DenseIntDuplicateRemover::AppendAndLazilyRemoveDuplicates(
    int x, IntContainer* container) {
  DCHECK_GE(x, 0);
  DCHECK_LT(x, n_);
  Append(x, container);
  // ALGORITHM:
  // In order to remain stateless, yet call RemoveDuplicates() often enough
  // that the size of the container remains O(num_distinct_elements), but not
  // too often since we must remain O(1) time amortized, we randomize:
  // every time we append an element, we'll call RemoveDuplicates() with
  // probability 1/k, where k is the current size of the container.
  // That way, the added expected complexity is O(k)*1/k = O(1), yet we know
  // that we'll eventually call it. See the unit tests that verify the claims.
  // As an important optimization, since drawing the pseudo-random number is
  // expensive, we only perform it every kCheckPeriod, and to compensate we
  // multiply the probability by the same amount.
  constexpr int kCheckPeriod = 8;
  static_assert(__builtin_popcount(kCheckPeriod) == 1, "must be power of two");
  const size_t size = container->size();
  // Cheap bit test: only consider cleaning up when size is a multiple of
  // kCheckPeriod.
  if (size & (kCheckPeriod - 1)) return;
  // At >= 2*n_ elements there are necessarily many duplicates: always clean
  // up. Otherwise clean up with probability kCheckPeriod / size, which
  // compensates for the sampling above.
  // NOTE(review): `2 * n_` is int arithmetic and could overflow for n_ close
  // to INT_MAX — confirm n_ stays well below that in practice.
  if (size >= 2 * n_ ||
      absl::Uniform<size_t>(random_, 0, container->size()) < kCheckPeriod) {
    RemoveDuplicates(container);
  }
}
// Append specialization for std::vector<int>.
template <>
inline void DenseIntDuplicateRemover::Append(int x,
                                             std::vector<int>* container) {
  container->push_back(x);
}
// Append specialization for proto repeated fields.
template <>
inline void DenseIntDuplicateRemover::Append(
    int x, google::protobuf::RepeatedField<int>* container) {
  container->Add(x);
}
// Truncate specialization for std::vector<int>. new_size is never larger
// than the current size, so resize() only shrinks.
template <>
inline void DenseIntDuplicateRemover::Truncate(size_t new_size,
                                               std::vector<int>* container) {
  container->resize(new_size);
}
// Truncate specialization for proto repeated fields.
template <>
inline void DenseIntDuplicateRemover::Truncate(
    size_t new_size, google::protobuf::RepeatedField<int>* container) {
  container->Truncate(new_size);
}
} // namespace operations_research
#endif // OR_TOOLS_ALGORITHMS_DUPLICATE_REMOVER_H_

View File

@@ -0,0 +1,184 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/duplicate_remover.h"
#include <random>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/base/linked_hash_set.h"
#include "ortools/util/random_engine.h"
#include "testing/base/public/benchmark.h"
#include "util/tuple/dump_vars.h"
namespace operations_research {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesEmpty) {
  // Empty container: a no-op.
  std::vector<int> v;
  DenseIntDuplicateRemover deduper(10);
  deduper.RemoveDuplicates(&v);
  EXPECT_THAT(v, IsEmpty());
}
TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesNZeroAndEmpty) {
  // Degenerate domain n=0 (empty bit mask) must also work on an empty input.
  std::vector<int> v;
  DenseIntDuplicateRemover deduper(0);
  deduper.RemoveDuplicates(&v);
  EXPECT_THAT(v, IsEmpty());
}
TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesSimpleCaseWithDuplicates) {
  // First occurrences are kept, in their original order.
  std::vector<int> v = {1, 8, 2, 2, 8, 4, 1, 2, 7, 0, 2};
  DenseIntDuplicateRemover deduper(9);
  deduper.RemoveDuplicates(&v);
  EXPECT_THAT(v, ElementsAre(1, 8, 2, 4, 7, 0));
}
TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesSimpleCaseWithNoDuplicates) {
  // Without duplicates, the container is left unchanged.
  std::vector<int> v = {3, 2, 0, 5, 4, 1};
  const std::vector<int> v_copy = v;
  DenseIntDuplicateRemover deduper(6);
  deduper.RemoveDuplicates(&v);
  EXPECT_THAT(v, ElementsAreArray(v_copy));
}
TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesWithRepeatedField) {
  // Same semantics on google::protobuf::RepeatedField<int>.
  const std::vector<int> v = {1, 0, 1, 2, 1};
  google::protobuf::RepeatedField<int> r(v.begin(), v.end());
  DenseIntDuplicateRemover deduper(3);
  deduper.RemoveDuplicates(&r);
  EXPECT_THAT(r, ElementsAre(1, 0, 2));
}
// Reference implementation: returns the distinct values of `span`, keeping
// the first occurrence of each value in its original relative order.
std::vector<int> UniqueValues(absl::Span<const int> span) {
  std::vector<int> unique;
  absl::flat_hash_set<int> seen;
  for (const int value : span) {
    const bool is_new = seen.insert(value).second;
    if (is_new) unique.push_back(value);
  }
  return unique;
}
TEST(DenseIntDuplicateRemoverTest, RemoveDuplicatesRandomizedStressTest) {
  // Random small vectors over random domain sizes, checked against the
  // hash-set-based reference above.
  constexpr int kNumValues = 1003;
  // A single deduper is reused across all iterations: this also verifies that
  // RemoveDuplicates() leaves its scratch bit mask clean after each call.
  DenseIntDuplicateRemover deduper(kNumValues);
  constexpr int kNumTests = 1'000'000;
  absl::BitGen random;
  for (int t = 0; t < kNumTests; ++t) {
    const int size = absl::LogUniform(random, 0, 16);
    const int domain_size =
        absl::Uniform(absl::IntervalClosed, random, 1, kNumValues);
    std::vector<int> v(size);
    for (int& x : v) x = absl::Uniform(random, 0, domain_size);
    const std::vector<int> v_initial = v;
    const std::vector<int> unique_values = UniqueValues(v);
    deduper.RemoveDuplicates(&v);
    ASSERT_THAT(v, ElementsAreArray(unique_values)) << DUMP_VARS(t, v_initial);
  }
}
TEST(DenseIntDuplicateRemoverTest,
     AppendAndLazilyRemoveDuplicatesRandomizedStressTest) {
  // Verifies both correctness (final contents match a linked_hash_set, which
  // also keeps first-occurrence order) and the lazy-cleanup size claims.
  constexpr int kNumValues = 103;
  constexpr int kNumTests = 1'000;
  std::mt19937 random;
  gtl::linked_hash_set<int> reference;
  std::vector<int> v;
  int64_t num_extra_elements = 0;
  int64_t num_unique_elements = 0;
  for (int t = 0; t < kNumTests; ++t) {
    const int num_inserts = absl::LogUniform(random, 2, 1 << 16);
    const int domain_size =
        absl::Uniform(absl::IntervalClosed, random, 1, kNumValues);
    v.clear();
    reference.clear();
    DenseIntDuplicateRemover deduper(domain_size);
    for (int i = 0; i < num_inserts; ++i) {
      const int x = absl::Uniform(random, 0, domain_size);
      deduper.AppendAndLazilyRemoveDuplicates(x, &v);
      reference.insert(x);
    }
    // The container may hold duplicates, but never grows far beyond 2 * n.
    ASSERT_LE(v.size(), domain_size * 2 + 15);
    const int old_size = v.size();
    deduper.RemoveDuplicates(&v);
    num_unique_elements += v.size();
    num_extra_elements += old_size - v.size();
    ASSERT_THAT(v, ElementsAreArray(reference))
        << DUMP_VARS(t, num_inserts, domain_size, old_size, v.size());
  }
  // On average, at most 0.5 extra (duplicate) element per unique element is
  // retained between cleanups.
  EXPECT_LE(static_cast<double>(num_extra_elements) / num_unique_elements, 0.5);
}
// Compares the deduper against absl::flat_hash_set insertion over the same
// random stream; the template flag selects which implementation is timed.
template <bool use_flat_hash_set>
void BM_AppendAndLazilyRemoveDuplicates(benchmark::State& state) {
  const int num_inserts = state.range(0);
  const int domain_size = state.range(1);
  // The random input is generated once, outside the timed loop.
  std::vector<int> to_insert(num_inserts);
  random_engine_t random;
  for (int& x : to_insert) x = absl::Uniform(random, 0, domain_size);
  DenseIntDuplicateRemover deduper(domain_size);
  std::vector<int> v;
  absl::flat_hash_set<int> set;
  for (auto _ : state) {
    v.clear();
    set.clear();
    for (int x : to_insert) {
      if (use_flat_hash_set) {
        set.insert(x);
      } else {
        deduper.AppendAndLazilyRemoveDuplicates(x, &v);
      }
    }
    // Include the final cleanup in the measured work.
    if (!use_flat_hash_set) deduper.RemoveDuplicates(&v);
    testing::DoNotOptimize(v);
    testing::DoNotOptimize(set);
  }
  state.SetItemsProcessed(state.iterations() * num_inserts);
}
BENCHMARK(BM_AppendAndLazilyRemoveDuplicates<true>)
    ->ArgPair(1, 10)
    ->ArgPair(10, 2)
    ->ArgPair(10, 10)
    ->ArgPair(100, 100)
    ->ArgPair(100, 10)
    ->ArgPair(10'000, 10'000)
    ->ArgPair(10'000, 1'000)
    ->ArgPair(10'000, 100)
    ->ArgPair(10'000, 10)
    ->ArgPair(1'000'000, 1'000'000)
    ->ArgPair(1'000'000, 10'000)
    ->ArgPair(1'000'000, 100);
BENCHMARK(BM_AppendAndLazilyRemoveDuplicates<false>)
    ->ArgPair(1, 10)
    ->ArgPair(10, 2)
    ->ArgPair(10, 10)
    ->ArgPair(100, 100)
    ->ArgPair(100, 10)
    ->ArgPair(10'000, 10'000)
    ->ArgPair(10'000, 1'000)
    ->ArgPair(10'000, 100)
    ->ArgPair(10'000, 10)
    ->ArgPair(1'000'000, 1'000'000)
    ->ArgPair(1'000'000, 10'000)
    ->ArgPair(1'000'000, 100);
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,442 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/dynamic_partition.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <random>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/strings/str_join.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/base/stl_util.h"
namespace operations_research {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::StartsWith;
using ::testing::UnorderedElementsAre;
// Collects the elements of part `i` of `partition` into a vector, in the
// order in which the partition iterates over them.
std::vector<int> GetPart(const DynamicPartition& partition, int i) {
  std::vector<int> part;
  for (const int element : partition.ElementsInPart(i)) {
    part.push_back(element);
  }
  return part;
}
std::vector<std::vector<int>> GetAllParts(const DynamicPartition& partition) {
std::vector<std::vector<int>> parts;
for (int i = 0; i < partition.NumParts(); ++i) {
parts.push_back(GetPart(partition, i));
}
return parts;
}
// Exercises Refine() / UndoRefineUntilNumPartsEqual() while only checking
// the *contents* of each part, never the order in which parts are listed.
TEST(DynamicPartitionTest, OrderAgnosticPartitioning) {
  DynamicPartition partition(5);
  ASSERT_EQ(5, partition.NumElements());
  ASSERT_EQ(1, partition.NumParts());
  EXPECT_THAT(GetPart(partition, 0), UnorderedElementsAre(0, 1, 2, 3, 4));
  partition.Refine({1, 3, 4});
  EXPECT_EQ(5, partition.NumElements());
  EXPECT_EQ(2, partition.NumParts());
  EXPECT_THAT(GetAllParts(partition),
              UnorderedElementsAre(UnorderedElementsAre(0, 2),
                                   UnorderedElementsAre(1, 3, 4)));
  partition.Refine({0, 3});
  EXPECT_THAT(
      GetAllParts(partition),
      UnorderedElementsAre(UnorderedElementsAre(0), UnorderedElementsAre(1, 4),
                           UnorderedElementsAre(2), UnorderedElementsAre(3)));
  // Corner case: no-op Refine(), on both a singleton and a non-singleton part.
  partition.Refine({0, 1, 4});
  EXPECT_THAT(
      GetAllParts(partition),
      UnorderedElementsAre(UnorderedElementsAre(0), UnorderedElementsAre(1, 4),
                           UnorderedElementsAre(2), UnorderedElementsAre(3)));
  // Corner case: Refine a singleton. Incidentally, our partition becomes
  // fully-split (only singletons), which is also another interesting corner
  // case.
  partition.Refine({4});
  EXPECT_THAT(
      GetAllParts(partition),
      UnorderedElementsAre(UnorderedElementsAre(0), UnorderedElementsAre(1),
                           UnorderedElementsAre(2), UnorderedElementsAre(3),
                           UnorderedElementsAre(4)));
  // Roll back the last 3 parts.
  partition.UndoRefineUntilNumPartsEqual(2);
  EXPECT_THAT(GetAllParts(partition),
              UnorderedElementsAre(UnorderedElementsAre(0, 2),
                                   UnorderedElementsAre(1, 3, 4)));
  // No-op rollback.
  partition.UndoRefineUntilNumPartsEqual(2);
  // Re-apply some refinement.
  partition.Refine({4});
  EXPECT_THAT(GetAllParts(partition),
              UnorderedElementsAre(UnorderedElementsAre(0, 2),
                                   UnorderedElementsAre(1, 3),
                                   UnorderedElementsAre(4)));
  // Roll back until the start.
  partition.UndoRefineUntilNumPartsEqual(1);
  EXPECT_THAT(GetAllParts(partition),
              UnorderedElementsAre(UnorderedElementsAre(0, 1, 2, 3, 4)));
}
// Pins the deterministic order in which Refine() creates and lists parts:
// ElementsAre() is order-sensitive at the outer level here.
TEST(DynamicPartitionTest, PartOrdering) {
  DynamicPartition partition(9);
  partition.Refine({4, 1, 3});
  ASSERT_THAT(GetAllParts(partition),
              ElementsAre(UnorderedElementsAre(0, 2, 5, 6, 7, 8),
                          UnorderedElementsAre(1, 3, 4)));
  partition.Refine({0, 6, 3, 5});
  ASSERT_THAT(
      GetAllParts(partition),
      ElementsAre(UnorderedElementsAre(2, 7, 8), UnorderedElementsAre(1, 4),
                  UnorderedElementsAre(0, 5, 6), UnorderedElementsAre(3)));
  partition.Refine({7, 2, 6, 1});
  ASSERT_THAT(GetAllParts(partition),
              ElementsAre(UnorderedElementsAre(8), UnorderedElementsAre(4),
                          UnorderedElementsAre(0, 5), UnorderedElementsAre(3),
                          UnorderedElementsAre(2, 7), UnorderedElementsAre(1),
                          UnorderedElementsAre(6)));
  partition.Refine({3, 7, 1, 0});
  ASSERT_THAT(GetAllParts(partition),
              ElementsAre(UnorderedElementsAre(8), UnorderedElementsAre(4),
                          UnorderedElementsAre(5), UnorderedElementsAre(3),
                          UnorderedElementsAre(2), UnorderedElementsAre(1),
                          UnorderedElementsAre(6), UnorderedElementsAre(0),
                          UnorderedElementsAre(7)));
}
// Applies two refinements to a 7-element partition, then checks each
// accessor: DebugString() (both sortings), PartOf(), SizeOfPart() and
// ParentOfPart().
TEST(DynamicPartitionTest, Accessors) {
  DynamicPartition partition(7);
  partition.Refine({2, 1, 5, 0});
  partition.Refine({2, 4, 3, 6});
  ASSERT_THAT(GetAllParts(partition), ElementsAre(UnorderedElementsAre(3, 4, 6),
                                                  UnorderedElementsAre(0, 1, 5),
                                                  UnorderedElementsAre(2)));
  // Test DebugString(SORT_LEXICOGRAPHICALLY).
  EXPECT_EQ("0 1 5 | 2 | 3 4 6",
            partition.DebugString(DynamicPartition::SORT_LEXICOGRAPHICALLY));
  // Test DebugString(SORT_BY_PART).
  EXPECT_EQ("3 4 6 | 0 1 5 | 2",
            partition.DebugString(DynamicPartition::SORT_BY_PART));
  // Test PartOf().
  EXPECT_EQ(1, partition.PartOf(0));
  EXPECT_EQ(1, partition.PartOf(1));
  EXPECT_EQ(2, partition.PartOf(2));
  EXPECT_EQ(0, partition.PartOf(3));
  EXPECT_EQ(0, partition.PartOf(4));
  EXPECT_EQ(1, partition.PartOf(5));
  EXPECT_EQ(0, partition.PartOf(6));
  // Test SizeOfPart().
  EXPECT_EQ(3, partition.SizeOfPart(0));
  EXPECT_EQ(3, partition.SizeOfPart(1));
  EXPECT_EQ(1, partition.SizeOfPart(2));
  // Test ParentOfPart().
  EXPECT_EQ(0, partition.ParentOfPart(0));
  EXPECT_EQ(0, partition.ParentOfPart(1));
  EXPECT_EQ(1, partition.ParentOfPart(2));
}
// A zero-element initial partition must not crash and yields an empty
// debug string.
TEST(DynamicPartitionTest, ConstructWithEmptyPartition) {
  DynamicPartition partition(std::vector<int>(0));
  EXPECT_EQ("", partition.DebugString(DynamicPartition::SORT_BY_PART));
}
// Constructing from an explicit element->part mapping: elements that share
// the same initial part id end up grouped in the same part.
TEST(DynamicPartitionTest, ConstructWithPartition) {
  DynamicPartition partition({2, 1, 0, 1, 0, 3, 0});
  EXPECT_EQ("0 | 1 3 | 2 4 6 | 5",
            partition.DebugString(DynamicPartition::SORT_LEXICOGRAPHICALLY));
  EXPECT_EQ("2 4 6 | 1 3 | 0 | 5",
            partition.DebugString(DynamicPartition::SORT_BY_PART));
}
// An out-of-range sorting enum value yields an error message instead of
// crashing.
TEST(DynamicPartitionTest, DebugStringWithUnknownSorting) {
  DynamicPartition partition(4);
  EXPECT_THAT(partition.DebugString(
                  static_cast<DynamicPartition::DebugStringSorting>(987)),
              HasSubstr("Unsupported sorting"));
}
// FprintOfPart(): parts with the same contents (even across different
// partitions) share a fingerprint, and distinct parts of one partition do
// not collide.
TEST(DynamicPartitionTest, FingerprintBasic) {
  DynamicPartition p1(10);
  DynamicPartition p2(/*initial_part_of_element=*/{2, 0, 1, 0, 1, 3});
  p1.Refine({2, 4, 7});
  p1.Refine({0});
  p1.Refine({5, 7});
  p1.Refine({6, 8, 9});
  // We have to rely on all the other methods working as expected: if any of
  // the other unit tests failed, then this one probably will, too.
  ASSERT_EQ("1 3 | 2 4 | 0 | 5",
            p2.DebugString(DynamicPartition::SORT_BY_PART));
  ASSERT_THAT(p1.DebugString(DynamicPartition::SORT_BY_PART),
              StartsWith("1 3 | 2 4 | 0 | 5 |"));
  // The first 3 parts of p1 and p2 have identical contents: equal prints.
  for (int p = 0; p < 3; ++p) {
    EXPECT_EQ(p1.FprintOfPart(p), p2.FprintOfPart(p));
  }
  // All parts of p1 are pairwise distinct: no fingerprint collisions.
  for (int p = 0; p < p1.NumParts(); ++p) {
    for (int q = 0; q < p; ++q) {
      EXPECT_NE(p1.FprintOfPart(p), p1.FprintOfPart(q)) << "Collision!";
    }
  }
}
// p1's part 0 and p2's part 1 both contain {1, 2}, but listed in different
// orders and at different part indices: their fingerprints must still match.
TEST(DynamicPartitionTest, FingerprintDoesNotDependOnElementOrderNorPartIndex) {
  DynamicPartition p1(3);
  DynamicPartition p2(3);
  p1.Refine({0});
  p2.Refine({2, 1});
  ASSERT_THAT(GetPart(p1, 0), ElementsAre(2, 1));
  ASSERT_THAT(GetPart(p2, 1), ElementsAre(1, 2));
  EXPECT_EQ(p1.FprintOfPart(0), p2.FprintOfPart(1));
}
// Applies `num_operations` random operations to `partition` — each one is
// either a Refine() on a random subset of elements or an undo down to a
// random number of parts — then undoes refinements until at most
// `max_num_parts_at_the_end` parts remain.
void ShufflePartition(int num_operations, int max_num_parts_at_the_end,
                      absl::BitGenRef random, DynamicPartition* partition) {
  const int n = partition->NumElements();
  std::vector<int> elements_to_refine_on;
  for (int i = 0; i < num_operations; ++i) {
    if (absl::Bernoulli(random, 1.0 / 2)) {
      // Refine on a random set of elements.
      elements_to_refine_on.clear();
      const int num_elements_to_refine_on = absl::Uniform(random, 0, n);
      for (int j = 0; j < num_elements_to_refine_on; ++j) {
        elements_to_refine_on.push_back(absl::Uniform(random, 0, n));
      }
      gtl::STLSortAndRemoveDuplicates(&elements_to_refine_on);
      partition->Refine(elements_to_refine_on);
    } else {
      // Undo some refines.
      partition->UndoRefineUntilNumPartsEqual(
          absl::Uniform(random, 0, partition->NumParts()) + 1);
    }
  }
  // We're done shuffling. If there are too many parts; un-refine some of them.
  if (partition->NumParts() > max_num_parts_at_the_end) {
    partition->UndoRefineUntilNumPartsEqual(max_num_parts_at_the_end);
  }
}
TEST(DynamicPartitionTest, FingerprintStressTest) {
  // Stress test: create 1000 'random' partitions of {0..9} in up to 3 parts.
  // Since there are 2^10 = 1024 possible subsets of 10 elements; some of
  // the 3000 created parts should coincide; actually we expect about
  // (3000 * 2999) / 2 collisions.
  // The size are just indicative (in opt mode, we stress it a bit more).
  //
  // Timing as of 2014-04-30, on forge: fastbuild=7.5s, opt=22s.
  const int kNumPartitions = DEBUG_MODE ? 1000 : 4000;
  const int kPartitionSize = DEBUG_MODE ? 10 : 12;
  const int kMaxNumParts = 3;
  // Fixed seed: the test is fully deterministic.
  std::mt19937 random(12345);
  std::vector<std::unique_ptr<DynamicPartition>> partitions(kNumPartitions);
  for (int i = 0; i < kNumPartitions; ++i) {
    partitions[i] = std::make_unique<DynamicPartition>(kPartitionSize);
    ShufflePartition(/*num_operations=*/20, kMaxNumParts, random,
                     partitions[i].get());
  }
  // Do a pairwise comparison of all part fingerprints, and verify that all
  // collisions that we obtain are legit (i.e. the parts are actually equal).
  int num_collisions = 0;
  for (int p1 = 0; p1 < kNumPartitions; ++p1) {
    for (int i1 = 0; i1 < partitions[p1]->NumParts(); ++i1) {
      const uint64_t fprint1 = partitions[p1]->FprintOfPart(i1);
      std::vector<int> part1;
      for (const int i : partitions[p1]->ElementsInPart(i1)) part1.push_back(i);
      std::sort(part1.begin(), part1.end());
      for (int p2 = 0; p2 < p1; ++p2) {
        for (int i2 = 0; i2 < partitions[p2]->NumParts(); ++i2) {
          if (partitions[p2]->FprintOfPart(i2) == fprint1) {
            ++num_collisions;
            std::vector<int> part2;
            for (const int i : partitions[p2]->ElementsInPart(i2)) {
              part2.push_back(i);
            }
            std::sort(part2.begin(), part2.end());
            // Equal fingerprints must mean equal (sorted) contents.
            ASSERT_THAT(part2, ElementsAreArray(part1))
                << "Unexpected collision! Fingerprint=" << fprint1;
          }
        }
      }
    }
  }
  // Verify that we had roughly the expected number of collisions.
  // (if we never had zero collisions, or if we always had collisions, then
  // this test wouldn't be as "stressful" as it should be).
  EXPECT_LE(num_collisions, kNumPartitions * kNumPartitions / 4);
  EXPECT_GE(num_collisions,
            kNumPartitions * kNumPartitions / (1 << kPartitionSize));
  EXPECT_GE(kNumPartitions * kNumPartitions / (1 << kPartitionSize), 100);
}
// ElementsInHierarchicalOrder() follows the refinement tree, not the part
// listing order, and is stable under UndoRefineUntilNumPartsEqual().
TEST(DynamicPartitionTest, ElementsInHierarchicalOrder) {
  DynamicPartition partition(/*num_elements=*/5);
  partition.Refine({4, 3});  // Now:(0 1 2 | 3 4)
  partition.Refine({1, 4});  // Now: ((0 2 | 1) | (3 | 4))
  partition.Refine({0});     // Now: (((2 | 0) | 1) | (3 | 4))
  // The parts are sorted differently than the natural order.
  ASSERT_EQ("2 | 3 | 1 | 4 | 0",
            partition.DebugString(DynamicPartition::SORT_BY_PART));
  EXPECT_THAT(partition.ElementsInHierarchicalOrder(),
              ElementsAre(2, 0, 1, 3, 4));
  partition.UndoRefineUntilNumPartsEqual(1);
  // Same order even after the refinements are undone.
  EXPECT_THAT(partition.ElementsInHierarchicalOrder(),
              ElementsAre(2, 0, 1, 3, 4));
}
// On an empty partition, FillEquivalenceClasses() clears its output vector
// (which starts filled with junk) and reports zero classes.
TEST(MergingPartitionTest, Empty) {
  MergingPartition partition;
  EXPECT_EQ("", partition.DebugString());
  std::vector<int> node_equivalence_classes = {345, 234, -123, 45};  // Junk.
  int num_classes = partition.FillEquivalenceClasses(&node_equivalence_classes);
  EXPECT_THAT(node_equivalence_classes, IsEmpty());
  EXPECT_EQ(0, num_classes);
}
// Reset(n) discards all previous merges and leaves n singleton parts.
TEST(MergingPartitionTest, Reset) {
  MergingPartition partition(4);
  partition.MergePartsOf(2, 3);
  partition.MergePartsOf(1, 0);
  partition.Reset(3);
  EXPECT_EQ("0 | 1 | 2", partition.DebugString());
}
// End-to-end check of MergingPartition. The exact return values of
// MergePartsOf() (-1 meaning the merge was redundant) and the debug strings
// are pinned; then GetRoot() (with and without path compression),
// FillEquivalenceClasses(), KeepOnlyOneNodePerPart(), NumNodesInSamePartAs()
// and ResetNode() are exercised on the resulting partition
// (0 | 1 2 3 4 5 6 8 | 7 9).
TEST(MergingPartitionTest, EndToEnd) {
  MergingPartition partition(10);
  EXPECT_EQ(4, partition.MergePartsOf(3, 4));
  EXPECT_EQ(5, partition.MergePartsOf(3, 5));
  EXPECT_EQ(6, partition.MergePartsOf(3, 6));
  EXPECT_EQ(-1, partition.MergePartsOf(5, 3));  // Redundant.
  EXPECT_EQ(8, partition.MergePartsOf(2, 8));
  EXPECT_EQ(1, partition.MergePartsOf(2, 1));
  EXPECT_EQ(9, partition.MergePartsOf(9, 7));
  EXPECT_EQ(2, partition.MergePartsOf(1, 4));
  EXPECT_EQ("0 | 1 2 3 4 5 6 8 | 7 9", partition.DebugString());
  // Test GetRoot() without path compression.
  EXPECT_EQ(0, partition.GetRoot(0));
  EXPECT_EQ(3, partition.GetRoot(1));
  EXPECT_EQ(3, partition.GetRoot(2));
  EXPECT_EQ(3, partition.GetRoot(2));
  EXPECT_EQ(3, partition.GetRoot(3));
  EXPECT_EQ(3, partition.GetRoot(4));
  EXPECT_EQ(3, partition.GetRoot(5));
  EXPECT_EQ(3, partition.GetRoot(6));
  EXPECT_EQ(7, partition.GetRoot(7));
  EXPECT_EQ(3, partition.GetRoot(8));
  // Test GetRoot() with path compression.
  EXPECT_EQ(0, partition.GetRootAndCompressPath(0));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(1));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(2));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(2));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(3));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(4));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(5));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(6));
  EXPECT_EQ(7, partition.GetRootAndCompressPath(7));
  EXPECT_EQ(3, partition.GetRootAndCompressPath(8));
  EXPECT_EQ(7, partition.GetRootAndCompressPath(9));
  std::vector<int> node_equivalence_classes = {345, 234, -123, 45};  // Junk.
  int num_classes = partition.FillEquivalenceClasses(&node_equivalence_classes);
  EXPECT_THAT(node_equivalence_classes,
              ElementsAre(0, 1, 1, 1, 1, 1, 1, 2, 1, 2));
  EXPECT_EQ(3, num_classes);
  // Only the first node of each part (in `nodes` order) is kept.
  std::vector<int> nodes = {0, 7, 2, 9, 4, 6, 8};
  partition.KeepOnlyOneNodePerPart(&nodes);
  EXPECT_THAT(nodes, ElementsAre(0, 7, 2));
  EXPECT_EQ(1, partition.NumNodesInSamePartAs(0));
  EXPECT_EQ(7, partition.NumNodesInSamePartAs(1));
  EXPECT_EQ(7, partition.NumNodesInSamePartAs(2));
  EXPECT_EQ(7, partition.NumNodesInSamePartAs(3));
  EXPECT_EQ(7, partition.NumNodesInSamePartAs(4));
  EXPECT_EQ(7, partition.NumNodesInSamePartAs(5));
  EXPECT_EQ(7, partition.NumNodesInSamePartAs(6));
  EXPECT_EQ(2, partition.NumNodesInSamePartAs(7));
  EXPECT_EQ(7, partition.NumNodesInSamePartAs(8));
  EXPECT_EQ(2, partition.NumNodesInSamePartAs(9));
  // Resetting every node brings back 10 singleton parts.
  for (int i = 1; i <= 9; ++i) partition.ResetNode(i);
  EXPECT_EQ("0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9", partition.DebugString());
  num_classes = partition.FillEquivalenceClasses(&node_equivalence_classes);
  EXPECT_THAT(node_equivalence_classes,
              ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9));
  EXPECT_EQ(10, num_classes);
}
// A 0-element SimpleDynamicPartition reports 0 parts and tolerates an empty
// Refine().
TEST(SimpleDynamicPartitionTest, EmptyCase) {
  SimpleDynamicPartition partition(0);
  EXPECT_EQ(partition.NumElements(), 0);
  EXPECT_EQ(partition.NumParts(), 0);
  // Do not crash.
  partition.Refine({});
  std::vector<int> buffer;
  EXPECT_TRUE(partition.GetParts(&buffer).empty());
}
// Two refinements of a 7-element partition, then GetParts() (spans backed by
// the caller-provided buffer), PartOf() and SizeOfPart() are checked.
TEST(SimpleDynamicPartitionTest, BasicTest) {
  SimpleDynamicPartition partition(7);
  partition.Refine({2, 1, 5, 0});
  partition.Refine({2, 4, 3, 6});
  std::vector<int> buffer;
  const std::vector<absl::Span<const int>> r = partition.GetParts(&buffer);
  ASSERT_THAT(r, ElementsAre(ElementsAre(3, 4, 6), ElementsAre(0, 1, 5),
                             ElementsAre(2)));
  // Test PartOf().
  EXPECT_EQ(1, partition.PartOf(0));
  EXPECT_EQ(1, partition.PartOf(1));
  EXPECT_EQ(2, partition.PartOf(2));
  EXPECT_EQ(0, partition.PartOf(3));
  EXPECT_EQ(0, partition.PartOf(4));
  EXPECT_EQ(1, partition.PartOf(5));
  EXPECT_EQ(0, partition.PartOf(6));
  // Test SizeOfPart().
  EXPECT_EQ(3, partition.SizeOfPart(0));
  EXPECT_EQ(3, partition.SizeOfPart(1));
  EXPECT_EQ(1, partition.SizeOfPart(2));
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,78 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/dynamic_permutation.h"
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace operations_research {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
// Builds the permutation (1 2)(3 6 5)(7 8 9) incrementally, exercising
// AddMappings(), LooseEnds(), UndoLastMappings() and Reset().
TEST(DynamicPermutationTest, EndToEnd) {
  DynamicPermutation perm(10);
  EXPECT_EQ("", perm.DebugString());
  // Incrementally enter the following permutation:
  // 5->3->6(->5); 1->2(->1); 8->9->7(->8).
  perm.AddMappings({3, 5}, {6, 3});
  EXPECT_THAT(perm.LooseEnds(), UnorderedElementsAre(6));
  perm.AddMappings({1, 0}, {2, 0});
  EXPECT_THAT(perm.LooseEnds(), UnorderedElementsAre(2, 6));
  perm.AddMappings({6}, {5});
  EXPECT_THAT(perm.LooseEnds(), UnorderedElementsAre(2));
  // Temporarily add two mappings and undo them.
  perm.AddMappings({2, 4, 7}, {4, 9, 8});
  EXPECT_THAT(perm.LooseEnds(), UnorderedElementsAre(9, 8));
  perm.AddMappings({8}, {7});
  EXPECT_THAT(perm.LooseEnds(), UnorderedElementsAre(9));
  std::vector<int> last_mapping_src;
  // Undo happens batch by batch, most recent first.
  perm.UndoLastMappings(&last_mapping_src);
  EXPECT_THAT(last_mapping_src, ElementsAre(8));
  perm.UndoLastMappings(&last_mapping_src);
  EXPECT_THAT(last_mapping_src, ElementsAre(2, 4, 7));
  EXPECT_THAT(perm.LooseEnds(), UnorderedElementsAre(2));
  // Finish entering the permutation described above.
  perm.AddMappings({2, 8, 7}, {1, 9, 8});
  perm.AddMappings({9}, {7});
  EXPECT_EQ("(1 2) (3 6 5) (7 8 9)", perm.DebugString());
  EXPECT_THAT(perm.AllMappingsSrc(), ElementsAre(3, 5, 1, 0, 6, 2, 8, 7, 9));
  EXPECT_THAT(perm.LooseEnds(), IsEmpty());
  // Reset() wipes everything; a subsequent undo is a harmless no-op.
  perm.Reset();
  EXPECT_EQ("", perm.DebugString());
  EXPECT_THAT(perm.AllMappingsSrc(), IsEmpty());
  EXPECT_THAT(perm.LooseEnds(), IsEmpty());
  perm.UndoLastMappings(&last_mapping_src);
  EXPECT_THAT(last_mapping_src, IsEmpty());
  EXPECT_EQ("", perm.DebugString());
}
// TODO(user): better, more focused and modular tests that cover all the logic
// and the corner cases.
// TODO(user): verify the average complexity claim of RootOf().
// TODO(user): Add debug-only death tests that verify that any misuse of the
// API triggers a DCHECK fail.
} // namespace
} // namespace operations_research

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,341 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/knapsack_solver_for_cuts.h"
#include <limits>
#include <memory>
#include <vector>
#include "gtest/gtest.h"
namespace operations_research {
namespace {
// Sentinel for an invalid knapsack solution.
// NOTE(review): appears unused in this file — confirm before removing.
const int kInvalidSolution = -1;
// Returns true iff the selected items in `best_solution` fit within
// `capacity` and their total profit is exactly `optimal_profit`.
bool IsSolutionValid(const std::vector<double>& profits,
                     const std::vector<double>& weights, const double capacity,
                     const std::vector<bool>& best_solution,
                     double optimal_profit) {
  double remaining = capacity;
  double achieved_profit = 0;
  const int num_items(profits.size());
  for (int item(0); item < num_items; ++item) {
    if (!best_solution.at(item)) continue;
    achieved_profit += profits[item];
    remaining -= weights[item];
  }
  // An over-capacity selection is invalid regardless of its profit.
  if (remaining < 0) {
    return false;
  }
  // Exact double comparison is intentional: the callers use exactly
  // representable values.
  return achieved_profit == optimal_profit;
}
// Solves `solver` to completion with an unbounded time limit and returns
// the best profit found.
double SolveKnapsackProblem(KnapsackSolverForCuts* solver) {
  bool is_solution_optimal = false;
  // A stack-allocated TimeLimit suffices: it only needs to live for the
  // duration of the Solve() call, so the previous heap allocation through
  // std::make_unique was unnecessary.
  TimeLimit time_limit(std::numeric_limits<double>::infinity());
  return solver->Solve(&time_limit, &is_solution_optimal);
}
// A node's depth() is the number of ancestors between it and the root.
TEST(KnapsackSearchNodeForCutsTest, Depth) {
  const KnapsackAssignmentForCuts assignment(0, false);
  KnapsackSearchNodeForCuts root(nullptr, assignment);
  KnapsackSearchNodeForCuts child(&root, assignment);
  KnapsackSearchNodeForCuts grandchild(&child, assignment);
  EXPECT_EQ(0, root.depth());
  EXPECT_EQ(1, child.depth());
  EXPECT_EQ(2, grandchild.depth());
}
// MoveUpToDepth() walks from a node up to its ancestor at the given depth.
TEST(KnapsackSearchPathTest, MoveUpToDepth) {
  KnapsackAssignmentForCuts assignment(0, false);
  KnapsackSearchNodeForCuts root(nullptr, assignment);
  KnapsackSearchNodeForCuts node_0(&root, assignment);
  // The original test also constructed a KnapsackSearchPathForCuts local
  // that was never used; MoveUpToDepth() is a free function operating
  // directly on nodes, so the unused object has been removed.
  const KnapsackSearchNodeForCuts* root_ptr = MoveUpToDepth(&node_0, 0);
  EXPECT_EQ(&root, root_ptr);
}
// Builds a small search tree, then checks that
// KnapsackSearchPathForCuts::Init() finds the common ancestor (via()) of its
// two end nodes, identically in both directions.
TEST(KnapsackSearchPathTest, InitAndMoveUpToDepth) {
  KnapsackAssignmentForCuts assignement(0, false);
  KnapsackSearchNodeForCuts root(nullptr, assignement);
  KnapsackSearchNodeForCuts node_0(&root, assignement);
  KnapsackSearchNodeForCuts node_00(&node_0, assignement);
  KnapsackSearchNodeForCuts node_01(&node_0, assignement);
  KnapsackSearchNodeForCuts node_001(&node_00, assignement);
  KnapsackSearchNodeForCuts node_010(&node_01, assignement);
  KnapsackSearchNodeForCuts node_0101(&node_010, assignement);
  KnapsackSearchNodeForCuts node_01011(&node_0101, assignement);
  KnapsackSearchPathForCuts from_01011_to_001(&node_01011, &node_001);
  // Moving node_01011 (depth 5) up to depth 2 lands on node_01.
  const KnapsackSearchNodeForCuts* node_01_ptr = MoveUpToDepth(&node_01011, 2);
  EXPECT_EQ(&node_01, node_01_ptr);
  from_01011_to_001.Init();
  EXPECT_EQ(&node_0, &from_01011_to_001.via());
  // The common ancestor is the same when the path is reversed.
  KnapsackSearchPathForCuts from_001_to_01011(&node_001, &node_01011);
  from_001_to_01011.Init();
  EXPECT_EQ(&from_01011_to_001.via(), &from_001_to_01011.via());
}
// GetEfficiency() is profit / weight (130 / 52 == 2.5 here), falling back
// to the `profit_max` argument for zero-weight items.
TEST(KnapsackItemForCutsTest, GetEfficiency) {
  const int kId(7);
  const double kWeight = 52;
  const double kProfit = 130;
  const double kEfficiency = 2.5;
  const double kProfitMax = 1000;
  const double kNullWeight = 0;
  const KnapsackItemForCuts item(kId, kWeight, kProfit);
  EXPECT_EQ(kId, item.id);
  EXPECT_EQ(kWeight, item.weight);
  EXPECT_EQ(kProfit, item.profit);
  EXPECT_EQ(kEfficiency, item.GetEfficiency(kProfitMax));
  const KnapsackItemForCuts item2(kId, kNullWeight, kProfit);
  EXPECT_EQ(kProfitMax, item2.GetEfficiency(kProfitMax));
}
// After Init(n), no item is bound and the state reports n items.
TEST(KnapsackStateForCutsTest, Init) {
  const int kNumberOfItems(12);
  KnapsackStateForCuts state;
  state.Init(kNumberOfItems);
  for (int i(0); i < kNumberOfItems; ++i) {
    EXPECT_FALSE(state.is_bound(i));
  }
  EXPECT_EQ(kNumberOfItems, state.GetNumberOfItems());
}
// UpdateState() binds an item; binding the same item to the opposite value
// fails; calling it with `true` as the first argument undoes the binding.
TEST(KnapsackStateForCutsTest, UpdateState) {
  const int kNumberOfItems(12);
  KnapsackStateForCuts state;
  state.Init(kNumberOfItems);
  const int item_id(7);
  bool is_in = true;
  KnapsackAssignmentForCuts assignment1(item_id, is_in);
  bool no_fail = state.UpdateState(false, assignment1);
  // Only item_id got bound.
  for (int i(0); i < kNumberOfItems; ++i) {
    EXPECT_EQ(i == item_id, state.is_bound(i));
  }
  EXPECT_EQ(is_in, state.is_in(item_id));
  EXPECT_TRUE(no_fail);
  // Contradictory re-binding of the same item must fail.
  is_in = false;
  KnapsackAssignmentForCuts assignment2(item_id, is_in);
  no_fail = state.UpdateState(false, assignment2);
  EXPECT_TRUE(state.is_bound(item_id));
  EXPECT_FALSE(no_fail);
  // Reverting the assignment unbinds the item.
  no_fail = state.UpdateState(true, assignment2);
  EXPECT_FALSE(state.is_bound(item_id));
  EXPECT_TRUE(no_fail);
}
// Follows the capacity propagator (capacity 2, unit weights) through a
// sequence of bindings: current_profit(), the profit bounds and
// GetNextItemId() are checked after each update, and binding a third item
// (over capacity) makes Update() return false.
TEST(KnapsackPropagatorForCutsTest, InitAndUpdatePropagator) {
  const std::vector<double> profits = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const std::vector<double> weights = {1, 1, 1, 1, 1, 1, 1, 1, 1};
  ASSERT_EQ(profits.size(), weights.size());
  const int kNumItems(profits.size());
  const int kNoSelection(-1);
  KnapsackStateForCuts state;
  state.Init(kNumItems);
  KnapsackPropagatorForCuts capacity_propagator(&state);
  capacity_propagator.Init(profits, weights, 2);
  EXPECT_EQ(kNoSelection, capacity_propagator.GetNextItemId());
  KnapsackAssignmentForCuts assignment1(3, true);
  EXPECT_TRUE(state.UpdateState(false, assignment1));
  EXPECT_TRUE(capacity_propagator.Update(false, assignment1));
  EXPECT_EQ(4, capacity_propagator.current_profit());
  capacity_propagator.ComputeProfitBounds();
  EXPECT_EQ(7, capacity_propagator.GetNextItemId());
  // Item 3 plus item 8 (the most profitable remaining item) fill capacity 2,
  // so both bounds coincide.
  const double kProfit13 = profits[3] + profits[8];
  EXPECT_EQ(kProfit13, capacity_propagator.profit_lower_bound());
  EXPECT_EQ(kProfit13, capacity_propagator.profit_upper_bound());
  KnapsackAssignmentForCuts assignment2(8, true);
  EXPECT_TRUE(state.UpdateState(false, assignment2));
  EXPECT_TRUE(capacity_propagator.Update(false, assignment2));
  EXPECT_EQ(kProfit13, capacity_propagator.current_profit());
  capacity_propagator.ComputeProfitBounds();
  EXPECT_EQ(7, capacity_propagator.GetNextItemId());
  EXPECT_EQ(kProfit13, capacity_propagator.profit_lower_bound());
  EXPECT_EQ(kProfit13, capacity_propagator.profit_upper_bound());
  // A third unit-weight item exceeds capacity 2: Update() reports failure.
  KnapsackAssignmentForCuts assignment3(5, true);
  EXPECT_TRUE(state.UpdateState(false, assignment3));
  EXPECT_FALSE(capacity_propagator.Update(false, assignment3));
  const double kProfit19 = profits[3] + profits[8] + profits[5];
  EXPECT_EQ(kProfit19, capacity_propagator.current_profit());
  // Reverting item 8 brings the propagator back to a feasible state.
  EXPECT_TRUE(state.UpdateState(true, assignment2));
  EXPECT_TRUE(capacity_propagator.Update(true, assignment2));
  const double kProfit10 = profits[3] + profits[5];
  EXPECT_EQ(kProfit10, capacity_propagator.current_profit());
  capacity_propagator.ComputeProfitBounds();
  EXPECT_EQ(8, capacity_propagator.GetNextItemId());
  EXPECT_EQ(kProfit10, capacity_propagator.profit_lower_bound());
  EXPECT_EQ(kProfit10, capacity_propagator.profit_upper_bound());
}
// 9 items with profit == weight and capacity 34: the optimum fills the
// knapsack exactly, so the optimal profit equals the capacity.
TEST(KnapsackSolverForCutsTest, SolveOneDimension) {
  const std::vector<double> profits = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const std::vector<double> weights = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  ASSERT_EQ(profits.size(), weights.size());
  const double kCapacity = 34;
  const double kOptimalProfit = 34;
  KnapsackSolverForCuts solver("solver");
  solver.Init(profits, weights, kCapacity);
  const double profit = SolveKnapsackProblem(&solver);
  EXPECT_EQ(kOptimalProfit, profit);
}
// With a negative capacity no selection can be valid, so whatever the
// solver reports must fail validation.
TEST(KnapsackSolverForCutsTest, SolveOneDimensionInfeasible) {
  const std::vector<double> profits = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const std::vector<double> weights = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  ASSERT_EQ(profits.size(), weights.size());
  const double kCapacity = -1;
  KnapsackSolverForCuts solver("solver");
  solver.Init(profits, weights, kCapacity);
  const double profit = SolveKnapsackProblem(&solver);
  const int number_of_items(profits.size());
  std::vector<bool> best_solution(number_of_items, false);
  for (int item_id(0); item_id < number_of_items; ++item_id) {
    best_solution.at(item_id) = solver.best_solution(item_id);
  }
  EXPECT_FALSE(
      IsSolutionValid(profits, weights, kCapacity, best_solution, profit));
}
// The same KnapsackSolverForCuts instance can be Init()ed and solved several
// times in a row, on instances of different sizes.
TEST(KnapsackSolverForCutsTest, MultipleSolves) {
  KnapsackSolverForCuts solver("solver");
  {
    const std::vector<double> profits = {1, 2, 3};
    const std::vector<double> weights = {4, 5, 6};
    ASSERT_EQ(profits.size(), weights.size());
    const double kCapacity = 10;
    const double kOptimalProfit = 4;
    solver.Init(profits, weights, kCapacity);
    const double profit = SolveKnapsackProblem(&solver);
    EXPECT_EQ(kOptimalProfit, profit);
  }
  {
    const std::vector<double> profits = {1, 2, 3, 7};
    const std::vector<double> weights = {4, 5, 6, 8};
    ASSERT_EQ(profits.size(), weights.size());
    const double kCapacity = 10;
    const double kOptimalProfit = 7;
    solver.Init(profits, weights, kCapacity);
    const double profit = SolveKnapsackProblem(&solver);
    EXPECT_EQ(kOptimalProfit, profit);
  }
  {
    const std::vector<double> profits = {1, 2};
    const std::vector<double> weights = {4, 5};
    ASSERT_EQ(profits.size(), weights.size());
    const double kCapacity = 10;
    const double kOptimalProfit = 3;
    solver.Init(profits, weights, kCapacity);
    const double profit = SolveKnapsackProblem(&solver);
    EXPECT_EQ(kOptimalProfit, profit);
  }
}
// 50-item instance with known optimum 7534. Also checks that the solution
// bound thresholds and a node limit each make the search stop early with a
// strictly sub-optimal profit.
TEST(KnapsackSolverForCutsTest, SolveBigOneDimension) {
  const std::vector<double> profits = {
      360, 83, 59,  130, 431, 67,  230, 52,  93,  125, 670, 892, 600,
      38,  48, 147, 78,  256, 63,  17,  120, 164, 432, 35,  92,  110,
      22,  42, 50,  323, 514, 28,  87,  73,  78,  15,  26,  78,  210,
      36,  85, 189, 274, 43,  33,  10,  19,  389, 276, 312};
  const std::vector<double> weights = {
      7,  0,  30, 22, 80, 94, 11, 81, 70, 64, 59, 18, 0,  36, 3,  8,  15,
      42, 9,  0,  42, 47, 52, 32, 26, 48, 55, 6,  29, 84, 2,  4,  18, 56,
      7,  29, 93, 44, 71, 3,  86, 66, 31, 65, 0,  79, 20, 65, 52, 13};
  ASSERT_EQ(profits.size(), weights.size());
  const double kCapacity = 850;
  const double kOptimalProfit = 7534;
  KnapsackSolverForCuts solver("solver");
  {
    solver.Init(profits, weights, kCapacity);
    const double profit = SolveKnapsackProblem(&solver);
    EXPECT_EQ(kOptimalProfit, profit);
  }
  {
    // Solve with lower bound threshold.
    solver.Init(profits, weights, kCapacity);
    solver.set_solution_lower_bound_threshold(100);
    const double profit = SolveKnapsackProblem(&solver);
    EXPECT_GT(kOptimalProfit, profit);
  }
  {
    // Solve with upper bound threshold.
    solver.Init(profits, weights, kCapacity);
    solver.set_solution_upper_bound_threshold(10000);
    const double profit = SolveKnapsackProblem(&solver);
    EXPECT_GT(kOptimalProfit, profit);
  }
  {
    // A node limit of 1 also stops the search before optimality.
    solver.Init(profits, weights, kCapacity);
    solver.set_node_limit(1);
    const double profit = SolveKnapsackProblem(&solver);
    EXPECT_GT(kOptimalProfit, profit);
  }
}
// Integer weights, fractional (exactly representable) profits:
// optimum picks items 1, 3, 4 and 5 for profit 0.5 + 1 + 1 + 1.1? No —
// weights 6+1+1+1 exceed 4; items 3, 4, 5 fit for 1 + 1 + 1.1 = 3.1.
TEST(KnapsackSolverForCutsTest, SolveOneDimensionFractionalProfits) {
  const std::vector<double> profits = {0, 0.5, 0.4, 1, 1, 1.1};
  const std::vector<double> weights = {9, 6, 2, 1, 1, 1};
  ASSERT_EQ(profits.size(), weights.size());
  const double kCapacity = 4;
  const double kOptimalProfit = 3.1;
  KnapsackSolverForCuts solver("solver");
  solver.Init(profits, weights, kCapacity);
  const double profit = SolveKnapsackProblem(&solver);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Fractional weights: only two of the three 1.5-weight items fit in
// capacity 4, giving profit 1 + 2 = 3.
TEST(KnapsackSolverForCutsTest, SolveOneDimensionFractionalWeights) {
  const std::vector<double> profits = {0, 1, 1, 1, 1, 2};
  const std::vector<double> weights = {9, 6, 2, 1.5, 1.5, 1.5};
  ASSERT_EQ(profits.size(), weights.size());
  const double kCapacity = 4;
  const double kOptimalProfit = 3;
  KnapsackSolverForCuts solver("solver");
  solver.Init(profits, weights, kCapacity);
  const double profit = SolveKnapsackProblem(&solver);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Both profits and weights fractional: the two best 1.5-weight items
// (profits 1 and 1.1) fit in capacity 4 for an optimum of 2.1.
TEST(KnapsackSolverForCutsTest, SolveOneDimensionFractional) {
  const std::vector<double> profits = {0, 0.5, 0.4, 1, 1, 1.1};
  const std::vector<double> weights = {9, 6, 2, 1.5, 1.5, 1.5};
  ASSERT_EQ(profits.size(), weights.size());
  const double kCapacity = 4;
  const double kOptimalProfit = 2.1;
  KnapsackSolverForCuts solver("solver");
  solver.Init(profits, weights, kCapacity);
  const double profit = SolveKnapsackProblem(&solver);
  EXPECT_EQ(kOptimalProfit, profit);
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,618 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/knapsack_solver.h"
#include <cstdint>
#include <limits>
#include <vector>
#include "gtest/gtest.h"
#include "ortools/base/macros.h"
namespace operations_research {
namespace {
// Returned by the Solve* helpers in this file when the solver's reported
// solution fails validation (or when the with/without-reduction runs
// disagree).
const int kInvalidSolution = -1;
bool IsSolutionValid(const std::vector<int64_t>& profits,
const std::vector<std::vector<int64_t> >& weights,
const std::vector<int64_t>& capacities,
const std::vector<bool>& best_solution,
int64_t optimal_profit) {
std::vector<int64_t> remaining_capacities = capacities;
int64_t profit = 0;
for (int i = 0; i < profits.size(); ++i) {
if (best_solution.at(i)) {
profit += profits[i];
for (int j = 0; j < capacities.size(); ++j) {
remaining_capacities[j] -= (weights[j])[i];
}
}
}
for (int j = 0; j < capacities.size(); ++j) {
if (remaining_capacities[j] < 0) {
return false;
}
}
return profit == optimal_profit;
}
// Builds a KnapsackSolver of the given `type` from raw C arrays
// (`weight_array` holds `number_of_dimensions` consecutive rows of
// `number_of_items` weights each), solves it, and returns the profit — or
// kInvalidSolution if the reported solution does not validate.
int64_t SolveKnapsackProblemUsingSpecificSolverAndReduction(
    const int64_t* profit_array, int number_of_items,
    const int64_t* weight_array, const int64_t* capacity_array,
    int number_of_dimensions, KnapsackSolver::SolverType type,
    bool use_reduction,
    double time_limit = std::numeric_limits<double>::infinity()) {
  std::vector<int64_t> profits(profit_array, profit_array + number_of_items);
  std::vector<int64_t> capacities(capacity_array,
                                  capacity_array + number_of_dimensions);
  // Slice the flat weight array into one vector per dimension.
  std::vector<std::vector<int64_t> > weights;
  for (int i = 0; i < number_of_dimensions; ++i) {
    const int64_t* one_dimension = weight_array + number_of_items * i;
    std::vector<int64_t> weights_one_dimension(one_dimension,
                                               one_dimension + number_of_items);
    weights.push_back(weights_one_dimension);
  }
  KnapsackSolver solver(type, "solver");
  solver.set_use_reduction(use_reduction);
  solver.set_time_limit(time_limit);
  solver.Init(profits, weights, capacities);
  int64_t profit = solver.Solve();
  std::vector<bool> best_solution(number_of_items, false);
  for (int item_id = 0; item_id < number_of_items; ++item_id) {
    best_solution.at(item_id) = solver.BestSolutionContains(item_id);
  }
  return (IsSolutionValid(profits, weights, capacities, best_solution, profit))
             ? profit
             : kInvalidSolution;
}
// Solves the instance twice with the given solver, once with and once
// without reduction; both runs must agree, otherwise kInvalidSolution is
// returned.
int64_t SolveKnapsackProblemUsingSpecificSolver(
    const int64_t* profit_array, int number_of_items,
    const int64_t* weight_array, const int64_t* capacity_array,
    int number_of_dimensions, KnapsackSolver::SolverType type,
    double time_limit = std::numeric_limits<double>::infinity()) {
  const int64_t with_reduction =
      SolveKnapsackProblemUsingSpecificSolverAndReduction(
          profit_array, number_of_items, weight_array, capacity_array,
          number_of_dimensions, type, /*use_reduction=*/true, time_limit);
  const int64_t without_reduction =
      SolveKnapsackProblemUsingSpecificSolverAndReduction(
          profit_array, number_of_items, weight_array, capacity_array,
          number_of_dimensions, type, /*use_reduction=*/false, time_limit);
  if (with_reduction != without_reduction) {
    return kInvalidSolution;
  }
  return with_reduction;
}
// Solves the instance with the generic branch-and-bound solver, then
// cross-checks the result against every other applicable solver: SCIP MIP
// always, and -- for one-dimensional instances only -- brute force, the
// 64-items solver, dynamic programming and divide-and-conquer, each gated by
// its own size limit. Returns the agreed optimal profit, or kInvalidSolution
// on any disagreement or invalid solution.
int64_t SolveKnapsackProblem(
    const int64_t* profit_array, int number_of_items,
    const int64_t* weight_array, const int64_t* capacity_array,
    int number_of_dimensions,
    double time_limit = std::numeric_limits<double>::infinity()) {
  const int kMaxNumberOfItemsForBruteForceSolver = 15;
  const int kMaxNumberOfItemsForDivideAndConquerSolver = 32;
  const int kMaxNumberOfItemsFor64ItemsSolver = 64;
  // This test is ran as "size = 'small'". To be fast enough, the dynamic
  // programming solver should be limited to instances with capacities smaller
  // than 10^6.
  const int64_t kMaxCapacityForDynamicProgrammingSolver = 1000000;
  // Reference result; the caller's time limit only applies here.
  const int64_t generic_profit = SolveKnapsackProblemUsingSpecificSolver(
      profit_array, number_of_items, weight_array, capacity_array,
      number_of_dimensions,
      KnapsackSolver::KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER,
      time_limit);
  if (generic_profit == kInvalidSolution) {
    return kInvalidSolution;
  }
  const int64_t scip_profit = SolveKnapsackProblemUsingSpecificSolver(
      profit_array, number_of_items, weight_array, capacity_array,
      number_of_dimensions,
      KnapsackSolver::KNAPSACK_MULTIDIMENSION_SCIP_MIP_SOLVER);
  if (scip_profit != generic_profit) {
    return kInvalidSolution;
  }
  // The remaining solvers only handle one dimension.
  if (number_of_dimensions > 1) {
    return generic_profit;
  }
  if (number_of_items <= kMaxNumberOfItemsForBruteForceSolver) {
    const int64_t brute_force_profit = SolveKnapsackProblemUsingSpecificSolver(
        profit_array, number_of_items, weight_array, capacity_array,
        number_of_dimensions, KnapsackSolver::KNAPSACK_BRUTE_FORCE_SOLVER);
    if (brute_force_profit != generic_profit) {
      return kInvalidSolution;
    }
  }
  if (number_of_items <= kMaxNumberOfItemsFor64ItemsSolver) {
    const int64_t items64_profit = SolveKnapsackProblemUsingSpecificSolver(
        profit_array, number_of_items, weight_array, capacity_array,
        number_of_dimensions, KnapsackSolver::KNAPSACK_64ITEMS_SOLVER);
    if (items64_profit != generic_profit) {
      return kInvalidSolution;
    }
  }
  if (capacity_array[0] <= kMaxCapacityForDynamicProgrammingSolver) {
    const int64_t dynamic_programming_profit =
        SolveKnapsackProblemUsingSpecificSolver(
            profit_array, number_of_items, weight_array, capacity_array,
            number_of_dimensions,
            KnapsackSolver::KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER);
    if (dynamic_programming_profit != generic_profit) {
      return kInvalidSolution;
    }
  }
  if (number_of_items <= kMaxNumberOfItemsForDivideAndConquerSolver) {
    const int64_t divide_and_conquer_profit =
        SolveKnapsackProblemUsingSpecificSolver(
            profit_array, number_of_items, weight_array, capacity_array,
            number_of_dimensions,
            KnapsackSolver::KNAPSACK_DIVIDE_AND_CONQUER_SOLVER);
    if (divide_and_conquer_profit != generic_profit) {
      return kInvalidSolution;
    }
  }
  return generic_profit;
}
// Checks KnapsackItem accessors and GetEfficiency(): profit / weight for a
// regular item (130 / 52 == 2.5), and profit_max for a zero-weight item.
TEST(KnapsackItemTest, GetEfficiency) {
  const int kId = 7;
  const int64_t kWeight = 52;
  const int64_t kProfit = 130;
  const double kEfficiency = 2.5;
  const int64_t kProfitMax = 1000;
  const int kNullWeight = 0;
  const KnapsackItem item(kId, kWeight, kProfit);
  EXPECT_EQ(kId, item.id);
  EXPECT_EQ(kWeight, item.weight);
  EXPECT_EQ(kProfit, item.profit);
  EXPECT_EQ(kEfficiency, item.GetEfficiency(kProfitMax));
  // A zero-weight item gets the maximal efficiency, i.e. profit_max.
  const KnapsackItem item2(kId, kNullWeight, kProfit);
  EXPECT_EQ(kProfitMax, item2.GetEfficiency(kProfitMax));
}
// depth() is 0 for a root node and grows by one per parent link.
TEST(KnapsackSearchNodeTest, Depth) {
  KnapsackAssignment assignment(0, false);
  KnapsackSearchNode root(nullptr, assignment);
  EXPECT_EQ(0, root.depth());
  KnapsackSearchNode node_0(&root, assignment);
  EXPECT_EQ(1, node_0.depth());
  KnapsackSearchNode node_00(&node_0, assignment);
  EXPECT_EQ(2, node_00.depth());
}
// MoveUpToDepth() from a depth-1 node to depth 0 must return the root.
TEST(KnapsackSearchPathTest, MoveUpToDepth) {
  KnapsackAssignment assignment(0, false);
  KnapsackSearchNode root(nullptr, assignment);
  KnapsackSearchNode node_0(&root, assignment);
  KnapsackSearchPath from_root_to_0(root, node_0);
  const KnapsackSearchNode* root_ptr = from_root_to_0.MoveUpToDepth(node_0, 0);
  EXPECT_EQ(&root, root_ptr);
}
// Builds a small search tree, then checks that MoveUpToDepth() returns the
// ancestor at the requested depth and that Init() computes the same common
// ancestor (the "via" node) for a path and for its reverse.
TEST(KnapsackSearchPathTest, InitAndMoveUpToDepth) {
  KnapsackAssignment assignment(0, false);
  KnapsackSearchNode root(nullptr, assignment);
  KnapsackSearchNode node_0(&root, assignment);
  KnapsackSearchNode node_00(&node_0, assignment);
  KnapsackSearchNode node_01(&node_0, assignment);
  KnapsackSearchNode node_001(&node_00, assignment);
  KnapsackSearchNode node_010(&node_01, assignment);
  KnapsackSearchNode node_0101(&node_010, assignment);
  KnapsackSearchNode node_01011(&node_0101, assignment);
  KnapsackSearchPath from_01011_to_001(node_01011, node_001);
  // The ancestor of node_01011 at depth 2 is node_01.
  const KnapsackSearchNode* node_01_ptr =
      from_01011_to_001.MoveUpToDepth(node_01011, 2);
  EXPECT_EQ(&node_01, node_01_ptr);
  from_01011_to_001.Init();
  EXPECT_EQ(&node_0, &from_01011_to_001.via());
  // The "via" node does not depend on the path's direction.
  KnapsackSearchPath from_001_to_01011(node_001, node_01011);
  from_001_to_01011.Init();
  EXPECT_EQ(&from_01011_to_001.via(), &from_001_to_01011.via());
}
// After Init(), no item is bound and the item count is as requested.
TEST(KnapsackStateTest, Init) {
  const int kNumberOfItems = 12;
  KnapsackState state;
  state.Init(kNumberOfItems);
  for (int i = 0; i < kNumberOfItems; ++i) {
    EXPECT_FALSE(state.is_bound(i));
  }
  EXPECT_EQ(kNumberOfItems, state.GetNumberOfItems());
}
// UpdateState() binds an item; re-binding it to the opposite value without
// revert fails, and reverting unbinds it again.
TEST(KnapsackStateTest, UpdateState) {
  const int kNumberOfItems = 12;
  KnapsackState state;
  state.Init(kNumberOfItems);
  const int item_id = 7;
  bool is_in = true;
  KnapsackAssignment assignment1(item_id, is_in);
  bool no_fail = state.UpdateState(false, assignment1);
  // Only item_id is now bound, and it is "in".
  for (int i = 0; i < kNumberOfItems; ++i) {
    EXPECT_EQ(i == item_id, state.is_bound(i));
  }
  EXPECT_EQ(is_in, state.is_in(item_id));
  EXPECT_TRUE(no_fail);
  // Binding the same item to the opposite value must fail...
  is_in = false;
  KnapsackAssignment assignment2(item_id, is_in);
  no_fail = state.UpdateState(false, assignment2);
  EXPECT_TRUE(state.is_bound(item_id));
  EXPECT_FALSE(no_fail);
  // ...but reverting it succeeds and unbinds the item.
  no_fail = state.UpdateState(true, assignment2);
  EXPECT_FALSE(state.is_bound(item_id));
  EXPECT_TRUE(no_fail);
}
// Minimal propagator used to exercise KnapsackPropagator's Init()/Update()
// plumbing:
//  - InitPropagator() sets the lower bound to the number of items;
//  - UpdatePropagator() adds 4 (subtracts 4 on revert) and reports failure
//    once the lower bound is no longer positive;
//  - ComputeProfitBounds() pins the upper bound to the lower bound.
class KnapsackFakePropagator : public KnapsackPropagator {
 public:
  explicit KnapsackFakePropagator(const KnapsackState& state)
      : KnapsackPropagator(state) {}
  int GetNextItemId() const override { return 0; }
  void ComputeProfitBounds() override {
    set_profit_upper_bound(profit_lower_bound());
  }
 protected:
  void InitPropagator() override { set_profit_lower_bound(items().size()); }
  bool UpdatePropagator(bool revert,
                        const KnapsackAssignment& /*assignment*/) override {
    set_profit_lower_bound(profit_lower_bound() + ((revert) ? -4 : 4));
    return profit_lower_bound() > 0;
  }
  void CopyCurrentStateToSolutionPropagator(
      std::vector<bool>* /*solution*/) const override {}
};
// With the fake propagator, the lower bound starts at 9 (the number of
// items); each reverted update subtracts 4 (9 -> 5 -> 1 -> -3, failing on
// the third) and a non-reverted update adds 4 back.
TEST(KnapsackPropagatorTest, InitAndUpdatePropagator) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  KnapsackState state;
  state.Init(kArraySize);
  std::vector<int64_t> profits(kProfitArray, kProfitArray + kArraySize);
  std::vector<int64_t> weights(kWeightArray, kWeightArray + kArraySize);
  KnapsackFakePropagator fake_propagator(state);
  fake_propagator.Init(profits, weights);
  EXPECT_EQ(kArraySize, fake_propagator.profit_lower_bound());
  EXPECT_EQ(0, fake_propagator.GetNextItemId());
  KnapsackAssignment assignment(3, true);
  bool no_fail = fake_propagator.Update(true, assignment);
  EXPECT_TRUE(no_fail);
  no_fail = fake_propagator.Update(true, assignment);
  EXPECT_TRUE(no_fail);
  no_fail = fake_propagator.Update(true, assignment);
  EXPECT_FALSE(no_fail);
  no_fail = fake_propagator.Update(false, assignment);
  EXPECT_TRUE(no_fail);
}
// Exercises KnapsackCapacityPropagator with capacity 2 and unit weights:
// current_profit(), profit bounds and GetNextItemId() as items are forced in
// and out.
TEST(KnapsackCapacityPropagatorTest, InitAndUpdatePropagator) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 1, 1, 1, 1, 1, 1, 1, 1};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNoSelection = -1;
  KnapsackState state;
  state.Init(kArraySize);
  std::vector<int64_t> profits(kProfitArray, kProfitArray + kArraySize);
  std::vector<int64_t> weights(kWeightArray, kWeightArray + kArraySize);
  KnapsackCapacityPropagator capacity_propagator(state, 2);
  capacity_propagator.Init(profits, weights);
  EXPECT_EQ(kNoSelection, capacity_propagator.GetNextItemId());
  // Force item 3 (profit 4) in; bounds become profit[3] + profit[8].
  KnapsackAssignment assignment1(3, true);
  bool no_fail = state.UpdateState(false, assignment1);
  no_fail = capacity_propagator.Update(false, assignment1) && no_fail;
  EXPECT_TRUE(no_fail);
  EXPECT_EQ(4, capacity_propagator.current_profit());
  capacity_propagator.ComputeProfitBounds();
  EXPECT_EQ(7, capacity_propagator.GetNextItemId());
  const int64_t kProfit13 = kProfitArray[3] + kProfitArray[8];
  EXPECT_EQ(kProfit13, capacity_propagator.profit_lower_bound());
  EXPECT_EQ(kProfit13, capacity_propagator.profit_upper_bound());
  // Force item 8 in too: the capacity (2) is now fully used; bounds keep the
  // same value.
  KnapsackAssignment assignment2(8, true);
  no_fail = state.UpdateState(false, assignment2);
  no_fail = capacity_propagator.Update(false, assignment2) && no_fail;
  EXPECT_TRUE(no_fail);
  EXPECT_EQ(kProfit13, capacity_propagator.current_profit());
  capacity_propagator.ComputeProfitBounds();
  EXPECT_EQ(7, capacity_propagator.GetNextItemId());
  EXPECT_EQ(kProfit13, capacity_propagator.profit_lower_bound());
  EXPECT_EQ(kProfit13, capacity_propagator.profit_upper_bound());
  // A third item exceeds the capacity: Update() must fail.
  KnapsackAssignment assignment3(5, true);
  no_fail = state.UpdateState(false, assignment3);
  no_fail = capacity_propagator.Update(false, assignment3) && no_fail;
  EXPECT_FALSE(no_fail);
  const int64_t kProfit19 = kProfitArray[3] + kProfitArray[8] + kProfitArray[5];
  EXPECT_EQ(kProfit19, capacity_propagator.current_profit());
  // Reverting item 8 restores feasibility.
  no_fail = state.UpdateState(true, assignment2);
  no_fail = capacity_propagator.Update(true, assignment2) && no_fail;
  EXPECT_TRUE(no_fail);
  const int64_t kProfit10 = kProfitArray[3] + kProfitArray[5];
  EXPECT_EQ(kProfit10, capacity_propagator.current_profit());
  capacity_propagator.ComputeProfitBounds();
  EXPECT_EQ(8, capacity_propagator.GetNextItemId());
  EXPECT_EQ(kProfit10, capacity_propagator.profit_lower_bound());
  EXPECT_EQ(kProfit10, capacity_propagator.profit_upper_bound());
}
// Weights equal profits, so a capacity of 34 yields an optimal profit of 34.
TEST(KnapsackSolverTest, SolveOneDimension) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kCapacityArray[] = {34};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 34;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Capacity 50 exceeds the total weight (45): the optimum takes every item.
TEST(KnapsackSolverTest, SolveOneDimensionReducedToNone) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kCapacityArray[] = {50};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 45;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// A zero time limit makes the solve fail; the helper is expected to surface
// that as -1 (kNoProfit, same value as kInvalidSolution).
TEST(KnapsackSolverTest, SolveOneDimensionWithZeroTimeLimit) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kCapacityArray[] = {34};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kNoProfit = -1;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions, 0.0);
  EXPECT_EQ(kNoProfit, profit);
}
// Second dimension (unit weights, capacity 4) limits the solution to 4 items;
// the best 4 profits are 6 + 7 + 8 + 9 = 30.
TEST(KnapsackSolverTest, SolveTwoDimensions) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
                                  1, 1, 1, 1, 1, 1, 1, 1, 1};
  const int64_t kCapacityArray[] = {34, 4};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 30;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// First dimension is non-binding (capacity 50 >= total weight 45); only the
// second dimension (capacity 4) constrains the solution.
TEST(KnapsackSolverTest, SolveTwoDimensionsReducedToOne) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
                                  1, 1, 1, 1, 1, 1, 1, 1, 1};
  const int64_t kCapacityArray[] = {50, 4};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 30;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Both dimensions are non-binding (50 >= 45 and 10 >= 9): the optimum takes
// every item, for a profit of 45.
TEST(KnapsackSolverTest, SolveTwoDimensionsReducedToNone) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
                                  1, 1, 1, 1, 1, 1, 1, 1, 1};
  const int64_t kCapacityArray[] = {50, 10};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 45;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Same two-dimension instance as SolveTwoDimensions, but solved directly
// with KnapsackGenericSolver using the second dimension (propagator id 1) as
// the primary propagator; the optimal profit must be unchanged.
TEST(KnapsackSolverTest, SolveTwoDimensionsSettingPrimaryPropagator) {
  const int64_t kProfitArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  const int64_t kWeightArray[] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
                                  1, 1, 1, 1, 1, 1, 1, 1, 1};
  const int64_t kCapacityArray[] = {34, 4};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 30;
  std::vector<int64_t> profits(kProfitArray, kProfitArray + kArraySize);
  std::vector<int64_t> capacities(kCapacityArray,
                                  kCapacityArray + kNumberOfDimensions);
  std::vector<std::vector<int64_t> > weights;
  for (int i = 0; i < kNumberOfDimensions; ++i) {
    const int64_t* one_dimension = kWeightArray + kArraySize * i;
    std::vector<int64_t> weights_one_dimension(one_dimension,
                                               one_dimension + kArraySize);
    weights.push_back(weights_one_dimension);
  }
  KnapsackGenericSolver solver("solver");
  solver.Init(profits, weights, capacities);
  solver.set_primary_propagator_id(1);
  bool is_solution_optimal = false;
  operations_research::TimeLimit time_limit(
      std::numeric_limits<double>::infinity());
  int64_t profit = solver.Solve(&time_limit, &is_solution_optimal);
  EXPECT_TRUE(is_solution_optimal);
  // Cross-check the returned solution against the reported profit.
  std::vector<bool> best_solution(kArraySize, false);
  for (int item_id = 0; item_id < kArraySize; ++item_id) {
    best_solution.at(item_id) = solver.best_solution(item_id);
  }
  EXPECT_TRUE(
      IsSolutionValid(profits, weights, capacities, best_solution, profit));
  EXPECT_EQ(kOptimalProfit, profit);
}
// 50-item single-dimension instance; the known optimum is 7534.
TEST(KnapsackSolverTest, SolveBigOneDimension) {
  const int64_t kProfitArray[] = {
      360, 83, 59, 130, 431, 67, 230, 52, 93, 125, 670, 892, 600,
      38, 48, 147, 78, 256, 63, 17, 120, 164, 432, 35, 92, 110,
      22, 42, 50, 323, 514, 28, 87, 73, 78, 15, 26, 78, 210,
      36, 85, 189, 274, 43, 33, 10, 19, 389, 276, 312};
  const int64_t kWeightArray[] = {
      7, 0, 30, 22, 80, 94, 11, 81, 70, 64, 59, 18, 0, 36, 3, 8, 15,
      42, 9, 0, 42, 47, 52, 32, 26, 48, 55, 6, 29, 84, 2, 4, 18, 56,
      7, 29, 93, 44, 71, 3, 86, 66, 31, 65, 0, 79, 20, 65, 52, 13};
  const int64_t kCapacityArray[] = {850};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 7534;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// 50-item, 5-dimension instance; the known optimum is 6339.
TEST(KnapsackSolverTest, SolveBigFiveDimensions) {
  const int64_t kProfitArray[] = {
      360, 83, 59, 130, 431, 67, 230, 52, 93, 125, 670, 892, 600,
      38, 48, 147, 78, 256, 63, 17, 120, 164, 432, 35, 92, 110,
      22, 42, 50, 323, 514, 28, 87, 73, 78, 15, 26, 78, 210,
      36, 85, 189, 274, 43, 33, 10, 19, 389, 276, 312};
  // Flat [dimension][item] matrix: 5 rows of 50 weights.
  const int64_t kWeightArray[] = {
      7, 0, 30, 22, 80, 94, 11, 81, 70, 64, 59, 18, 0, 36, 3, 8, 15, 42,
      9, 0, 42, 47, 52, 32, 26, 48, 55, 6, 29, 84, 2, 4, 18, 56, 7, 29,
      93, 44, 71, 3, 86, 66, 31, 65, 0, 79, 20, 65, 52, 13, 8, 66, 98, 50,
      0, 30, 0, 88, 15, 37, 26, 72, 61, 57, 17, 27, 83, 3, 9, 66, 97, 42,
      2, 44, 71, 11, 25, 74, 90, 20, 0, 38, 33, 14, 9, 23, 12, 58, 6, 14,
      78, 0, 12, 99, 84, 31, 16, 7, 33, 20, 3, 74, 88, 50, 55, 19, 0, 6,
      30, 62, 17, 81, 25, 46, 67, 28, 36, 8, 1, 52, 19, 37, 27, 62, 39, 84,
      16, 14, 21, 5, 60, 82, 72, 89, 16, 5, 29, 7, 80, 97, 41, 46, 15, 92,
      51, 76, 57, 90, 10, 37, 21, 40, 0, 6, 82, 91, 43, 30, 62, 91, 10, 41,
      12, 4, 80, 77, 98, 50, 78, 35, 7, 1, 96, 67, 85, 4, 23, 38, 2, 57,
      4, 53, 0, 33, 2, 25, 14, 97, 87, 42, 15, 65, 19, 83, 67, 70, 80, 39,
      9, 5, 94, 86, 80, 92, 31, 17, 65, 51, 46, 66, 44, 3, 26, 0, 39, 20,
      11, 6, 55, 70, 11, 75, 82, 35, 47, 99, 5, 14, 23, 38, 94, 66, 64, 27,
      77, 50, 28, 25, 61, 10, 30, 15, 12, 24, 90, 25, 39, 47, 98, 83};
  const int64_t kCapacityArray[] = {850, 1400, 1500, 450, 1100};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 6339;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// 50-item instance with large coefficients (per the test name, hard for MIP
// solvers); the known optimum is 14723396.
TEST(KnapsackSolverTest, SolveVeryDifficultInstanceForMIPSolvers) {
  const int64_t kProfitArray[] = {
      840350, 492312, 1032839, 811082, 382846, 441114, 386610, 783293, 998199,
      579384, 642499, 247987, 892801, 193061, 494328, 314360, 730783, 308562,
      799683, 676459, 591170, 551284, 218343, 730920, 513370, 531444, 214762,
      316396, 350372, 961409, 894479, 83114, 195613, 383992, 823112, 193730,
      198549, 454831, 239367, 712908, 819866, 156561, 445686, 668469, 526442,
      36085, 414327, 10450, 524913, 553583};
  const int64_t kWeightArray[] = {
      893821, 405554, 713114, 498726, 230483, 640836, 226067, 975043, 700562,
      627861, 720734, 205614, 458490, 181755, 616093, 447249, 852340, 415851,
      454072, 598218, 549422, 699689, 163910, 954988, 625015, 394931, 310015,
      207170, 194778, 758551, 956952, 74310, 276930, 313596, 481395, 170299,
      115532, 515859, 189626, 959419, 686824, 183455, 568483, 409119, 655220,
      24540, 523383, 9381, 735775, 812811};
  const int64_t kCapacityArray[] = {10922833};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 14723396;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Item 0 cannot be in the solution (its weight, 32, exceeds the capacity of
// 23); the optimum (profit 56) is built from the other items.
TEST(KnapsackSolverTest, Item0IsNotPartOfTheOptimalSolution) {
  const int64_t kProfitArray[] = {16, 11, 26, 30, 31};
  const int64_t kWeightArray[] = {32, 9, 11, 9, 30};
  const int64_t kCapacityArray[] = {23};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 56;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Regression instance for rounding issues; the known optimum is 146.
// (The test name originally contained the typo "Rouding".)
TEST(KnapsackSolverTest, CheckNoRoundingIssues) {
  const int64_t kProfitArray[] = {2, 28, 30, 29, 7, 27, 18, 13, 32, 9};
  const int64_t kWeightArray[] = {1, 16, 22, 13, 11, 23, 5, 21, 29, 18};
  const int64_t kCapacityArray[] = {97};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 146;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
// Instance where (per the test name) the reduction step can fix every item;
// the known optimum is 60.
TEST(KnapsackSolverTest, AllItemsReduced) {
  const int64_t kProfitArray[] = {30, 9, 21, 5, 19};
  const int64_t kWeightArray[] = {10, 4, 19, 6, 28};
  const int64_t kCapacityArray[] = {34};
  const int kArraySize = ABSL_ARRAYSIZE(kProfitArray);
  const int kNumberOfDimensions = ABSL_ARRAYSIZE(kCapacityArray);
  const int kOptimalProfit = 60;
  const int64_t profit =
      SolveKnapsackProblem(kProfitArray, kArraySize, kWeightArray,
                           kCapacityArray, kNumberOfDimensions);
  EXPECT_EQ(kOptimalProfit, profit);
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,108 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/sparse_permutation.h"
#include <memory>
#include <random>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/random/distributions.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace operations_research {
namespace {
using ::testing::ElementsAre;
// Builds the permutation (4 2 7)(6 1) and checks DebugString(), NumCycles(),
// Support() and Cycle() accessors.
TEST(SparsePermutationTest, SimpleExample) {
  SparsePermutation permutation(12);
  permutation.AddToCurrentCycle(4);
  permutation.AddToCurrentCycle(2);
  permutation.AddToCurrentCycle(7);
  permutation.CloseCurrentCycle();
  permutation.AddToCurrentCycle(6);
  permutation.AddToCurrentCycle(1);
  permutation.CloseCurrentCycle();
  EXPECT_EQ("(1 6) (2 7 4)", permutation.DebugString());
  EXPECT_EQ(2, permutation.NumCycles());
  EXPECT_EQ(5, permutation.Support().size());
  EXPECT_THAT(permutation.Cycle(0), ElementsAre(4, 2, 7));
  EXPECT_THAT(permutation.Cycle(1), ElementsAre(6, 1));
}
// RemoveCycles() deletes the cycles at the given indices; an empty index
// list is a no-op, including on an already-identity permutation.
TEST(SparsePermutationTest, RemoveCycles) {
  SparsePermutation permutation(12);
  permutation.AddToCurrentCycle(4);
  permutation.AddToCurrentCycle(2);
  permutation.AddToCurrentCycle(7);
  permutation.CloseCurrentCycle();
  permutation.AddToCurrentCycle(6);
  permutation.AddToCurrentCycle(1);
  permutation.CloseCurrentCycle();
  permutation.AddToCurrentCycle(9);
  permutation.AddToCurrentCycle(8);
  permutation.CloseCurrentCycle();
  EXPECT_EQ("(1 6) (2 7 4) (8 9)", permutation.DebugString());
  permutation.RemoveCycles({});
  EXPECT_EQ("(1 6) (2 7 4) (8 9)", permutation.DebugString());
  permutation.RemoveCycles({2, 1});
  EXPECT_EQ("(2 7 4)", permutation.DebugString());
  permutation.RemoveCycles({0});
  EXPECT_EQ("", permutation.DebugString());
  permutation.RemoveCycles({});
  EXPECT_EQ("", permutation.DebugString());
}
// A freshly constructed permutation is the identity: no cycles, no support.
TEST(SparsePermutationTest, Identity) {
  SparsePermutation permutation(1000);
  EXPECT_EQ("", permutation.DebugString());
  EXPECT_EQ(0, permutation.Support().size());
  EXPECT_EQ(0, permutation.NumCycles());
}
// Generate a bunch of permutation on a 'huge' space, but that have very few
// displacements. This would OOM if the implementation was O(N); we verify
// that it doesn't.
TEST(SparsePermutationTest, Sparsity) {
  const int kSpaceSize = 1000000000;  // Fits largely in an int32_t.
  const int kNumPermutationsToGenerate = 1000;
  const int kAverageCycleSize = 10;
  const int kAverageNumCycles = 3;
  std::vector<std::unique_ptr<SparsePermutation>> permutations;
  std::mt19937 random(12345);
  absl::flat_hash_set<int> cycle;
  for (int i = 0; i < kNumPermutationsToGenerate; ++i) {
    // make_unique instead of a raw `new` handed to emplace_back: exception-
    // safe and expresses ownership directly.
    permutations.push_back(std::make_unique<SparsePermutation>(kSpaceSize));
    SparsePermutation* const permutation = permutations.back().get();
    const int num_cycles = absl::Uniform(random, 0, 2 * kAverageNumCycles + 1);
    for (int c = 0; c < num_cycles; ++c) {
      // Cycle sizes in [2, 2 * kAverageCycleSize].
      const int cycle_size =
          absl::Uniform(random, 0, 2 * kAverageCycleSize - 1) + 2;
      cycle.clear();
      // Cast avoids a signed/unsigned comparison warning against size().
      while (cycle.size() < static_cast<size_t>(cycle_size)) {
        cycle.insert(absl::Uniform(random, 0, kSpaceSize));
      }
      for (const int e : cycle) permutation->AddToCurrentCycle(e);
      permutation->CloseCurrentCycle();
    }
    // The string representation must stay small: memory usage is a function
    // of the support, not of kSpaceSize.
    EXPECT_LT(permutation->DebugString().size(),
              100 * kAverageCycleSize * kAverageNumCycles)
        << permutation->DebugString();
  }
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,368 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/weighted_set_covering.h"
#include <algorithm>
#include <vector>
#include "absl/random/random.h"
#include "ortools/base/adjustable_priority_queue-inl.h"
#include "ortools/base/logging.h"
using operations_research::glop::StrictITIVector;
namespace operations_research {
void WeightedSetCovering::Init() {
penalized_cost_ = subset_cost_;
priority_ = subset_cost_;
for (SubsetIndex subset(0); subset < priority_.size(); ++subset) {
priority_[subset] /= columns_[subset].size().value();
}
SubsetIndex size = subset_cost_.size();
assignment_.resize(size, false);
assignment_.assign(size, false);
times_penalized_.resize(size, 0);
times_penalized_.assign(size, 0);
num_subsets_covering_element_.resize(num_elements_, SubsetIndex(0));
num_subsets_covering_element_.assign(num_elements_, SubsetIndex(0));
iteration_ = 0;
cost_ = 0.0;
best_cost_ = 0.0;
// TODO(user): make these changeable by the user.
penalty_factor_ = 0.2;
lagrangian_factor_ = 100.0;
}
// Appends a new subset with the given cost and no elements; elements are
// then added with AddElementToLastSubset().
void WeightedSetCovering::AddEmptySubset(Cost cost) {
  subset_cost_.push_back(cost);
  columns_.push_back(StrictITIVector<EntryIndex, ElementIndex>());
}
// Adds `element` to the most recently added subset, growing the known
// element range if this element index is the largest seen so far.
void WeightedSetCovering::AddElementToLastSubset(int element) {
  ElementIndex new_element(element);
  columns_.back().push_back(new_element);
  num_elements_ = std::max(num_elements_, new_element + 1);
}
// Sets the cost of `subset`, creating empty zero-cost subsets up to that
// index if they do not exist yet.
void WeightedSetCovering::SetCostOfSubset(Cost cost, int subset) {
  const SubsetIndex subset_index(subset);
  const SubsetIndex size = std::max(columns_.size(), subset_index + 1);
  columns_.resize(size, StrictITIVector<EntryIndex, ElementIndex>());
  subset_cost_.resize(size, 0.0);
  subset_cost_[subset_index] = cost;
}
// Adds `element` to `subset`, creating empty zero-cost subsets up to that
// index if needed, and growing the known element range.
void WeightedSetCovering::AddElementToSubset(int element, int subset) {
  const SubsetIndex subset_index(subset);
  const SubsetIndex size = std::max(columns_.size(), subset_index + 1);
  subset_cost_.resize(size, 0.0);
  columns_.resize(size, StrictITIVector<EntryIndex, ElementIndex>());
  const ElementIndex new_element(element);
  columns_[subset_index].push_back(new_element);
  num_elements_ = std::max(num_elements_, new_element + 1);
}
// Builds an initial cover greedily: repeatedly picks the subset with the
// smallest cost per newly covered element (plus a Lagrangian penalty for
// elements it would cover redundantly) until every element is covered.
void WeightedSetCovering::GenerateGreedySolution() {
  // Caches subsets known to cover no yet-uncovered element, so they can be
  // skipped in later rounds.
  StrictITIVector<SubsetIndex, bool> covers_non_covered_elements(
      columns_.size(), true);
  // num_zeros counts elements whose coverage is still zero.
  for (ElementIndex num_zeros(num_elements_); num_zeros > 0;) {
    VLOG(1) << "Remaining zeros: " << num_zeros
            << ", matrix size: " << columns_.size().value();
    Cost min_reduced_cost = kMaxCost;
    SubsetIndex best_subset = kNotFound;
    // NOTE(review): best_cost_increase is written below but never read.
    Cost best_cost_increase = 1e308;
    for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
      if (!covers_non_covered_elements[subset]) {
        continue;
      }
      // Cheap pruning: even if all its elements were newly covered, this
      // subset could not beat the current best reduced cost.
      if (columns_[subset].size().value() * min_reduced_cost <
          subset_cost_[subset]) {
        continue;
      }
      ElementIndex num_elements_covered(0);
      ElementIndex num_elements_already_covered(0);
      for (const ElementIndex element : columns_[subset]) {
        if (num_subsets_covering_element_[element] == 0) {
          ++num_elements_covered;
        } else {
          ++num_elements_already_covered;
        }
      }
      if (num_elements_covered == 0) {
        covers_non_covered_elements[subset] = false;
        continue;
      }
      // Cost of taking this subset, penalizing redundant coverage.
      Cost cost_increase =
          subset_cost_[subset] +
          lagrangian_factor_ * num_elements_already_covered.value();
      if (cost_increase < min_reduced_cost * num_elements_covered.value()) {
        min_reduced_cost = cost_increase / num_elements_covered.value();
        best_subset = subset;
        best_cost_increase = cost_increase;
      }
    }
    CHECK_NE(kNotFound, best_subset);
    // The chosen subset can no longer cover anything new.
    covers_non_covered_elements[best_subset] = false;
    assignment_[best_subset] = true;
    cost_ += subset_cost_[best_subset];
    for (const ElementIndex element : columns_[best_subset]) {
      if (num_subsets_covering_element_[element] == 0) {
        --num_zeros;
      }
      ++num_subsets_covering_element_[element];
    }
  }
  DCHECK(CheckSolution());
  DCHECK(CheckFeasibility());
  StoreSolution();
}
// Trivial (and expensive) initial solution: select every subset, summing
// all costs and incrementing the coverage count of each covered element.
void WeightedSetCovering::UseEverything() {
  for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
    assignment_[subset] = true;
    cost_ += subset_cost_[subset];
    for (const ElementIndex element : columns_[subset]) {
      ++num_subsets_covering_element_[element];
    }
  }
  DCHECK(CheckFeasibility());
  StoreSolution();
}
// Snapshots the current assignment and cost as the best solution so far.
void WeightedSetCovering::StoreSolution() {
  VLOG(1) << "Storing solution with cost " << cost_;
  best_cost_ = cost_;
  best_assignment_ = assignment_;
}
// Reinstates the best solution found so far and rebuilds the per-element
// coverage counters from its assignment.
void WeightedSetCovering::RestoreSolution() {
  assignment_ = best_assignment_;
  cost_ = best_cost_;
  num_subsets_covering_element_.assign(num_elements_, SubsetIndex(0));
  for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
    if (assignment_[subset]) {
      for (const ElementIndex element : columns_[subset]) {
        ++num_subsets_covering_element_[element];
      }
    }
  }
  DCHECK(CheckSolution());
  DCHECK(CheckFeasibility());
}
// Consistency check: recomputes from scratch the cost and the per-element
// coverage of the current assignment and compares them with the cached
// cost_ and num_subsets_covering_element_. Returns false (after logging an
// error) on the first mismatch.
bool WeightedSetCovering::CheckSolution() const {
  Cost cost = 0;
  StrictITIVector<ElementIndex, SubsetIndex> coverage(num_elements_,
                                                      SubsetIndex(0));
  for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
    if (assignment_[subset]) {
      cost += subset_cost_[subset];
      for (const ElementIndex element : columns_[subset]) {
        ++coverage[element];
      }
    }
  }
  if (cost != cost_) {
    LOG(ERROR) << "Error on cost.";
    return false;
  }
  for (ElementIndex element(0); element < num_elements_; ++element) {
    if (coverage[element] != num_subsets_covering_element_[element]) {
      LOG(ERROR) << "Error on the coverage of element " << element.value();
      return false;
    }
  }
  // NOTE(review): this LOG(INFO) fires on every (D)CHECK'd call; consider
  // VLOG.
  LOG(INFO) << "Solution cost: " << cost;
  return true;
}
// Checks that the *problem* is feasible: every element appears in at least
// one subset (over all columns, regardless of the current assignment).
// Also logs the maximum number of subsets containing any single element.
bool WeightedSetCovering::CheckFeasibility() const {
  StrictITIVector<ElementIndex, SubsetIndex> coverage(num_elements_,
                                                      SubsetIndex(0));
  for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
    for (const ElementIndex element : columns_[subset]) {
      ++coverage[element];
    }
  }
  for (ElementIndex element(0); element < num_elements_; ++element) {
    if (coverage[element] == 0) {
      LOG(ERROR) << "Element " << element.value() << " is not covered.";
      return false;
    }
  }
  SubsetIndex max_coverage(0);
  for (auto element_coverage : coverage) {
    max_coverage = std::max(element_coverage, max_coverage);
    VLOG(2) << element_coverage;
  }
  LOG(INFO) << "Max coverage = " << max_coverage;
  return true;
}
// Counts the elements of `subset` whose coverage count is greater than 1 --
// i.e., when `subset` is selected, the elements that would remain covered if
// it were removed. NOTE(review): the name suggests "covered" but the code
// counts redundantly covered elements; confirm intent.
ElementIndex WeightedSetCovering::ComputeNumElementsCovered(
    SubsetIndex subset) const {
  ElementIndex num_elements_covered(0);
  for (const ElementIndex element : columns_[subset]) {
    if (num_subsets_covering_element_[element] > 1) {
      ++num_elements_covered;
    }
  }
  return num_elements_covered;
}
// Toggles 'subset' in or out of the current solution, incrementally
// maintaining the solution cost and the per-element coverage counts.
// The flipped subset is recorded in the tabu list, and the best solution is
// stored whenever the new cost improves on it.
void WeightedSetCovering::Flip(SubsetIndex subset) {
  tabu_list_.Add(subset);
  if (!assignment_[subset]) {
    // Insert 'subset' into the solution.
    assignment_[subset] = true;
    cost_ += subset_cost_[subset];
    for (const ElementIndex element : columns_[subset]) {
      ++num_subsets_covering_element_[element];
    }
  } else {
    // Remove 'subset' from the solution.
    assignment_[subset] = false;
    cost_ -= subset_cost_[subset];
    for (const ElementIndex element : columns_[subset]) {
      --num_subsets_covering_element_[element];
    }
  }
  // VLOG(1) instead of LOG(INFO): Flip() runs on every local-search
  // iteration, and unconditional logging floods the log (the neighboring
  // trace statements in this file already use VLOG).
  VLOG(1) << "Flipping " << subset.value() << " cost = " << cost_
          << " best cost = " << best_cost_;
  if (cost_ < best_cost_) {
    StoreSolution();
  }
}
// Returns true iff every element of 'subset' is covered by at least one
// other selected subset, i.e. removing 'subset' keeps the solution feasible.
bool WeightedSetCovering::CanBeRemoved(SubsetIndex subset) const {
  bool removable = true;
  for (const ElementIndex element : columns_[subset]) {
    DCHECK_LT(0, num_subsets_covering_element_[element]);
    if (num_subsets_covering_element_[element] == 1) {
      // 'subset' is the only cover of this element: removal is not allowed.
      removable = false;
      break;
    }
  }
  return removable;
}
// Steepest-descent local search: at each iteration, removes the redundant
// selected subset (one whose elements are all covered by other selected
// subsets) with the largest cost, until no such subset exists or
// 'num_iterations' is reached. Stores the resulting solution.
void WeightedSetCovering::Steepest(int num_iterations) {
  // The loop counter used to be declared as 'iteration_', which silently
  // shadowed the member of the same name; it is now a plainly-named local.
  for (int iteration = 0; iteration < num_iterations; ++iteration) {
    Cost largest_cost_decrease = 0;
    SubsetIndex best_subset = kNotFound;
    for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
      if (!assignment_[subset]) {
        continue;
      }
      // Removing 'subset' would decrease the cost by its own cost.
      const Cost cost_decrease = subset_cost_[subset];
      VLOG(1) << "Subset: " << subset.value() << " at " << assignment_[subset]
              << " can be removed = " << CanBeRemoved(subset)
              << " cost_decrease = " << cost_decrease;
      if (cost_decrease > largest_cost_decrease && CanBeRemoved(subset)) {
        largest_cost_decrease = cost_decrease;
        best_subset = subset;
      }
    }
    if (best_subset == kNotFound) {
      // No removable subset: local minimum reached.
      StoreSolution();
      return;
    }
    Flip(best_subset);
  }
  StoreSolution();
}
// Resets the guided-search state: penalized costs are reset to the original
// subset costs, priorities are recomputed as cost amortized per element, and
// the iteration counter is cleared.
// NOTE(review): times_penalized_ and the tabu list are NOT reinitialized
// here, although the header comment claims the tabu list is cleared --
// confirm whether they should also be reset.
void WeightedSetCovering::ResetGuidedTabuSearch() {
  penalized_cost_ = subset_cost_;
  priority_ = subset_cost_;
  for (SubsetIndex subset(0); subset < priority_.size(); ++subset) {
    // Priority of a subset = its cost divided by the number of elements it
    // contains.
    priority_[subset] /= columns_[subset].size().value();
  }
  iteration_ = 0;
}
// Guided tabu search: at each iteration, flips the subset yielding the
// smallest increase of the penalized cost, escaping local minima thanks to
// the penalties maintained by UpdatePenalties() and to the tabu list.
// A tabu subset may still be removed if doing so beats the best known cost
// (aspiration criterion). Restores the best solution before returning.
void WeightedSetCovering::GuidedTabuSearch(int num_iterations) {
  // Penalized cost of the current solution, maintained incrementally below.
  Cost gts_cost = 0;
  for (const Cost cost : penalized_cost_) {
    gts_cost += cost;
  }
  // The loop counter used to be declared as 'iteration_', which silently
  // shadowed the member of the same name; it is now a plainly-named local.
  for (int iteration = 0; iteration < num_iterations; ++iteration) {
    // Sentinel larger than any reachable penalized cost.
    Cost smallest_penalized_cost_increase = 1e308;
    SubsetIndex best_subset = kNotFound;
    for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
      const Cost penalized_delta = penalized_cost_[subset];
      VLOG(1) << "Subset: " << subset.value() << " at " << assignment_[subset]
              << " can be removed = " << CanBeRemoved(subset)
              << " penalized_delta = " << penalized_delta
              << " smallest_penalized_cost_increase = "
              << smallest_penalized_cost_increase;
      if (!assignment_[subset]) {
        // Adding 'subset' increases the penalized cost by penalized_delta.
        if (penalized_delta < smallest_penalized_cost_increase) {
          smallest_penalized_cost_increase = penalized_delta;
          best_subset = subset;
        }
      } else {
        // Removing 'subset' decreases the penalized cost by penalized_delta.
        // The removal must keep the solution feasible, and must not be tabu
        // unless it leads to a new best cost (aspiration criterion).
        if (-penalized_delta < smallest_penalized_cost_increase &&
            CanBeRemoved(subset) &&
            (!tabu_list_.Contains(subset) ||
             cost_ - subset_cost_[subset] < best_cost_)) {
          smallest_penalized_cost_increase = -penalized_delta;
          best_subset = subset;
        }
      }
    }
    if (best_subset == kNotFound) {  // Local minimum reached.
      RestoreSolution();
      return;
    }
    gts_cost += smallest_penalized_cost_increase;
    UpdatePenalties();
    Flip(best_subset);
    VLOG(2) << "Iteration:" << iteration << ", current cost = " << cost_
            << ", best cost = " << best_cost_
            << ", penalized cost = " << gts_cost;
  }
  RestoreSolution();
}
bool WeightedSetCovering::Probability() const {
// Flip a coin.
return absl::Bernoulli(absl::BitGen(), 0.5);
}
// Guided-local-search penalty update: finds the largest subset priority,
// then, with probability 1/2, penalizes every selected subset whose priority
// is within a small radius of that maximum. Penalizing a subset lowers its
// priority and raises its penalized cost, steering GuidedTabuSearch() away
// from it.
void WeightedSetCovering::UpdatePenalties() {
  double largest_priority = -1.0;
  for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
    if (assignment_[subset]) {
      // NOTE(review): this divides the RUNNING MAXIMUM (not priority_[subset])
      // by the over-coverage count on every iteration, which looks suspicious
      // given the TODO just below -- confirm the intended formula.
      largest_priority = std::max(largest_priority, priority_[subset]) /
                         ComputeNumElementsCovered(subset).value();
      // divide priority_ by ComputeNumElementsCovered(subset).value()) ?
    }
  }
  // Subsets whose priority lies within 'radius' of the maximum are candidates
  // for penalization. NOTE(review): the header declares kPenaltyUpdateEpsilon
  // (1e-1) but 1e-8 is hard-coded here -- confirm which one is intended.
  const double radius = 1e-8 * largest_priority;
  for (SubsetIndex subset(0); subset < columns_.size(); ++subset) {
    if (assignment_[subset]) {
      const double subset_priority = priority_[subset];
      // Penalize near-maximal-priority subsets with probability 1/2.
      if (largest_priority - subset_priority <= radius && Probability()) {
        ++times_penalized_[subset];
        const int times_penalized = times_penalized_[subset];
        // Amortized cost of the subset per element it contains.
        const Cost cost =
            subset_cost_[subset] / columns_[subset].size().value();
        // More penalizations => lower priority, higher penalized cost.
        priority_[subset] = cost / (1 + times_penalized);
        penalized_cost_[subset] =
            cost * (1 + penalty_factor_ * times_penalized);
      }
    }
  }
}
} // namespace operations_research

View File

@@ -0,0 +1,232 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Minimum cost set covering.
//
// Let S be a set, let (T_j) be a family (j in J) of subsets of S, and c_j costs
// associated to each T_j.
//
// The minimum-cost set-covering problem consists in finding a subset K of J
// such that the union of the T_k for k in K is equal to S, while minimizing
// the total cost, i.e. the sum of the c_k for k in K.
//
// TODO(user): Explain the set covering problem in matrix terms.
//
// The first solution is obtained using the Chvatal heuristic, that guarantees
// that the solution is at most 1 + log(n) times the optimal value.
// Vasek Chvatal, 1979. A greedy heuristic for the set-covering problem.
// Mathematics of Operations Research, 4(3):233-235, 1979.
// www.jstor.org/stable/3689577
//
// The idea is basically to compute the cost per element for a T_j to cover
// them, and to start by the ones with the best such amortized costs.
//
// The following paper dives into the details of this class of algorithms.
// Neal E. Young, Greedy Set-Cover Algorithms (1974-1979, Chvatal, Johnson,
// Lovasz, Stein), in Kao, ed., Encyclopedia of Algorithms.
// Draft at: http://www.cs.ucr.edu/~neal/non_arxiv/Young08SetCover.pdf
//
// The first solution is then improved by using local search descent, which
// eliminates the T_j's that have no interest in the solution.
//
// Also implemented is a guided local search metaheuristic. TODO(user): Add a
// reference to guided local search.
//
// TODO(user): use "ortools/base/adjustable_priority_queue.h" to make the
// computation of the cost per element and the sorting of the columns more
// efficient.
//
// TODO(user): add Large Neighborhood Search that removes a collection of T_j
// (with a parameterized way to choose them), and that runs the algorithm here
// on the corresponding subproblem.
//
// TODO(user): represent the problem as its transpose, in order to find the
// elements whose coverage is the largest, to help choose the T_j that may
// be removed from the solution and re-optimized.
//
// TODO(user): make Large Neighborhood Search concurrent, solving independent
// subproblems in different threads.
//
// TODO(user): be able to choose the collection of T_j from the elements they
// contain so as to focus the search.
#ifndef OR_TOOLS_ALGORITHMS_WEIGHTED_SET_COVERING_H_
#define OR_TOOLS_ALGORITHMS_WEIGHTED_SET_COVERING_H_
#include <sys/types.h>

#include <limits>
#include <vector>

#include "ortools/lp_data/lp_types.h"  // For StrictITIVector.
namespace operations_research {
// Costs are plain doubles.
typedef double Cost;
// Strong index types prevent accidentally mixing element indices, subset
// indices and sparse-entry indices.
DEFINE_STRONG_INDEX_TYPE(ElementIndex);
DEFINE_STRONG_INDEX_TYPE(SubsetIndex);
DEFINE_STRONG_INDEX_TYPE(EntryIndex);
// Sentinel returned when no subset matches a search.
const SubsetIndex kNotFound(-1);
// Upper bound usable as an "infinite" cost.
const Cost kMaxCost = std::numeric_limits<Cost>::infinity();
// TODO(user): This should be a parameter.
// NOTE(review): this constant does not appear to be used in the visible
// implementation (which hard-codes 1e-8 in UpdatePenalties) -- confirm.
const Cost kPenaltyUpdateEpsilon = 1e-1;
// TODO(user): consider replacing with absl equivalent, which are less strict,
// unfortunately: the return type for size() is a simple size_t with absl, and
// not the Index as in StrictITIVector, which makes the code less elegant.
using SubsetCostVector = glop::StrictITIVector<SubsetIndex, Cost>;
using SparseColumn = glop::StrictITIVector<EntryIndex, ElementIndex>;
using SparseRow = glop::StrictITIVector<EntryIndex, SubsetIndex>;
// Fixed-capacity circular buffer acting as a tabu list: it remembers the
// last 'size' items added and answers membership queries with a linear scan
// (the list is expected to stay small).
template <typename T>
class TabuList {
 public:
  explicit TabuList(int size) : array_(size, T(-1)), fill_(0), index_(0) {}

  // Records 't', overwriting the oldest entry once the buffer is full.
  void Add(T t) {
    const int capacity = array_.size();
    array_[index_] = t;
    index_ = (index_ + 1) % capacity;
    if (fill_ < capacity) {
      ++fill_;
    }
  }

  // Returns true iff 't' is among the remembered items.
  bool Contains(T t) const {
    for (int i = 0; i < fill_; ++i) {
      if (array_[i] == t) {
        return true;
      }
    }
    return false;
  }

 private:
  std::vector<T> array_;  // Circular storage for the remembered items.
  int fill_;              // Number of valid entries in array_.
  int index_;             // Next write position in array_.
};
// TODO(user): Expand documentation here.
// The solving procedure looks like:
// set_covering.GenerateGreedySolution();
// set_covering.Steepest(num_steepest_iterations);
// set_covering.GuidedTabuSearch(num_guided_tabu_search_iterations);
// TODO(user): Explain set covering in matrix terms.
// Solver for the minimum-cost weighted set-covering problem.
//
// The solving procedure looks like:
//   WeightedSetCovering set_covering;
//   // ... fill the problem with AddEmptySubset / AddElementToLastSubset ...
//   set_covering.Init();
//   set_covering.GenerateGreedySolution();
//   set_covering.Steepest(num_steepest_iterations);
//   set_covering.GuidedTabuSearch(num_guided_tabu_search_iterations);
//   set_covering.RestoreSolution();
// TODO(user): Explain set covering in matrix terms.
class WeightedSetCovering {
 public:
  // Constructs an empty weighted set covering problem.
  // TODO(user): find a meaningful way to set the Tabu list size, if ever
  // it is useful.
  WeightedSetCovering() : iteration_(0), tabu_list_(17) {}

  // The class is copyable and movable.
  WeightedSetCovering(const WeightedSetCovering&) = default;
  WeightedSetCovering(WeightedSetCovering&&) = default;
  WeightedSetCovering& operator=(const WeightedSetCovering&) = default;
  WeightedSetCovering& operator=(WeightedSetCovering&&) = default;

  // Adds an empty subset with a cost to the problem. In matrix terms, this
  // adds a column to the matrix.
  void AddEmptySubset(Cost cost);

  // Adds an element to the last subset created. In matrix terms, this adds a
  // 1 on row 'element' of the current last column of the matrix.
  void AddElementToLastSubset(int element);

  // Sets the cost of an already existing subset.
  void SetCostOfSubset(Cost cost, int subset);

  // Adds 'element' to an already existing subset.
  void AddElementToSubset(int element, int subset);

  // Initializes the solver once the data is set.
  void Init();

  // Generates a solution using a greedy algorithm (Chvatal's heuristic; see
  // the file-level comment).
  void GenerateGreedySolution();

  // Selects all the subsets, presumably producing a trivially feasible
  // (though expensive) solution -- implementation not shown here.
  void UseEverything();

  // Returns true if the current solution is indeed a solution, i.e. all the
  // elements of the set are covered by the selected subsets.
  bool CheckSolution() const;

  // Returns true if the problem is feasible. The problem is trivially
  // infeasible if one element does not appear in any of the subsets.
  bool CheckFeasibility() const;

  // Runs a steepest local search that removes redundant subsets in
  // decreasing-cost order.
  void Steepest(int num_iterations);

  // Runs a guided tabu search.
  // TODO(user): Describe what a Guided Tabu Search is.
  void GuidedTabuSearch(int num_iterations);

  // Resets the guided tabu search by clearing the penalties and the tabu list.
  void ResetGuidedTabuSearch();

  // Stores the current solution as the best solution found so far.
  void StoreSolution();

  // Restores the best stored solution as the current one.
  void RestoreSolution();

  // TODO(user) P0: add accessors to current and best costs, and assignments.

 private:
  // Number of elements of 'subset' whose coverage count is greater than 1.
  ElementIndex ComputeNumElementsCovered(SubsetIndex subset) const;

  // Returns true with probability 0.5.
  bool Probability() const;

  // Toggles 'subset' in/out of the solution, maintaining the invariants.
  void Flip(SubsetIndex subset);

  // True iff removing 'subset' keeps every element covered.
  bool CanBeRemoved(SubsetIndex subset) const;

  // Guided-local-search penalty update step.
  void UpdatePenalties();

  // Number of elements in the ground set.
  ElementIndex num_elements_;

  // Input data: cost of each subset.
  SubsetCostVector subset_cost_;

  // Input data: for each subset, the elements it contains (column), and for
  // each element, the subsets containing it (row).
  glop::StrictITIVector<SubsetIndex, SparseColumn> columns_;
  glop::StrictITIVector<ElementIndex, SparseRow> rows_;

  // Reduced costs.
  glop::StrictITIVector<SubsetIndex, Cost> reduced_cost_;

  // Current assignment: cost and selection flag per subset.
  Cost cost_;
  glop::StrictITIVector<SubsetIndex, bool> assignment_;

  // Best solution found so far.
  Cost best_cost_;
  glop::StrictITIVector<SubsetIndex, bool> best_assignment_;

  // Search handling data.
  int iteration_;
  // For each element, the number of selected subsets covering it.
  glop::StrictITIVector<ElementIndex, SubsetIndex>
      num_subsets_covering_element_;
  double lagrangian_factor_;

  // Guided local search-related data.
  double penalty_factor_;
  SubsetCostVector penalized_cost_;
  SubsetCostVector priority_;
  glop::StrictITIVector<SubsetIndex, int> times_penalized_;

  // Tabu-search related data.
  TabuList<SubsetIndex> tabu_list_;
};
} // namespace operations_research
#endif // OR_TOOLS_ALGORITHMS_WEIGHTED_SET_COVERING_H_

View File

@@ -0,0 +1,88 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/algorithms/weighted_set_covering.h"
#include "gtest/gtest.h"
#include "ortools/base/commandlineflags.h"
#include "ortools/base/logging.h"
namespace operations_research {
namespace {
// Smoke test: builds a tiny instance and runs the full solving pipeline
// (greedy, steepest descent, guided tabu search) end to end. No result is
// asserted; the test only checks that nothing crashes or DCHECK-fails.
TEST(SetCoveringTest, InitialValues) {
  WeightedSetCovering set_covering;
  // Subsets: {0}, {1, 2}, {1}, {2}; all with cost 1.
  set_covering.AddEmptySubset(1);
  set_covering.AddElementToLastSubset(0);
  set_covering.AddEmptySubset(1);
  set_covering.AddElementToLastSubset(1);
  set_covering.AddElementToLastSubset(2);
  set_covering.AddEmptySubset(1);
  set_covering.AddElementToLastSubset(1);
  set_covering.AddEmptySubset(1);
  set_covering.AddElementToLastSubset(2);
  set_covering.Init();
  set_covering.GenerateGreedySolution();
  set_covering.Steepest(500);
  set_covering.GuidedTabuSearch(500);
  set_covering.RestoreSolution();
}
// The two subsets cover only elements 0 and 3, so elements 1 and 2 are not
// covered by any subset and the instance must be reported as infeasible.
TEST(SetCoveringTest, Infeasible) {
  WeightedSetCovering set_covering;
  set_covering.AddEmptySubset(1);
  set_covering.AddElementToLastSubset(0);
  set_covering.AddEmptySubset(1);
  set_covering.AddElementToLastSubset(3);
  set_covering.Init();
  EXPECT_FALSE(set_covering.CheckFeasibility());
}
// Knight's covering problem on a 25x25 board: each subset corresponds to a
// square and contains that square plus all the squares a knight placed on it
// attacks. A cover is a set of knights such that every square is occupied or
// attacked.
TEST(SetCoveringTest, KnightsCover) {
  // The 8 knight moves, as parallel (row, col) deltas.
  const int knight_row_move[] = {2, 1, -1, -2, -2, -1, 1, 2};
  const int knight_col_move[] = {1, 2, 2, 1, -1, -2, -2, -1};
  WeightedSetCovering set_covering;
  const int num_rows = 25;
  const int num_cols = 25;
  for (int row = 0; row < num_rows; ++row) {
    for (int col = 0; col < num_cols; ++col) {
      // One unit-cost subset per square, containing the square itself...
      set_covering.AddEmptySubset(1);
      set_covering.AddElementToLastSubset(row * num_cols + col);
      // ...and every in-board square a knight there attacks.
      for (int i = 0; i < 8; ++i) {
        const int new_row = row + knight_row_move[i];
        const int new_col = col + knight_col_move[i];
        if (new_row >= 0 && new_row < num_rows && new_col >= 0 &&
            new_col < num_cols) {
          set_covering.AddElementToLastSubset(new_row * num_cols + new_col);
        }
      }
    }
  }
  // NOTE(review): CheckFeasibility() is called before Init() here, unlike the
  // other tests -- confirm this ordering is supported.
  EXPECT_TRUE(set_covering.CheckFeasibility());
  // Sanity check: taking every subset is a valid (if expensive) solution.
  set_covering.Init();
  set_covering.UseEverything();
  set_covering.RestoreSolution();
  EXPECT_TRUE(set_covering.CheckSolution());
  // Solve for real: greedy start, steepest descent, then repeated guided
  // tabu search rounds with penalty resets in between.
  set_covering.Init();
  set_covering.GenerateGreedySolution();
  set_covering.Steepest(500);
  for (int i = 0; i < 50; ++i) {
    set_covering.GuidedTabuSearch(100000);
    set_covering.ResetGuidedTabuSearch();
  }
  set_covering.RestoreSolution();
  EXPECT_TRUE(set_covering.CheckSolution());
}
} // namespace
} // namespace operations_research

View File

@@ -113,6 +113,15 @@ cc_library(
deps = [":integral_types"],
)
# Legacy Closure/Callback compatibility layer (callback.h) and the
# std::function-to-Callback adapter (to_callback.h).
cc_library(
    name = "callback",
    hdrs = [
        "callback.h",
        "to_callback.h",
    ],
    deps = [":base"],
)
cc_library(
name = "case",
srcs = ["case.cc"],
@@ -516,6 +525,7 @@ cc_library(
srcs = ["threadpool.cc"],
hdrs = ["threadpool.h"],
deps = [
":callback",
"@com_google_absl//absl/log:check",
"@com_google_absl//absl/synchronization",
],

29
ortools/base/callback.h Normal file
View File

@@ -0,0 +1,29 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Callback classes provide a generic interface for classes requiring
// callback from other classes.
#ifndef OR_TOOLS_BASE_CALLBACK_H_
#define OR_TOOLS_BASE_CALLBACK_H_
// A nullary callback: subclasses implement Run(), which the receiving code
// invokes. Kept for compatibility with legacy APIs predating
// std::function<void()>.
class Closure {
 public:
  virtual ~Closure() = default;

  // Executes the callback.
  virtual void Run() = 0;

 protected:
  // Only subclasses may construct a Closure. '= default' replaces the
  // user-provided empty body (modernize-use-equals-default), keeping the
  // class trivially constructible where possible.
  Closure() = default;
};
#endif // OR_TOOLS_BASE_CALLBACK_H_

View File

@@ -14,6 +14,7 @@
#include "ortools/base/threadpool.h"
#include "absl/log/check.h"
#include "ortools/base/callback.h"
namespace operations_research {
void RunWorker(void* data) {
@@ -87,4 +88,9 @@ void ThreadPool::Schedule(std::function<void()> closure) {
}
}
// Schedules 'closure' to run on the pool. 'closure' must not be null.
// Lifetime: the pool itself never deletes 'closure'; self-deleting closures
// (e.g. the adapters produced by ToCallback()) free themselves when Run()
// returns -- for other closure types, the caller keeps ownership.
void ThreadPool::Add(Closure* closure) {
  // CHECK instead of assert: assert() compiles away in NDEBUG builds, which
  // would defer a null dereference to an arbitrary worker thread; this file
  // already depends on absl/log/check.h.
  CHECK(closure != nullptr);
  Schedule([closure] { closure->Run(); });
}
} // namespace operations_research

View File

@@ -22,6 +22,8 @@
#include <thread> // NOLINT
#include <vector>
#include "ortools/base/callback.h"
namespace operations_research {
class ThreadPool {
public:
@@ -32,6 +34,7 @@ class ThreadPool {
void Schedule(std::function<void()> closure);
std::function<void()> GetNextTask();
void SetQueueCapacity(int capacity);
void Add(Closure* closure);
private:
const int num_workers_;

View File

@@ -0,0 +1,98 @@
// Copyright 2010-2022 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ToCallback allows a std::function<> object to be converted to
// Closure/Callback types accepted by older APIs. In fact, any functor, i.e.,
// a pure function or an object that has an "operator()", can be converted to
// Closure/Callback.
//
// Background: C++11 provides a std::function<Result(Args...)> type that stands
// for a typed piece of code that can be invoked. This is similar to the older
// functionality of Closure/Callback in callback.h.
#ifndef OR_TOOLS_BASE_TO_CALLBACK_H_
#define OR_TOOLS_BASE_TO_CALLBACK_H_
#include <type_traits>
#include "absl/log/check.h"
#include "ortools/base/logging.h"
// Meta-function mapping a callback class C to the function signature of its
// Run() method: CallbackToSig<C>::type is R(Args...) when C declares
// R C::Run(Args...).
template <typename C>
class CallbackToSig {
  // Helper extracting the signature from a pointer-to-member-function type.
  template <typename F>
  struct MFSig;
  template <typename R, typename U, typename... A>
  struct MFSig<R (U::*)(A...)> {
    using type = R(A...);
  };

 public:
  using type = typename MFSig<decltype(&C::Run)>::type;
};
// Adapter implementing a callback interface 'CallbackType' by delegating its
// Run() to an arbitrary functor. The primary template is only declared; the
// partial specialization below matches once the Run() signature has been
// deduced via CallbackToSig.
template <typename CallbackType, typename Functor,
          typename Sig = typename CallbackToSig<CallbackType>::type>
class FunctorCallback;

template <typename CallbackType, typename Functor, typename R, typename... Args>
class FunctorCallback<CallbackType, Functor, R(Args...)> final
    : public CallbackType {
 public:
  explicit FunctorCallback(Functor&& functor)
      : CallbackType(), functor_(std::move(functor)) {}

  // Invokes the wrapped functor, then deletes this object: these adapters
  // are single-use and self-owning.
  R Run(Args... args) override {
    // RAII helper so 'this' is deleted even if the functor throws.
    RunCallbackHelper helper(this);
    return static_cast<R>(
        static_cast<Functor&&>(functor_)(std::forward<Args>(args)...));
  }

 private:
  // Deletes the callback when Run() returns (or unwinds).
  struct RunCallbackHelper {
    explicit RunCallbackHelper(FunctorCallback* cb) : cb(cb) {}
    ~RunCallbackHelper() { delete cb; }
    FunctorCallback* cb;
  };

  Functor functor_;
};
// Intermediate object returned by ToCallback(): it holds a copy of the
// functor and converts, at most once, to a pointer of whatever callback type
// the call site requires. The conversion operator is rvalue-qualified so it
// only applies to the temporary returned by ToCallback().
template <typename Functor>
class FunctorCallbackBinder {
 public:
  explicit FunctorCallbackBinder(const Functor& functor) : functor_(functor) {}

  template <typename CallbackType>
  operator CallbackType*() && {  // NOLINT
    // Guard against converting the same binder twice: the functor has been
    // moved out after the first conversion.
    CHECK(!bound_) << "Returned ToCallback object has already been converted";
    bound_ = true;
    // The returned FunctorCallback deletes itself after Run().
    return new FunctorCallback<CallbackType, Functor>(std::move(functor_));
  }

 private:
  Functor functor_;
  bool bound_ = false;
};
// Converts any functor (lambda, function pointer, object with operator())
// into an object implicitly convertible to the legacy Closure/Callback
// pointer types. Usage: thread_pool->Add(ToCallback([] { ... }));
// std::decay strips references/const so the binder stores the functor by
// value.
template <typename Functor>
using ToCallbackResult =
    FunctorCallbackBinder<typename std::decay<Functor>::type>;

template <typename Functor>
ToCallbackResult<Functor> ToCallback(Functor&& functor) {
  return ToCallbackResult<Functor>(std::forward<Functor>(functor));
}
#endif // OR_TOOLS_BASE_TO_CALLBACK_H_

View File

@@ -21,8 +21,10 @@
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "ortools/base/integral_types.h"
#include "ortools/base/logging.h"
#include "ortools/constraint_solver/constraint_solver.h"

View File

@@ -22,6 +22,7 @@ Paths:
Includes [Dijkstra](https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm) and
[Bellman-Ford](https://en.wikipedia.org/wiki/Bellman%E2%80%93Ford_algorithm)
algorithms. These implementations are being deprecated.
* [hamiltonian_path.h](./hamiltonian_path.h): entry point for computing minimum
[Hamiltonian paths](https://en.wikipedia.org/wiki/Hamiltonian_path) and
cycles on directed graphs with costs on arcs, using a dynamic-programming