graph: fix build

This commit is contained in:
Corentin Le Molgat
2024-01-19 18:31:16 +01:00
parent 8b78a63b53
commit 2d9b003e12
14 changed files with 4184 additions and 17 deletions

View File

@@ -309,6 +309,11 @@ cc_library(
],
)
# Header-only library: bounded "top-n" selection (see top_n.h).
cc_library(
    name = "top_n",
    hdrs = ["top_n.h"],
)
cc_library(
name = "types",
hdrs = ["types.h"],

302
ortools/base/top_n.h Normal file
View File

@@ -0,0 +1,302 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ref: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/gtl/top_n.h
// This simple class finds the top n elements of an incrementally provided set
// of elements which you push one at a time. If the number of elements exceeds
// n, the lowest elements are incrementally dropped. At the end you get
// a vector of the top elements sorted in descending order (through Extract() or
// ExtractNondestructive()), or a vector of the top elements but not sorted
// (through ExtractUnsorted() or ExtractUnsortedNondestructive()).
//
// The value n is specified in the constructor. If there are p elements pushed
// altogether:
// The total storage requirements are O(min(n, p)) elements
// The running time is O(p * log(min(n, p))) comparisons
// If n is a constant, the total storage required is a constant and the running
// time is linear in p.
#ifndef OR_TOOLS_BASE_TOP_N_H_
#define OR_TOOLS_BASE_TOP_N_H_
#include <stddef.h>
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
#include "ortools/base/logging.h"
namespace operations_research {
namespace gtl {
// Cmp is an stl binary predicate. Note that Cmp is the "greater" predicate,
// not the more commonly used "less" predicate.
//
// If you use a "less" predicate here, the TopN will pick out the bottom N
// elements out of the ones passed to it, and it will return them sorted in
// ascending order.
//
// TopN is rule-of-zero copyable and movable if its members are.
template <class T, class Cmp = std::greater<T> >
class TopN {
 public:
  // The TopN is in one of the three states:
  //
  //  o UNORDERED: this is the state an instance is originally in,
  //    where the elements are completely orderless.
  //
  //  o BOTTOM_KNOWN: in this state, we keep the invariant that there
  //    is at least one element in it, and the lowest element is at
  //    position 0. The elements in other positions remain
  //    unsorted. This state is reached if the state was originally
  //    UNORDERED and a peek_bottom() function call is invoked.
  //
  //  o HEAP_SORTED: in this state, the array is kept as a heap and
  //    there are exactly (limit_+1) elements in the array. This
  //    state is reached when at least (limit_+1) elements are
  //    pushed in.
  //
  //  The state transition graph is as follows:
  //
  //             peek_bottom()                (limit_+1) elements
  //  UNORDERED --------------> BOTTOM_KNOWN --------------------> HEAP_SORTED
  //      |                                                          ^
  //      |                       (limit_+1) elements                |
  //      +----------------------------------------------------------+
  enum State { UNORDERED, BOTTOM_KNOWN, HEAP_SORTED };
  using UnsortedIterator = typename std::vector<T>::const_iterator;
  // 'limit' is the maximum number of top results to return.
  explicit TopN(size_t limit) : TopN(limit, Cmp()) {}
  TopN(size_t limit, const Cmp &cmp) : limit_(limit), cmp_(cmp) {}
  size_t limit() const { return limit_; }
  // Number of elements currently held by this TopN object. This
  // will be no greater than 'limit' passed to the constructor.
  size_t size() const { return std::min(elements_.size(), limit_); }
  bool empty() const { return size() == 0; }
  // If you know how many elements you will push at the time you create the
  // TopN object, you can call reserve to preallocate the memory that TopN
  // will need to process all 'n' pushes. Calling this method is optional.
  void reserve(size_t n) { elements_.reserve(std::min(n, limit_ + 1)); }
  // Push 'v'. If the maximum number of elements was exceeded, drop the
  // lowest element and return it in 'dropped' (if given). If the maximum is not
  // exceeded, 'dropped' will remain unchanged. 'dropped' may be omitted or
  // nullptr, in which case it is not filled in.
  // Requires: T is CopyAssignable, Swappable
  void push(const T &v) { push(v, nullptr); }
  void push(const T &v, T *dropped) { PushInternal(v, dropped); }
  // Move overloads of push.
  // Requires: T is MoveAssignable, Swappable
  void push(T &&v) {  // NOLINT(build/c++11)
    push(std::move(v), nullptr);
  }
  void push(T &&v, T *dropped) {  // NOLINT(build/c++11)
    PushInternal(std::move(v), dropped);
  }
  // Peeks the bottom result without calling Extract().
  const T &peek_bottom();
  // Extract the elements as a vector sorted in descending order. The caller
  // assumes ownership of the vector and must delete it when done. This is a
  // destructive operation. The only method that can be called immediately
  // after Extract() is Reset().
  std::vector<T> *Extract();
  // Similar to Extract(), but makes no guarantees the elements are in sorted
  // order. As with Extract(), the caller assumes ownership of the vector and
  // must delete it when done. This is a destructive operation. The only
  // method that can be called immediately after ExtractUnsorted() is Reset().
  std::vector<T> *ExtractUnsorted();
  // A non-destructive version of Extract(). Copy the elements in a new vector
  // sorted in descending order and return it. The caller assumes ownership of
  // the new vector and must delete it when done. After calling
  // ExtractNondestructive(), the caller can continue to push() new elements.
  std::vector<T> *ExtractNondestructive() const;
  // A non-destructive version of Extract(). Copy the elements to a given
  // vector sorted in descending order. After calling
  // ExtractNondestructive(), the caller can continue to push() new elements.
  // Note:
  //  1. The given argument must be allocated.
  //  2. Any data contained in the vector prior to the call will be deleted
  //     from it. After the call the vector will contain only the elements
  //     from the data structure.
  void ExtractNondestructive(std::vector<T> *output) const;
  // A non-destructive version of ExtractUnsorted(). Copy the elements in a new
  // vector and return it, with no guarantees the elements are in sorted order.
  // The caller assumes ownership of the new vector and must delete it when
  // done. After calling ExtractUnsortedNondestructive(), the caller can
  // continue to push() new elements.
  std::vector<T> *ExtractUnsortedNondestructive() const;
  // A non-destructive version of ExtractUnsorted(). Copy the elements into
  // a given vector, with no guarantees the elements are in sorted order.
  // After calling ExtractUnsortedNondestructive(), the caller can continue
  // to push() new elements.
  // Note:
  //  1. The given argument must be allocated.
  //  2. Any data contained in the vector prior to the call will be deleted
  //     from it. After the call the vector will contain only the elements
  //     from the data structure.
  void ExtractUnsortedNondestructive(std::vector<T> *output) const;
  // Return an iterator to the beginning (end) of the container,
  // with no guarantees about the order of iteration. These iterators are
  // invalidated by mutation of the data structure.
  UnsortedIterator unsorted_begin() const { return elements_.begin(); }
  UnsortedIterator unsorted_end() const { return elements_.begin() + size(); }
  // Accessor for comparator template argument.
  Cmp *comparator() { return &cmp_; }
  // This removes all elements. If Extract() or ExtractUnsorted() have been
  // called, this will put it back in an empty but useable state.
  void Reset();

 private:
  template <typename U>
  void PushInternal(U &&v, T *dropped);  // NOLINT(build/c++11)
  // elements_ can be in one of two states:
  //   elements_.size() <= limit_:  elements_ is an unsorted vector of elements
  //     pushed so far.
  //   elements_.size() > limit_:  The last element of elements_ is unused;
  //     the other elements of elements_ are an stl heap whose size is exactly
  //     limit_. In this case elements_.size() is exactly one greater than
  //     limit_, but don't use "elements_.size() == limit_ + 1" to check for
  //     that because you'll get a false positive if limit_ == size_t(-1).
  std::vector<T> elements_;
  size_t limit_;  // Maximum number of elements to find.
  Cmp cmp_;       // Greater-than comparison function.
  State state_ = UNORDERED;  // See the state transition graph above.
};
// ----------------------------------------------------------------------
// Implementations of non-inline functions
// Shared implementation of the copy and move overloads of push(). 'v' is
// perfectly forwarded into elements_; if an element is evicted it is moved
// into '*dropped' (when 'dropped' is non-null).
template <class T, class Cmp>
template <typename U>
void TopN<T, Cmp>::PushInternal(U &&v, T *dropped) {  // NOLINT(build/c++11)
  if (limit_ == 0) {
    // With a zero limit every pushed element is immediately "dropped".
    if (dropped) *dropped = std::forward<U>(v);  // NOLINT(build/c++11)
    return;
  }
  if (state_ != HEAP_SORTED) {
    elements_.push_back(std::forward<U>(v));  // NOLINT(build/c++11)
    if (state_ == UNORDERED || cmp_(elements_.back(), elements_.front())) {
      // Easy case: we just pushed the new element back
    } else {
      // To maintain the BOTTOM_KNOWN state, we need to make sure that
      // the element at position 0 is always the smallest. So we put
      // the new element at position 0 and push the original bottom
      // element in the back.
      // Warning: this code is subtle.
      using std::swap;
      swap(elements_.front(), elements_.back());
    }
    if (elements_.size() == limit_ + 1) {
      // Transition from unsorted vector to a heap.
      std::make_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.front());
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
      state_ = HEAP_SORTED;
    }
  } else {
    // Only insert the new element if it is greater than the least element.
    if (cmp_(v, elements_.front())) {
      // Store new element in the last slot of elements_. Remember from the
      // comments on elements_ that this last slot is unused, so we don't
      // overwrite anything useful.
      elements_.back() = std::forward<U>(v);  // NOLINT(build/c++11)
      // std::pop_heap() swaps elements_.front() and elements_.back() and
      // rearranges elements from [elements_.begin(), elements_.end() - 1) such
      // that they are a heap according to cmp_. Net effect: remove
      // elements_.front() from the heap, and add the new element instead. For
      // more info, see https://en.cppreference.com/w/cpp/algorithm/pop_heap.
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.back());
    } else {
      if (dropped) *dropped = std::forward<U>(v);  // NOLINT(build/c++11)
    }
  }
}
// Returns the smallest element currently held, transitioning from UNORDERED
// to BOTTOM_KNOWN (bottom element moved to position 0) on first use.
// Requires: !empty().
template <class T, class Cmp>
const T &TopN<T, Cmp>::peek_bottom() {
  CHECK(!empty());
  if (state_ == UNORDERED) {
    // We need to do a linear scan to find out the bottom element.
    // Use size_t for the index: 'int' mixes signedness with elements_.size()
    // and silently narrows for containers with more than INT_MAX elements.
    size_t min_candidate = 0;
    for (size_t i = 1; i < elements_.size(); ++i) {
      if (cmp_(elements_[min_candidate], elements_[i])) {
        min_candidate = i;
      }
    }
    // By swapping the element at position 0 and the minimal
    // element, we transition to the BOTTOM_KNOWN state
    if (min_candidate != 0) {
      using std::swap;
      swap(elements_[0], elements_[min_candidate]);
    }
    state_ = BOTTOM_KNOWN;
  }
  return elements_.front();
}
// Destructively extracts the elements, sorted in descending order (per cmp_).
// Ownership of the returned vector passes to the caller. Only Reset() may be
// called on this object afterwards.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::Extract() {
  std::vector<T> *result = new std::vector<T>;
  result->swap(elements_);
  if (state_ == HEAP_SORTED) {
    // Drop the unused extra slot, then sort the heap in descending order.
    result->pop_back();
    std::sort_heap(result->begin(), result->end(), cmp_);
  } else {
    std::sort(result->begin(), result->end(), cmp_);
  }
  return result;
}
// Destructively extracts the elements in unspecified order. Ownership of the
// returned vector passes to the caller. Only Reset() may be called next.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsorted() {
  std::vector<T> *result = new std::vector<T>;
  result->swap(elements_);
  // In the HEAP_SORTED state the last slot is unused; drop it.
  if (state_ == HEAP_SORTED) result->pop_back();
  return result;
}
// Copies the elements into a freshly-allocated vector, sorted in descending
// order. The caller owns (and must delete) the result; this object keeps its
// elements and can continue to accept push() calls.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractNondestructive() const {
  std::vector<T> *result = new std::vector<T>;
  ExtractNondestructive(result);
  return result;
}
// Overwrites '*output' with a copy of the elements, sorted in descending
// order. 'output' must be non-null; its previous contents are discarded.
// This object is left untouched.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractNondestructive(std::vector<T> *output) const {
  CHECK(output);
  *output = elements_;
  if (state_ == HEAP_SORTED) {
    // Drop the unused extra slot, then sort the heap copy.
    output->pop_back();
    std::sort_heap(output->begin(), output->end(), cmp_);
  } else {
    std::sort(output->begin(), output->end(), cmp_);
  }
}
// Copies the elements into a freshly-allocated vector, in unspecified order.
// The caller owns (and must delete) the result; this object is left intact.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsortedNondestructive() const {
  std::vector<T> *result = new std::vector<T>;
  ExtractUnsortedNondestructive(result);
  return result;
}
// Overwrites '*output' with a copy of the elements, in unspecified order.
// 'output' must be non-null; its previous contents are discarded. This
// object is left untouched.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractUnsortedNondestructive(std::vector<T> *output) const {
  CHECK(output);
  *output = elements_;
  // In the HEAP_SORTED state the last slot is unused; drop it from the copy.
  if (state_ == HEAP_SORTED) output->pop_back();
}
// Discards all elements and returns the object to its initial, empty
// UNORDERED state, making it usable again after Extract()/ExtractUnsorted().
template <class T, class Cmp>
void TopN<T, Cmp>::Reset() {
  state_ = UNORDERED;
  elements_.clear();
}
} // namespace gtl
} // namespace operations_research
#endif // OR_TOOLS_BASE_TOP_N_H_

View File

@@ -51,6 +51,15 @@ cc_library(
],
)
# Header-only breadth-first-search utilities (see bfs.h).
cc_library(
    name = "bfs",
    hdrs = ["bfs.h"],
    deps = [
        "@com_google_absl//absl/status",
        "@com_google_absl//absl/strings:str_format",
    ],
)
cc_library(
name = "bounded_dijkstra",
hdrs = ["bounded_dijkstra.h"],

View File

@@ -0,0 +1,222 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/bidirectional_dijkstra.h"
#include <cmath>
#include <limits>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/random/distributions.h"
#include "absl/strings/str_cat.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/base/map_util.h"
#include "ortools/graph/bounded_dijkstra.h"
#include "ortools/graph/graph.h"
#include "util/tuple/dump_vars.h"
namespace operations_research {
namespace {
using ::testing::AnyOf;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
using ::util::ListGraph;
using ::util::StaticGraph;
TEST(BidirectionalDijkstraTest, EmptyPathInspection) {
  // On an empty graph with empty source/target sets, the search must report
  // "no path": meeting_point == -1, empty node path, and the sentinel string.
  ListGraph<> graph;
  std::vector<double> lengths;
  BidirectionalDijkstra<ListGraph<>, double> dijkstra(&graph, &lengths, &graph,
                                                      &lengths);
  const auto path = dijkstra.SetToSetShortestPath({}, {});
  ASSERT_EQ(path.meeting_point, -1);
  EXPECT_THAT(dijkstra.PathToNodePath(path), IsEmpty());
  EXPECT_EQ(dijkstra.PathDebugString(path), "<NO PATH>");
}
TEST(BidirectionalDijkstraTest, SmallTest) {
  // Build a small "grid" graph. Arc indices and lengths of the forward graph
  // are in (); and the backward graph has the same, but reversed arcs.
  //
  // 0 --(#0:0.1)--> 1 --(#1:1.1)--> 2
  // |               |               |
  // (#2:0.1)        (#3:0.19)       (#4:0.3)
  // |               |               |
  // v               v               v
  // 3 --(#5:0.2)--> 4 --(#6:1.2)--> 5
  ListGraph<> forward_graph;
  std::vector<double> arc_lengths;
  forward_graph.AddArc(0, 1);
  arc_lengths.push_back(0.1);
  forward_graph.AddArc(1, 2);
  arc_lengths.push_back(1.1);
  forward_graph.AddArc(0, 3);
  arc_lengths.push_back(0.1);
  forward_graph.AddArc(1, 4);
  arc_lengths.push_back(0.19);
  forward_graph.AddArc(2, 5);
  arc_lengths.push_back(0.3);
  forward_graph.AddArc(3, 4);
  arc_lengths.push_back(0.2);
  forward_graph.AddArc(4, 5);
  arc_lengths.push_back(1.2);
  // The backward graph mirrors every forward arc (same indices, same
  // lengths), as required by the BidirectionalDijkstra constructor below.
  ListGraph<> backward_graph;
  for (int arc = 0; arc < forward_graph.num_arcs(); ++arc) {
    backward_graph.AddArc(forward_graph.Head(arc), forward_graph.Tail(arc));
  }
  BidirectionalDijkstra<ListGraph<>, double> dijkstra(
      &forward_graph, &arc_lengths, &backward_graph, &arc_lengths);
  // Since the meeting point may vary, depending on which search direction goes
  // faster, we run it many times to try and exercise more code paths.
  const int kNumPaths = 1000;
  for (int i = 0; i < kNumPaths; ++i) {
    SCOPED_TRACE(absl::StrCat("On attempt #", i));
    const auto path = dijkstra.OneToOneShortestPath(0, 5);
    EXPECT_THAT(dijkstra.PathToNodePath(path), ElementsAre(0, 1, 4, 5));
    // The meeting point (in brackets) may be any node of the optimal path.
    ASSERT_THAT(dijkstra.PathDebugString(path),
                AnyOf("0 --(#0:0.1)--> 1 --(#3:0.19)--> 4 --(#6:1.2)--> [5]",
                      "0 --(#0:0.1)--> 1 --(#3:0.19)--> [4] <--(#6:1.2)-- 5",
                      "0 --(#0:0.1)--> [1] <--(#3:0.19)-- 4 <--(#6:1.2)-- 5",
                      "[0] <--(#0:0.1)-- 1 <--(#3:0.19)-- 4 <--(#6:1.2)-- 5"));
  }
}
TEST(BidirectionalDijkstraTest, RandomizedCorrectnessTest) {
  // Cross-checks BidirectionalDijkstra against the unidirectional
  // BoundedDijkstraWrapper (the reference) on many random
  // multi-source/multi-destination queries over random graphs.
  std::mt19937 random(12345);
  // Performance on forge as of 2016-10-05 with these numbers, over 1000 runs:
  // - fastbuild: max = 21.9s, avg = 10.7s.
  // - opt: max = 23.2s, avg = 10.4s.
  const int kNumGraphs = DEBUG_MODE ? 100 : 300;
  const int kNumQueriesPerGraph = DEBUG_MODE ? 10 : 30;
  const int kNumNodes = 1000;
  const int kNumArcs = 10000;
  for (int graph_iter = 0; graph_iter < kNumGraphs; ++graph_iter) {
    // Build the random graphs.
    StaticGraph<> forward_graph;
    StaticGraph<> backward_graph;
    std::vector<double> forward_lengths;
    std::vector<double> backward_lengths;
    // Maps each backward arc index to its corresponding forward arc index.
    std::vector<int> forward_arc_of_backward_arc;
    forward_graph.AddNode(kNumNodes - 1);
    backward_graph.AddNode(kNumNodes - 1);
    for (int i = 0; i < kNumArcs; ++i) {
      const int a = absl::Uniform(random, 0, kNumNodes);
      const int b = absl::Uniform(random, 0, kNumNodes);
      forward_graph.AddArc(a, b);
      backward_graph.AddArc(b, a);
      forward_arc_of_backward_arc.push_back(i);
      const double length = absl::Uniform<double>(random, 0.0, 1.0);
      forward_lengths.push_back(length);
      backward_lengths.push_back(length);
    }
    // Build() permutes arc indices; apply the permutations to the length
    // vectors and to the backward->forward arc map so they stay consistent.
    std::vector<int> forward_perm;
    forward_graph.Build(&forward_perm);
    util::Permute(forward_perm, &forward_lengths);
    for (int& a : forward_arc_of_backward_arc) {
      a = forward_perm[a];
    }
    std::vector<int> backward_perm;
    backward_graph.Build(&backward_perm);
    util::Permute(backward_perm, &backward_lengths);
    util::Permute(backward_perm, &forward_arc_of_backward_arc);
    // Initialize the tested Dijkstra and the reference Dijkstra.
    typedef BidirectionalDijkstra<StaticGraph<>, double> Dijkstra;
    Dijkstra tested_dijkstra(&forward_graph, &forward_lengths, &backward_graph,
                             &backward_lengths);
    BoundedDijkstraWrapper<StaticGraph<>, double> ref_dijkstra(
        &forward_graph, &forward_lengths);
    // To print some debugging info in case the test fails.
    auto print_arc_path = [&](const std::vector<int>& arc_path) -> std::string {
      if (arc_path.empty()) return "<EMPTY>";
      std::string out = absl::StrCat(forward_graph.Tail(arc_path[0]));
      double total_length = 0.0;
      for (const int arc : arc_path) {
        absl::StrAppend(&out, "--(#", arc, ":", (forward_lengths[arc]), ")-->",
                        forward_graph.Head(arc));
        total_length += forward_lengths[arc];
      }
      absl::StrAppend(&out, "; Total length=", (total_length));
      return out;
    };
    auto print_node_distances =
        [&](const std::vector<Dijkstra::NodeDistance>& nds) -> std::string {
      std::string out = "{";
      for (const Dijkstra::NodeDistance& nd : nds) {
        absl::StrAppend(&out, " #", nd.node, " dist=", (nd.distance), ",");
      }
      if (!nds.empty()) out.back() = ' ';  // Replace the last ','.
      out += '}';
      return out;
    };
    // Run random Dijkstra queries.
    for (int q = 0; q < kNumQueriesPerGraph; ++q) {
      // Exponentially-distributed set sizes: usually small, occasionally big.
      const int num_src = ceil(absl::Exponential<double>(random));
      const int num_dst = ceil(absl::Exponential<double>(random));
      std::vector<std::pair<int, double>> ref_srcs;
      std::vector<std::pair<int, double>> ref_dsts;
      std::vector<Dijkstra::NodeDistance> srcs;
      std::vector<Dijkstra::NodeDistance> dsts;
      for (int i = 0; i < num_src; ++i) {
        const int src = absl::Uniform(random, 0, kNumNodes);
        // Source distance offsets drawn from [1, 2).
        const double dist = absl::Uniform<double>(random, 1.0, 2.0);
        ref_srcs.push_back({src, dist});
        srcs.push_back({src, dist});
      }
      for (int i = 0; i < num_dst; ++i) {
        const int dst = absl::Uniform(random, 0, kNumNodes);
        const double dist = absl::Uniform<double>(random, 1.0, 2.0);
        ref_dsts.push_back({dst, dist});
        dsts.push_back({dst, dist});
      }
      const std::vector<int> ref_dests =
          ref_dijkstra
              .RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
                  ref_srcs, ref_dsts,
                  /*num_destinations_to_reach=*/1,
                  std::numeric_limits<double>::infinity());
      if (ref_dests.empty()) {
        // No path. Verify that.
        EXPECT_EQ(
            tested_dijkstra.SetToSetShortestPath(srcs, dsts).meeting_point, -1);
        continue;
      }
      const std::vector<int> ref_arc_path =
          ref_dijkstra.ArcPathToNode(ref_dests[0]);
      const auto path = tested_dijkstra.SetToSetShortestPath(srcs, dsts);
      // Stitch the forward half-path and the (reversed, remapped) backward
      // half-path into a single forward arc path.
      std::vector<int> arc_path = path.forward_arc_path;
      for (const int arc : gtl::reversed_view(path.backward_arc_path)) {
        arc_path.push_back(forward_arc_of_backward_arc[arc]);
      }
      ASSERT_THAT(arc_path, ElementsAreArray(ref_arc_path))
          << "On graph #" << graph_iter << ", query #" << q
          << "\nSources : " << print_node_distances(srcs)
          << "\nDestinations: " << print_node_distances(dsts)
          << "\nReference path : " << print_arc_path(ref_arc_path)
          << "\nObtained path : " << print_arc_path(arc_path);
    }
  }
}
} // namespace
} // namespace operations_research

View File

@@ -25,7 +25,6 @@
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "ortools/base/iterator_adaptors.h"
#include "ortools/base/threadlocal.h"
#include "ortools/base/top_n.h"
#include "ortools/graph/graph.h"
@@ -256,11 +255,9 @@ class BoundedDijkstraWrapper {
}
private:
// We define a private copy constructor to be used by ThreadLocal<>,
// exclusively. The default copy constructor is problematic in a
// multi-threaded environment.
// The default copy constructor is problematic in a multi-threaded
// environment.
BoundedDijkstraWrapper(const BoundedDijkstraWrapper& other);
friend class ThreadLocal<BoundedDijkstraWrapper>;
// The Graph and length of each arc.
const GraphType* const graph_;
@@ -425,7 +422,7 @@ BoundedDijkstraWrapper<GraphType, DistanceType, ArcLengthFunctor>::
sources_with_distance_offsets, settled_node_callback, distance_limit);
// Clean up, sparsely, for the next call.
for (const int node : gtl::key_view(destinations_with_distance_offsets)) {
for (const auto& [node, _] : destinations_with_distance_offsets) {
is_destination_[node] = false;
}

View File

@@ -0,0 +1,736 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/bounded_dijkstra.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/io.h"
#include "ortools/graph/test_util.h"
#include "ortools/util/flat_matrix.h"
#include "ortools/util/saturated_arithmetic.h"
#include "util/tuple/dump_vars.h"
namespace operations_research {
namespace {
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAreArray;
using ::util::ListGraph;
TEST(BoundedDijkstraWrapperDeathTest, Accessors) {
  // Exercises the trivial accessors: distance_type, graph(), GetArcLength().
  ListGraph<> graph;
  graph.AddArc(1, 3);
  std::vector<float> arc_lengths = {2.5};
  BoundedDijkstraWrapper<ListGraph<>, float> dijkstra(&graph, &arc_lengths);
  // Extra parentheses keep the comma out of the macro argument list.
  ASSERT_TRUE((std::is_same<float, decltype(dijkstra)::distance_type>::value));
  ASSERT_EQ(&dijkstra.graph(), &graph);
  ASSERT_EQ(dijkstra.GetArcLength(0), 2.5);
}
TEST(BoundedDijkstraWrapperDeathTest, WithArcLengthFunctor) {
  // Arc lengths can come from an arbitrary functor instead of a vector.
  ListGraph<> graph;
  graph.AddArc(1, 3);
  const auto constant_length = [](int) { return 2.34; };
  BoundedDijkstraWrapper<ListGraph<>, float, std::function<float(int)>>
      dijkstra(&graph, constant_length);
  ASSERT_FLOAT_EQ(dijkstra.GetArcLength(0), 2.34f);
}
TEST(BoundedDijkstraWrapperDeathTest, ConstructorPreconditions) {
  ListGraph<> graph;
  for (int i = 0; i < 50; ++i) graph.AddArc(i, i + 1);
  using TestedClass = BoundedDijkstraWrapper<ListGraph<>, int>;
  // Too few arc lengths (13 for 50 arcs): construction must die, and the
  // message must mention the offending size.
  std::vector<int> arc_lengths(13, 0);
  EXPECT_DEATH(new TestedClass(&graph, &arc_lengths), "13");
  // Negative arc lengths are rejected, citing the bad value.
  arc_lengths.resize(50, 0);
  arc_lengths[20] = -132;
  EXPECT_DEATH(new TestedClass(&graph, &arc_lengths), "-132");
}
TEST(BoundedDijkstraWrapper, ArcPathToAndSourceOfShortestPathToNode) {
  // Each node pair is linked by two parallel arcs; the shortest path must
  // pick the cheaper one of each pair (arcs #0, #2, #5: 1 + 3 + 5 = 9).
  ListGraph<> graph;
  std::vector<int> arc_lengths = {1, 2, 3, 4, 6, 5};
  graph.AddArc(0, 1);
  graph.AddArc(0, 1);
  graph.AddArc(1, 2);
  graph.AddArc(1, 2);
  graph.AddArc(2, 3);
  graph.AddArc(2, 3);
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &arc_lengths);
  const std::vector<int> reached = dijkstra.RunBoundedDijkstra(0, 10);
  EXPECT_THAT(reached, ElementsAre(0, 1, 2, 3));
  EXPECT_EQ(9, dijkstra.distances()[3]);
  EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(0, 2, 5));
  EXPECT_THAT(dijkstra.NodePathTo(3), ElementsAre(0, 1, 2, 3));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3));
}
TEST(BoundedDijkstraWrapper, EmptyPath) {
  // The path from the source to itself has no arcs, distance 0, and a
  // one-node node path; nodes 2 and 3 are unreachable from 0.
  ListGraph<> graph;
  std::vector<int> arc_lengths = {1, 2};
  graph.AddArc(0, 1);
  graph.AddArc(2, 3);
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &arc_lengths);
  const std::vector<int> reached = dijkstra.RunBoundedDijkstra(0, 10);
  EXPECT_THAT(reached, ElementsAre(0, 1));
  EXPECT_EQ(0, dijkstra.distances()[0]);
  EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre());
  EXPECT_THAT(dijkstra.NodePathTo(0), ElementsAre(0));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(0));
}
TEST(BoundedDijkstraWrapper, OverflowSafe) {
  // Distances near the int64_t maximum must not overflow the search.
  ListGraph<> graph;
  const int64_t int_max = std::numeric_limits<int64_t>::max();
  std::vector<int64_t> arc_lengths = {int_max, int_max / 2, int_max / 2, 1};
  graph.AddArc(0, 1);
  graph.AddArc(0, 1);
  graph.AddArc(1, 2);
  graph.AddArc(2, 3);
  BoundedDijkstraWrapper<ListGraph<>, int64_t> dijkstra(&graph, &arc_lengths);
  const std::vector<int> reached = dijkstra.RunBoundedDijkstra(0, int_max);
  // This works because int_max is odd, i.e. 2 * (int_max / 2) = int_max - 1.
  // Node 3 stays unreached: its distance would hit the int_max limit.
  EXPECT_THAT(reached, ElementsAre(0, 1, 2));
  EXPECT_EQ(0, dijkstra.distances()[0]);
  EXPECT_EQ(int_max / 2, dijkstra.distances()[1]);
  EXPECT_EQ(int_max - 1, dijkstra.distances()[2]);
}
TEST(BoundedDijkstraWrapper,
     ArcPathToAndSourceOfShortestPathToNode_WithArcLengthFunction) {
  ListGraph<> graph;
  std::vector<int> arc_lengths = {1, 2, 3, 4, 6, 5};
  graph.AddArc(0, 1);
  graph.AddArc(0, 1);
  graph.AddArc(1, 2);
  graph.AddArc(1, 2);
  graph.AddArc(2, 3);
  graph.AddArc(2, 3);
  // Odd arcs keep their length from arc_lengths; even arcs cost 100. The
  // shortest path therefore uses the odd arcs #1, #3, #5 (2 + 4 + 5 = 11).
  class MyArcLengthFunctor {
   public:
    explicit MyArcLengthFunctor(const std::vector<int>& arc_lengths)
        : arc_lengths_(arc_lengths) {}
    int operator()(int arc) const {
      return arc % 2 == 1 ? arc_lengths_[arc] : 100;
    }

   private:
    const std::vector<int>& arc_lengths_;
  };
  BoundedDijkstraWrapper<ListGraph<>, int, MyArcLengthFunctor> dijkstra(
      &graph, MyArcLengthFunctor(arc_lengths));
  const std::vector<int> reached = dijkstra.RunBoundedDijkstra(0, 20);
  EXPECT_THAT(reached, ElementsAre(0, 1, 2, 3));
  EXPECT_EQ(11, dijkstra.distances()[3]);
  EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(1, 3, 5));
  EXPECT_THAT(dijkstra.NodePathTo(3), ElementsAre(0, 1, 2, 3));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3));
}
TEST(BoundedDijkstraWrapperTest, RandomDenseGraph) {
  // Cross-checks bounded-Dijkstra distances against Floyd-Warshall on a
  // random complete graph, running from every possible source.
  std::mt19937 random(12345);
  const int num_nodes = 50;
  std::vector<std::vector<int>> lengths(num_nodes, std::vector<int>(num_nodes));
  ListGraph<> graph;
  std::vector<int> arc_lengths;
  for (int i = 0; i < num_nodes; ++i) {
    for (int j = 0; j < num_nodes; ++j) {
      lengths[i][j] = (i == j) ? 0 : absl::Uniform(random, 0, 1000);
      graph.AddArc(i, j);
      arc_lengths.push_back(lengths[i][j]);
    }
  }
  // Compute the shortest-path length using FloydWarshall algorithm.
  for (int k = 0; k < num_nodes; ++k) {
    for (int i = 0; i < num_nodes; ++i) {
      for (int j = 0; j < num_nodes; ++j) {
        lengths[i][j] = std::min(lengths[i][j], lengths[i][k] + lengths[k][j]);
      }
    }
  }
  // Test the bounded dijkstra code (from all sources).
  std::vector<int> reached_sizes;
  for (int source = 0; source < num_nodes; ++source) {
    const int limit = 100;
    BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &arc_lengths);
    const std::vector<int> reached = dijkstra.RunBoundedDijkstra(source, limit);
    for (const int node : reached) {
      EXPECT_LT(dijkstra.distances()[node], limit);
      EXPECT_EQ(dijkstra.distances()[node], lengths[source][node]);
      // Check that we never have the same node twice in the paths.
      std::vector<int> path = {node};
      int parent = node;
      // Walk the parent chain up to the source (a self-parent node).
      while (dijkstra.parents()[parent] != parent) {
        parent = dijkstra.parents()[parent];
        path.push_back(parent);
      }
      std::sort(path.begin(), path.end());
      EXPECT_THAT(std::unique(path.begin(), path.end()), path.end());
    }
    // A node must be reached iff its true shortest distance is under limit.
    int num_under_limit = 0;
    for (int i = 0; i < num_nodes; ++i) {
      if (lengths[source][i] < limit) ++num_under_limit;
    }
    EXPECT_EQ(num_under_limit, reached.size());
    reached_sizes.push_back(reached.size());
  }
  // With the given limit, we reach a good number of nodes.
  EXPECT_THAT(reached_sizes, Contains(15));
  EXPECT_THAT(reached_sizes, Contains(28));
  EXPECT_THAT(reached_sizes, Contains(41));
}
TEST(SimpleOneToOneShortestPathTest, PathTooLong) {
  // The 0->3 path would cost 3 * big_length, which exceeds int's range; the
  // function then returns distance == int max with an empty path.
  const int big_length = std::numeric_limits<int>::max() / 2;
  std::vector<int> tails = {0, 1, 2};
  std::vector<int> heads = {1, 2, 3};
  std::vector<int> lengths = {big_length, big_length, big_length};
  {
    const auto [distance, path] =
        SimpleOneToOneShortestPath<int>(0, 3, tails, heads, lengths);
    EXPECT_EQ(distance, std::numeric_limits<int>::max());
    EXPECT_TRUE(path.empty());
  }
  {
    // From 0 to 2 it works because 2 * big_length < int_max.
    const auto [distance, path] =
        SimpleOneToOneShortestPath<int>(0, 2, tails, heads, lengths);
    EXPECT_EQ(distance, std::numeric_limits<int>::max() - 1);
    EXPECT_THAT(path, ElementsAre(0, 1, 2));
  }
}
TEST(SimpleOneToOneShortestPathTest, Random) {
  absl::BitGen random;
  // We will generate a random dense graph.
  const int num_nodes = 50;
  std::vector<std::vector<int>> lengths(num_nodes, std::vector<int>(num_nodes));
  std::vector<std::vector<int>> shortest_distance(num_nodes,
                                                  std::vector<int>(num_nodes));
  // This will be the "sparse" representation.
  std::vector<int> tails;
  std::vector<int> heads;
  std::vector<int> arc_lengths;
  // We permute the node order to properly test that the arc order does not
  // matter.
  std::vector<int> nodes(num_nodes);
  for (int i = 0; i < num_nodes; ++i) nodes[i] = i;
  std::shuffle(nodes.begin(), nodes.end(), random);
  // Generate random data.
  for (const int tail : nodes) {
    for (const int head : nodes) {
      lengths[tail][head] = (tail == head) ? 0 : absl::Uniform(random, 0, 1000);
      shortest_distance[tail][head] = lengths[tail][head];
      tails.push_back(tail);
      heads.push_back(head);
      arc_lengths.push_back(lengths[tail][head]);
    }
  }
  // Compute the shortest-path length using FloydWarshall algorithm.
  for (int k = 0; k < num_nodes; ++k) {
    for (int i = 0; i < num_nodes; ++i) {
      for (int j = 0; j < num_nodes; ++j) {
        shortest_distance[i][j] =
            std::min(shortest_distance[i][j],
                     shortest_distance[i][k] + shortest_distance[k][j]);
      }
    }
  }
  // Test the code from a bunch of random (from, to) pairs.
  for (int runs = 0; runs < 100; ++runs) {
    const int from = absl::Uniform(random, 0, num_nodes);
    const int to = absl::Uniform(random, 0, num_nodes);
    // No limit. There should always be a path with our generated data.
    {
      const auto [distance, path] =
          SimpleOneToOneShortestPath<int>(from, to, tails, heads, arc_lengths);
      EXPECT_EQ(distance, shortest_distance[from][to]);
      EXPECT_FALSE(path.empty());
      EXPECT_EQ(path.front(), from);
      EXPECT_EQ(path.back(), to);
    }
    // A limit of shortest_distance[from][to] + 1 works too.
    {
      const auto [distance, path] = SimpleOneToOneShortestPath<int>(
          from, to, tails, heads, arc_lengths, shortest_distance[from][to] + 1);
      EXPECT_EQ(distance, shortest_distance[from][to]);
      EXPECT_FALSE(path.empty());
      EXPECT_EQ(path.front(), from);
      EXPECT_EQ(path.back(), to);
    }
    // But a limit of shortest_distance[from][to] should fail.
    {
      const auto [distance, path] = SimpleOneToOneShortestPath<int>(
          from, to, tails, heads, arc_lengths, shortest_distance[from][to]);
      EXPECT_EQ(distance, shortest_distance[from][to]);
      EXPECT_TRUE(path.empty());
    }
  }
}
TEST(BoundedDijkstraWrapperTest, MultiRunsOverDynamicGraphAndLengths) {
  // The wrapper holds pointers to the graph and the lengths, so both may be
  // extended between two successive runs.
  ListGraph<> graph;
  std::vector<int> lengths;
  const auto add_arc = [&graph, &lengths](int tail, int head, int length) {
    graph.AddArc(tail, head);
    lengths.push_back(length);
  };
  add_arc(0, 1, 4);
  add_arc(0, 1, 3);
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &lengths);
  EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 5), ElementsAre(0, 1));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1));
  EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(1));
  EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 2), ElementsAre(0));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(0));
  EXPECT_THAT(dijkstra.ArcPathTo(0), IsEmpty());
  EXPECT_THAT(dijkstra.RunBoundedDijkstra(1, 99), ElementsAre(1));
  EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(1));
  EXPECT_THAT(dijkstra.ArcPathTo(1), IsEmpty());
  // Grow the graph (arcs and nodes) and re-run without rebuilding the wrapper.
  add_arc(0, 2, 1);
  add_arc(1, 2, 0);
  add_arc(2, 1, 1);
  add_arc(1, 3, 5);
  EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 10), ElementsAre(0, 2, 1, 3));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3));
  EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(2, 4, 5));
  EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 6), ElementsAre(0, 2, 1));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1));
  EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(2, 4));
}
TEST(BoundedDijkstraWrapperTest, MultipleSources) {
  // Use this graph. Source nodes have their initial distance in [ ].
  //
  // N1[0] --(2)--> N0[4] --(1)--> N2 --(5)--> N3 <--(4)-- N4[3] --(5)--> N5
  ListGraph<> graph;
  std::vector<int> lengths;
  const auto add_arc = [&graph, &lengths](int tail, int head, int length) {
    graph.AddArc(tail, head);
    lengths.push_back(length);
  };
  add_arc(1, 0, 2);
  add_arc(0, 2, 1);
  add_arc(2, 3, 5);
  add_arc(4, 3, 4);
  add_arc(4, 5, 5);
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &lengths);
  // The distance limit is exclusive, so we can't reach Node 5.
  ASSERT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources(
                  {{1, 0}, {0, 4}, {4, 3}}, 8),
              // The order is deterministic: node 4 comes before node 2, despite
              // having equal distance and higher index, because it's a source.
              ElementsAre(1, 0, 4, 2, 3));
  // Check distance, best source and arc path for every reached node.
  EXPECT_EQ(2, dijkstra.distances()[0]);
  EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(0));
  EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre(0));
  EXPECT_EQ(0, dijkstra.distances()[1]);
  EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(1));
  EXPECT_THAT(dijkstra.ArcPathTo(1), IsEmpty());
  EXPECT_EQ(3, dijkstra.distances()[2]);
  EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(2));
  EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(0, 1));
  EXPECT_EQ(7, dijkstra.distances()[3]);
  EXPECT_EQ(4, dijkstra.SourceOfShortestPathToNode(3));
  EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(3));
  EXPECT_EQ(3, dijkstra.distances()[4]);
  EXPECT_EQ(4, dijkstra.SourceOfShortestPathToNode(4));
  EXPECT_THAT(dijkstra.ArcPathTo(4), IsEmpty());
}
TEST(BoundedDijkstraWrapperTest, SourcesAtOrBeyondDistanceLimitAreNotReached) {
  // No arcs: only the sources themselves can be reached, and only those whose
  // initial distance is strictly below the (exclusive) limit of 12.
  ListGraph<> graph(/*num_nodes=*/5, /*arc_capacity=*/0);
  std::vector<int> no_arc_lengths;
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &no_arc_lengths);
  EXPECT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources(
                  {{0, 10}, {1, 11}, {2, 12}, {3, 13}}, 12),
              ElementsAre(0, 1));
}
TEST(BoundedDijkstraWrapperTest, SourcesListedMultipleTimesKeepsMinDistance) {
  // Node 1 is listed three times with initial distances 12, 10 and 14: the
  // minimum (10) must be kept, so node 3 ends at 10 + 20 = 30, below the
  // exclusive limit of 31.
  ListGraph<> graph(/*num_nodes=*/5, /*arc_capacity=*/1);
  graph.AddArc(1, 3);
  std::vector<int> lengths = {20};
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &lengths);
  EXPECT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources(
                  {{1, 12}, {1, 10}, {1, 14}}, 31),
              ElementsAre(1, 3));
  EXPECT_EQ(dijkstra.distances()[3], 30);
}
// Exercises RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations():
// source/destination offsets, num_destinations_to_reach, the exclusive
// distance limit, repeated sources/destinations, and corner cases.
TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) {
  // Source nodes are "S", destination nodes are "D", and the rest are "N".
  // Source and destination nodes have their distance offset in [ ].
  //
  // S0[2] --(3)--> D1[7] --(1)--.
  //                              >--> N5 --(1)--> D4[1]
  // S2[4] --(3)--> D3[3] --(0)--'     |
  //                  ^                |
  //                   \              /
  //                    `------(0)-----'
  //
  // The shortest path is S0->D1->N5->D4, of distance 2 + 3 + 1 + 1 + 1 = 8.
  ListGraph<> graph;
  std::vector<int> arc_lengths;
  graph.AddArc(0, 1);
  arc_lengths.push_back(3);
  graph.AddArc(2, 3);
  arc_lengths.push_back(3);
  graph.AddArc(1, 5);
  arc_lengths.push_back(1);
  graph.AddArc(3, 5);
  arc_lengths.push_back(0);
  graph.AddArc(5, 3);
  arc_lengths.push_back(0);
  graph.AddArc(5, 4);
  arc_lengths.push_back(1);
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &arc_lengths);
  // Repeat the same source and destination multiple times, to verify that
  // it's supported.
  std::vector<std::pair<int, int>> sources = {{0, 5}, {2, 4}, {0, 2}, {0, 9}};
  std::vector<std::pair<int, int>> destinations = {
      {1, 7}, {4, 5}, {3, 3}, {4, 1}, {4, 3}};
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, destinations, /*num_destinations_to_reach=*/1,
          /*distance_limit=*/1000),
      Contains(4));
  EXPECT_EQ(2 + 3 + 1 + 1, dijkstra.distances()[4]);
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(4));
  EXPECT_THAT(dijkstra.ArcPathTo(4),
              ElementsAre(/*0->1*/ 0, /*1->5*/ 2, /*5->4*/ 5));
  EXPECT_EQ(2, dijkstra.GetSourceIndex(0));
  EXPECT_EQ(3, dijkstra.GetDestinationIndex(4));
  // Run it with a limit too small: it'll fail to discover any destination.
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, destinations, /*num_destinations_to_reach=*/2,
          /*distance_limit=*/8),  // Limit is exclusive.
      IsEmpty());
  // .. And with a limit that's just big enough for "4". It'll be ok.
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, destinations, /*num_destinations_to_reach=*/2,
          /*distance_limit=*/9),  // Limit is exclusive.
      ElementsAre(4));
  // Slightly modify the graph and try again. We want a case where the best
  // destination isn't the one with the smallest distance offset.
  destinations.push_back({1, 2});  // D1 will be the closest destination now.
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, destinations, /*num_destinations_to_reach=*/1,
          /*distance_limit=*/8),  // Limit is exclusive.
      ElementsAre(1));
  EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1));
  EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(/*0->1*/ 0));
  // Corner case: run with no destinations.
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, {}, /*num_destinations_to_reach=*/99,
          /*distance_limit=*/1000),
      IsEmpty());
  // Corner case: run with num_destinations_to_reach = 0.
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, destinations, /*num_destinations_to_reach=*/0,
          /*distance_limit=*/1000),
      IsEmpty());
  // Call Get{Source,Destination}Index() on nodes that aren't sources or
  // destinations. This returns junk; so we don't check the returned values,
  // but we do check that it doesn't crash.
  dijkstra.GetDestinationIndex(4);
  dijkstra.GetSourceIndex(1);
  // Setting num_destinations_to_reach=1 now should make '1' the only reachable
  // destination, even if the limit is large.
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, destinations, /*num_destinations_to_reach=*/1,
          /*distance_limit=*/1000),
      ElementsAre(1));
  // Verify that if we set the number of destinations to infinity, they're all
  // explored, and the search still stops before exploring the whole graph. To
  // do that, we add one extra arc that's beyond the farthest destination's
  // distance (including its destination offset), i.e. 1 (distance 2+3+7 = 12).
  graph.AddArc(5, 6);
  arc_lengths.push_back(2);
  graph.AddArc(6, 7);
  arc_lengths.push_back(0);
  EXPECT_THAT(
      dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
          sources, destinations, /*num_destinations_to_reach=*/1000,
          /*distance_limit=*/1000),
      ElementsAre(1, 4, 3));
  EXPECT_GE(dijkstra.distances()[1], 5);
  EXPECT_GE(dijkstra.distances()[4], 7);
  EXPECT_GE(dijkstra.distances()[3], 6);
  // To verify that node #7 isn't reached, we can check its distance, which will
  // still be set to the initialized "distance_limit - min_destination_offset".
  EXPECT_GE(dijkstra.distances()[7], 1000 - 1);
}
TEST(BoundedDijkstraWrapperTest, OneToOneShortestPath) {
  // Since we already tested the multiple sources - multiple destinations
  // variant, we only need to test the "plumbing" here.
  ListGraph<> graph;
  std::vector<int> lengths;
  const auto add_arc = [&graph, &lengths](int tail, int head, int length) {
    graph.AddArc(tail, head);
    lengths.push_back(length);
  };
  add_arc(0, 1, 3);
  add_arc(1, 2, 2);
  BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &lengths);
  // Reachable: the (exclusive) limit is strictly above the path length.
  EXPECT_TRUE(dijkstra.OneToOneShortestPath(0, 2, 6));
  EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(0, 1));
  EXPECT_TRUE(dijkstra.OneToOneShortestPath(0, 0, 1));
  EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre());
  EXPECT_TRUE(dijkstra.OneToOneShortestPath(1, 2, 3));
  EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(1));
  // Unreachable: limit equal to the path length, or no path at all.
  EXPECT_FALSE(dijkstra.OneToOneShortestPath(0, 2, 5));
  EXPECT_FALSE(dijkstra.OneToOneShortestPath(0, 0, 0));
  EXPECT_FALSE(dijkstra.OneToOneShortestPath(1, 2, 2));
  EXPECT_FALSE(dijkstra.OneToOneShortestPath(2, 1, 1000));
}
TEST(BoundedDijkstraWrapperTest, CustomSettledNodeCallback) {
  // A small chain: 8 --[3]--> 1 --[2]--> 42 --[3]--> 3 --[2]--> 4.
  ListGraph<> graph;
  std::vector<int> lengths;
  const auto add_arc = [&graph, &lengths](int tail, int head, int length) {
    graph.AddArc(tail, head);
    lengths.push_back(length);
  };
  add_arc(8, 1, 3);
  add_arc(1, 42, 2);
  add_arc(42, 3, 3);
  add_arc(3, 4, 2);
  typedef BoundedDijkstraWrapper<ListGraph<>, int> DijkstraType;
  DijkstraType dijkstra(&graph, &lengths);
  // The callback records every settled (node, distance) pair, and lowers the
  // distance limit to 10 when node 42 is settled.
  std::vector<std::pair<int, int>> settled_node_dists;
  auto callback = [&settled_node_dists](int node, int distance,
                                        int* distance_limit) {
    settled_node_dists.push_back({node, distance});
    if (node == 42) *distance_limit = 10;
  };
  EXPECT_THAT(dijkstra.RunBoundedDijkstraWithSettledNodeCallback({{8, 0}},
                                                                 callback, 999),
              ElementsAre(8, 1, 42, 3));
  EXPECT_THAT(settled_node_dists,
              ElementsAre(Pair(8, 0), Pair(1, 3), Pair(42, 5), Pair(3, 8)));
}
// Randomized stress test: compares multi-source bounded Dijkstra runs against
// a Floyd-Warshall reference on small random graphs.
// NOTE(review): the suite name "BoundedDisjktraTest" misspells "Dijkstra";
// kept as-is to avoid changing the registered test name.
TEST(BoundedDisjktraTest, RandomizedStressTest) {
  std::mt19937 random;
  const int kNumTests = 10'000;
  constexpr int kint32max = std::numeric_limits<int>::max();
  for (int test = 0; test < kNumTests; ++test) {
    // Generate a random graph with random weights.
    const int num_nodes = absl::Uniform(random, 1, 12);
    const int num_arcs =
        absl::Uniform(absl::IntervalClosed, random, 0,
                      std::min(num_nodes * (num_nodes - 1), 15));
    ListGraph<> graph(num_nodes, num_arcs);
    for (int a = 0; a < num_arcs; ++a) {
      graph.AddArc(absl::Uniform(random, 0, num_nodes),
                   absl::Uniform(random, 0, num_nodes));
    }
    std::vector<int> lengths(num_arcs);
    for (int& w : lengths) w = absl::Uniform(random, 0, 5);
    // Run Floyd-Warshall as a 'reference' shortest path algorithm.
    FlatMatrix<int> ref_dist(num_nodes, num_nodes, kint32max);
    // Parallel arcs: keep the smallest length.
    for (int a = 0; a < num_arcs; ++a) {
      int& d = ref_dist[graph.Tail(a)][graph.Head(a)];
      if (lengths[a] < d) d = lengths[a];
    }
    for (int node = 0; node < num_nodes; ++node) {
      ref_dist[node][node] = 0;
    }
    for (int k = 0; k < num_nodes; ++k) {
      for (int i = 0; i < num_nodes; ++i) {
        for (int j = 0; j < num_nodes; ++j) {
          // Cast to int64_t: kint32max + anything would overflow int.
          const int64_t dist_through_k =
              static_cast<int64_t>(ref_dist[i][k]) + ref_dist[k][j];
          if (dist_through_k < ref_dist[i][j]) ref_dist[i][j] = dist_through_k;
        }
      }
    }
    // Compute the graph's largest distance below kint32max.
    int max_distance = 0;
    for (int i = 0; i < num_nodes; ++i) {
      for (int j = 0; j < num_nodes; ++j) {
        const int d = ref_dist[i][j];
        if (d != kint32max && d > max_distance) max_distance = d;
      }
    }
    // Now, run some Dijkstras and verify that they match. To balance out the
    // FW (Floyd-Warshall) which is O(N³), we run more than one Dijkstra per FW.
    BoundedDijkstraWrapper<ListGraph<>, int> dijkstra(&graph, &lengths);
    for (int num_dijkstra = 0; num_dijkstra < 20; ++num_dijkstra) {
      // Draw the distance limit.
      const int limit =
          absl::Bernoulli(random, 0.2)
              ? kint32max
              : absl::Uniform(absl::IntervalClosed, random, 0, max_distance);
      // Draw sources (*with* repetition) with initial distances.
      const int num_sources = absl::Uniform(random, 1, 5);
      std::vector<std::pair<int, int>> sources(num_sources);
      for (auto& [s, dist] : sources) {
        s = absl::Uniform(random, 0, num_nodes);
        dist = absl::Uniform(absl::IntervalClosed, random, 0, max_distance + 1);
      }
      // Precompute the reference minimum distance to each node (using any of
      // the sources), and the expected reached nodes: any node whose distance
      // is < limit. That includes the sources themselves: a source whose
      // initial distance is ≥ limit won't be reached.
      std::vector<int> node_min_dist(num_nodes, kint32max);
      std::vector<int> expected_reached_nodes;
      for (int node = 0; node < num_nodes; ++node) {
        int min_dist = kint32max;
        for (const auto& [src, dist] : sources) {
          // Cast to int64_t to avoid overflows.
          min_dist = std::min<int64_t>(
              min_dist, static_cast<int64_t>(ref_dist[src][node]) + dist);
        }
        node_min_dist[node] = min_dist;
        if (min_dist < limit) expected_reached_nodes.push_back(node);
      }
      const std::vector<int> reached_nodes =
          dijkstra.RunBoundedDijkstraFromMultipleSources(sources, limit);
      EXPECT_THAT(reached_nodes,
                  UnorderedElementsAreArray(expected_reached_nodes));
      for (const int node : reached_nodes) {
        EXPECT_EQ(dijkstra.distances()[node], node_min_dist[node]) << node;
      }
      ASSERT_FALSE(HasFailure())
          << DUMP_VARS(num_nodes, num_arcs, num_sources, limit, sources,
                       lengths)
          << "\n With graph:\n"
          << util::GraphToString(graph, util::PRINT_GRAPH_ARCS);
    }
  }
}
// Benchmarks RunBoundedDijkstra() on a 100 x 10000 grid graph, with either
// small discrete arc lengths ([0..2]) or large spread-out ones.
// The search radius is chosen so that the search settles ~kWidth²/2 nodes.
template <bool arc_lengths_are_discrete>
void BM_GridGraph(benchmark::State& state) {
  typedef util::StaticGraph<int> Graph;
  const int64_t kWidth = 100;
  const int64_t kHeight = 10000;
  const int kSourceNode = static_cast<int>(kWidth * kHeight / 2);
  std::unique_ptr<Graph> graph =
      util::Create2DGridGraph<Graph>(/*width=*/kWidth, /*height=*/kHeight);
  std::vector<int64_t> arc_lengths(graph->num_arcs(), 0);
  const int64_t min_length = arc_lengths_are_discrete ? 0 : 1;
  const int64_t max_length = arc_lengths_are_discrete ? 2 : 1000000000000000L;
  std::mt19937 random(12345);
  for (int64_t& length : arc_lengths) {
    length = absl::Uniform(random, min_length, max_length + 1);
  }
  BoundedDijkstraWrapper<Graph, int64_t> dijkstra(graph.get(), &arc_lengths);
  const int64_t kSearchRadius = kWidth * (min_length + max_length) / 2;
  // NOTE(user): The expected number of nodes visited is in ϴ(kWidth²),
  // since the search radius is ϴ(kWidth). The exact constant is hard to
  // derive mathematically as a function of the radius formula, so I measured
  // it empirically and it was close to 0.5, which seemed about right.
  const int64_t kExpectedApproximateSearchSize = kWidth * kWidth / 2;
  int64_t total_nodes_visited = 0;
  for (auto _ : state) {
    const int num_nodes_visited =
        dijkstra
            .RunBoundedDijkstra(/*source_node=*/kSourceNode,
                                /*distance_limit=*/kSearchRadius)
            .size();
    // We verify that the Dijkstra search size is in the expected range, to
    // make sure that we're measuring what we want in this benchmark.
    CHECK_GE(num_nodes_visited, kExpectedApproximateSearchSize / 2);
    // Fixed: the upper bound used CHECK_GE, which made it a (vacuous,
    // wrong-direction) duplicate of the lower bound above.
    CHECK_LE(num_nodes_visited, kExpectedApproximateSearchSize * 2);
    total_nodes_visited += num_nodes_visited;
  }
  state.SetItemsProcessed(total_nodes_visited);
}
BENCHMARK(BM_GridGraph<true>);
BENCHMARK(BM_GridGraph<false>);
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,852 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/max_flow.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "net/proto2/contrib/parse_proto/testing.h"
#include "ortools/base/logging.h"
#include "ortools/base/path.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/graphs.h"
#include "ortools/linear_solver/linear_solver.h"
#include "ortools/util/file_util.h"
namespace operations_research {
namespace {
using ::google::protobuf::contrib::parse_proto::ParseTestProto;
using ::testing::ContainerEq;
using ::testing::WhenSorted;
TEST(SimpleMaxFlowTest, EmptyWithValidSourceAndSink) {
  // On an empty graph, solving between two non-negative node indices is valid
  // and yields a zero optimal flow; no nodes or arcs get created.
  SimpleMaxFlow solver;
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(0, 1));
  EXPECT_EQ(0, solver.OptimalFlow());
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(1, 0));
  EXPECT_EQ(0, solver.OptimalFlow());
  EXPECT_EQ(0, solver.NumArcs());
  EXPECT_EQ(0, solver.NumNodes());
}
TEST(SimpleMaxFlowTest, ArcFlowIsZeroOnDisconnectedGraph) {
  // The sink (node 2) is disconnected from the 0 <-> 1 cycle: the optimal
  // flow is 0 and no arc carries any flow.
  SimpleMaxFlow solver;
  solver.AddArcWithCapacity(0, 1, 10);
  solver.AddArcWithCapacity(1, 0, 10);
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(0, 2));
  EXPECT_EQ(0, solver.OptimalFlow());
  for (int arc = 0; arc < 2; ++arc) {
    EXPECT_EQ(0, solver.Flow(arc));
  }
}
TEST(SimpleMaxFlowTest, EmptyWithInvalidSourceAndSink) {
  // source == sink or a negative node index is rejected as BAD_INPUT.
  SimpleMaxFlow solver;
  EXPECT_EQ(SimpleMaxFlow::BAD_INPUT, solver.Solve(0, 0));
  EXPECT_EQ(SimpleMaxFlow::BAD_INPUT, solver.Solve(-1, 10));
  EXPECT_EQ(0, solver.OptimalFlow());
  EXPECT_EQ(0, solver.NumArcs());
  EXPECT_EQ(0, solver.NumNodes());
}
TEST(SimpleMaxFlowTest, TrivialGraphWithMaxCapacity) {
  // A single arc with the maximum representable capacity: the whole capacity
  // flows in the arc direction, and nothing in the reverse direction.
  SimpleMaxFlow solver;
  const FlowQuantity kMaxCapacity = std::numeric_limits<FlowQuantity>::max();
  EXPECT_EQ(0, solver.AddArcWithCapacity(0, 1, kMaxCapacity));
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(1, 0));
  EXPECT_EQ(0, solver.OptimalFlow());
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(0, 1));
  EXPECT_EQ(kMaxCapacity, solver.OptimalFlow());
  EXPECT_EQ(1, solver.NumArcs());
  EXPECT_EQ(2, solver.NumNodes());
  EXPECT_EQ(kMaxCapacity, solver.Flow(0));
}
TEST(SimpleMaxFlowTest, TrivialGraphWithRepeatedArc) {
  // Two parallel 0->1 arcs of capacity 10 each: the optimal flow is 20 and
  // both arcs are saturated.
  SimpleMaxFlow solver;
  EXPECT_EQ(0, solver.AddArcWithCapacity(0, 1, 10));
  EXPECT_EQ(1, solver.AddArcWithCapacity(0, 1, 10));
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(1, 0));
  EXPECT_EQ(0, solver.OptimalFlow());
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(0, 1));
  EXPECT_EQ(20, solver.OptimalFlow());
  EXPECT_EQ(2, solver.NumArcs());
  EXPECT_EQ(2, solver.NumNodes());
  for (int arc = 0; arc < 2; ++arc) {
    EXPECT_EQ(10, solver.Flow(arc));
  }
}
TEST(SimpleMaxFlowTest, SelfLoop) {
  // Self-loop arcs (0->0, 1->1, 2->2) must be handled gracefully: the max
  // flow on the chain 0 -> 1 -> 2 is 10.
  SimpleMaxFlow solver;
  const int kTails[] = {0, 0, 1, 1, 2};
  const int kHeads[] = {0, 1, 1, 2, 2};
  for (int i = 0; i < 5; ++i) {
    EXPECT_EQ(i, solver.AddArcWithCapacity(kTails[i], kHeads[i], 10));
  }
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver.Solve(0, 2));
  EXPECT_EQ(10, solver.OptimalFlow());
}
// Verifies that SetArcCapacity() can update a capacity between two Solve()
// calls, and that the new optimum reflects the change.
TEST(SimpleMaxFlowTest, SetArcCapacity) {
  SimpleMaxFlow max_flow;
  // Graph: 0------[10]-->1--[15]->3
  //         \            ^        ^
  //          \          [3]       |
  //           \          |        |
  //            `--[15]-->2--[10]--'
  //
  // Max flow is saturated by the 2->1 arc: it's 23.
  max_flow.AddArcWithCapacity(0, 1, 10);
  max_flow.AddArcWithCapacity(0, 2, 15);
  const int arc21 = max_flow.AddArcWithCapacity(2, 1, 3);
  max_flow.AddArcWithCapacity(2, 3, 10);
  max_flow.AddArcWithCapacity(1, 3, 15);
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, max_flow.Solve(0, 3));
  EXPECT_EQ(23, max_flow.OptimalFlow());
  // Raising the 2->1 capacity to 10 unlocks two more units of flow.
  max_flow.SetArcCapacity(arc21, 10);
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, max_flow.Solve(0, 3));
  EXPECT_EQ(25, max_flow.OptimalFlow());
}
// Loads the arcs of `model` into `solver`, picks the source (node with
// supply +1) and the sink (node with supply -1), and solves the max flow.
// Returns the solver status; BAD_INPUT if the model lacks a source or a sink.
SimpleMaxFlow::Status LoadAndSolveFlowModel(const FlowModelProto& model,
                                            SimpleMaxFlow* solver) {
  for (int a = 0; a < model.arcs_size(); ++a) {
    solver->AddArcWithCapacity(model.arcs(a).tail(), model.arcs(a).head(),
                               model.arcs(a).capacity());
  }
  // Initialize to an invalid node index so that a model without a source or
  // a sink deterministically yields BAD_INPUT, instead of passing
  // uninitialized values to Solve() (undefined behavior).
  int source = -1;
  int sink = -1;
  for (int n = 0; n < model.nodes_size(); ++n) {
    if (model.nodes(n).supply() == 1) source = model.nodes(n).id();
    if (model.nodes(n).supply() == -1) sink = model.nodes(n).id();
  }
  return solver->Solve(source, sink);
}
// Round-trips a model through CreateFlowModelProto() and checks both the
// re-solved optimum and the exact proto contents.
TEST(SimpleMaxFlowTest, CreateFlowModelProto) {
  SimpleMaxFlow solver1;
  solver1.AddArcWithCapacity(0, 1, 10);
  solver1.AddArcWithCapacity(0, 2, 10);
  solver1.AddArcWithCapacity(1, 2, 5);
  solver1.AddArcWithCapacity(2, 3, 15);
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, solver1.Solve(0, 3));
  EXPECT_EQ(15, solver1.OptimalFlow());
  const FlowModelProto model_proto = solver1.CreateFlowModelProto(0, 3);
  // Load the model_proto in another solver, check that the solve gives the
  // same optimal flow, and that the re-exported proto is identical.
  SimpleMaxFlow solver2;
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL,
            LoadAndSolveFlowModel(model_proto, &solver2));
  EXPECT_EQ(15, solver2.OptimalFlow());
  EXPECT_THAT(model_proto,
              ::testing::EqualsProto(solver2.CreateFlowModelProto(0, 3)));
  // Check that the proto is what we expect it is.
  const FlowModelProto expected = ParseTestProto(R"pb(
    problem_type: MAX_FLOW
    nodes { id: 0 supply: 1 }
    nodes { id: 1 }
    nodes { id: 2 }
    nodes { id: 3 supply: -1 }
    arcs { tail: 0 head: 1 capacity: 10 }
    arcs { tail: 0 head: 2 capacity: 10 }
    arcs { tail: 1 head: 2 capacity: 5 }
    arcs { tail: 2 head: 3 capacity: 15 }
  )pb");
  EXPECT_THAT(model_proto, testing::EqualsProto(expected));
}
// A problem that was triggering an issue on 28/11/2014 (now fixed).
// Regression test: loads the problematic instance from testdata and checks
// the known-optimal flow value.
TEST(SimpleMaxFlowTest, ProblematicProblemWithMaxCapacity) {
  CHECK_OK_AND_ASSIGN(
      FlowModelProto model,
      ReadFileToProto<FlowModelProto>(
          file::JoinPathRespectAbsolute(absl::GetFlag(testing::SrcDir()),
                                        "ortools/graph/"
                                        "testdata/max_flow_test1.pb.txt")));
  SimpleMaxFlow solver;
  EXPECT_EQ(SimpleMaxFlow::OPTIMAL, LoadAndSolveFlowModel(model, &solver));
  EXPECT_EQ(10290243, solver.OptimalFlow());
}
// Builds the graph described by the parallel arrays (tail, head, capacity),
// then solves the max flow between node 0 and node num_nodes - 1 under all 8
// combinations of the solver's three boolean options, checking that every
// combination returns the same status. When OPTIMAL, the per-arc flows and
// the total flow are checked against the expected values; the source/sink
// side min-cuts are checked when the corresponding vectors are provided.
// Returns the (common) solver status.
template <typename Graph>
typename GenericMaxFlow<Graph>::Status MaxFlowTester(
    const typename Graph::NodeIndex num_nodes,
    const typename Graph::ArcIndex num_arcs,
    const typename Graph::NodeIndex* tail,
    const typename Graph::NodeIndex* head, const FlowQuantity* capacity,
    const FlowQuantity* expected_flow, const FlowQuantity expected_total_flow,
    const std::vector<typename Graph::NodeIndex>* expected_source_min_cut =
        nullptr,
    const std::vector<typename Graph::NodeIndex>* expected_sink_min_cut =
        nullptr) {
  Graph graph(num_nodes, num_arcs);
  for (int i = 0; i < num_arcs; ++i) {
    graph.AddArc(tail[i], head[i]);
  }
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  EXPECT_TRUE(permutation.empty());
  typename GenericMaxFlow<Graph>::Status result =
      GenericMaxFlow<Graph>::OPTIMAL;
  for (int options = 0; options < 8; ++options) {
    GenericMaxFlow<Graph> max_flow(&graph, 0, num_nodes - 1);
    // Each of the 3 bits of `options` toggles one independent solver option.
    max_flow.SetUseGlobalUpdate(options & 1);
    max_flow.SetUseTwoPhaseAlgorithm(options & 2);
    // Fixed: this used `options & 3`, which is set whenever either of the
    // first two bits is, so the third option was never toggled independently.
    max_flow.ProcessNodeByHeight(options & 4);
    for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
      max_flow.SetArcCapacity(arc, capacity[arc]);
      EXPECT_EQ(max_flow.Capacity(arc), capacity[arc]);
    }
    EXPECT_TRUE(max_flow.CheckInputConsistency());
    EXPECT_TRUE(max_flow.Solve());
    if (max_flow.status() == GenericMaxFlow<Graph>::OPTIMAL) {
      const FlowQuantity total_flow = max_flow.GetOptimalFlow();
      EXPECT_EQ(expected_total_flow, total_flow);
      for (int i = 0; i < num_arcs; ++i) {
        EXPECT_EQ(expected_flow[i], max_flow.Flow(i)) << " i = " << i;
      }
    }
    EXPECT_TRUE(max_flow.CheckResult());
    // All 8 option combinations must agree on the status.
    if (options == 0) {
      result = max_flow.status();
    } else {
      EXPECT_EQ(result, max_flow.status());
    }
    // Tests the min-cut functions.
    if (expected_source_min_cut != nullptr) {
      std::vector<NodeIndex> cut;
      max_flow.GetSourceSideMinCut(&cut);
      std::sort(cut.begin(), cut.end());
      EXPECT_THAT(*expected_source_min_cut, WhenSorted(ContainerEq(cut)));
    }
    if (expected_sink_min_cut != nullptr) {
      std::vector<NodeIndex> cut;
      max_flow.GetSinkSideMinCut(&cut);
      std::sort(cut.begin(), cut.end());
      EXPECT_THAT(*expected_sink_min_cut, WhenSorted(ContainerEq(cut)));
    }
  }
  return result;
}
// Typed fixture: runs the same max-flow tests on every supported graph
// implementation.
template <typename Graph>
class GenericMaxFlowTest : public ::testing::Test {};
typedef ::testing::Types<StarGraph, util::ReverseArcListGraph<>,
                         util::ReverseArcStaticGraph<>,
                         util::ReverseArcMixedGraph<> >
    GraphTypes;
TYPED_TEST_SUITE(GenericMaxFlowTest, GraphTypes);
TYPED_TEST(GenericMaxFlowTest, FeasibleFlow1) {
  // Simple chain 0 -> 1 -> 2 -> 3: the max flow (8) is limited by the
  // capacity-8 arcs at both ends.
  const int kNumNodes = 4;
  const int kNumArcs = 3;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3};
  const FlowQuantity kCapacity[kNumArcs] = {8, 10, 8};
  const FlowQuantity kExpectedFlow[kNumArcs] = {8, 8, 8};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0});
  std::vector<int> sink_cut({3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
TYPED_TEST(GenericMaxFlowTest, FeasibleFlow2) {
  // Small DAG from node 0 to node 5 (including zero-capacity arcs); the max
  // flow is 10, saturating both arcs into the sink.
  const int kNumNodes = 6;
  const int kNumArcs = 9;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1,
                                                         2, 3, 3, 4};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 4, 3,
                                                         4, 4, 5, 5};
  const FlowQuantity kCapacity[kNumArcs] = {6, 8, 5, 0, 1, 4, 0, 6, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {1, 4, 5, 0, 1, 4, 0, 6, 4};
  const FlowQuantity kExpectedTotalFlow = 10;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
TYPED_TEST(GenericMaxFlowTest, FeasibleFlowWithMultipleArcs) {
  // Chain 0 -> 1 -> 2 -> 3 -> 4 with two parallel arcs per hop; the 4+4
  // capacity between 2 and 3 (and 3 and 4) limits the flow to 8.
  const int kNumNodes = 5;
  const int kNumArcs = 8;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 1,
                                                         2, 2, 3, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 1, 2, 2,
                                                         3, 3, 4, 4};
  const FlowQuantity kCapacity[kNumArcs] = {5, 3, 5, 3, 4, 4, 4, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {5, 3, 5, 3, 4, 4, 4, 4};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0});
  std::vector<int> sink_cut({4});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
TYPED_TEST(GenericMaxFlowTest, HugeCapacity) {
  // Some arcs have the maximum representable capacity; the max flow (8) is
  // still bounded by the two middle arcs and must be computed without
  // overflow.
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const int kNumNodes = 5;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3, 4};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax, 5, 3,
                                            kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {5, 3, 5, 3, 8};
  const FlowQuantity kExpectedTotalFlow = 8;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({4, 3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
TYPED_TEST(GenericMaxFlowTest, FlowQuantityOverflowLimitCase) {
  // The optimal flow is exactly the maximum representable FlowQuantity
  // (kHalfLow + kHalfHigh == kCapacityMax): the largest value that can be
  // returned without overflowing.
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const FlowQuantity kHalfLow = kCapacityMax / 2;
  const FlowQuantity kHalfHigh = kCapacityMax - kHalfLow;
  const int kNumNodes = 5;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3, 4};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax,
                                            kHalfLow, kHalfHigh, kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {kHalfLow, kHalfHigh, kHalfLow,
                                                kHalfHigh, kCapacityMax};
  const FlowQuantity kExpectedTotalFlow = kCapacityMax;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({4});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
TYPED_TEST(GenericMaxFlowTest, FlowQuantityOverflow) {
  // Two disjoint kCapacityMax paths from source to sink: the true max flow
  // (2 * kCapacityMax) is not representable, so the solver must report
  // INT_OVERFLOW.
  const FlowQuantity kCapacityMax = std::numeric_limits<FlowQuantity>::max();
  const int kNumNodes = 4;
  const int kNumArcs = 4;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 3};
  const FlowQuantity kCapacity[kNumArcs] = {kCapacityMax, kCapacityMax,
                                            kCapacityMax, kCapacityMax};
  const FlowQuantity kExpectedFlow[kNumArcs] = {kCapacityMax, kCapacityMax,
                                                kCapacityMax, kCapacityMax};
  const FlowQuantity kExpectedTotalFlow = kCapacityMax;
  EXPECT_EQ(
      GenericMaxFlow<TypeParam>::INT_OVERFLOW,
      MaxFlowTester<TypeParam>(kNumNodes, kNumArcs, kTail, kHead, kCapacity,
                               kExpectedFlow, kExpectedTotalFlow));
}
TYPED_TEST(GenericMaxFlowTest, DirectArcFromSourceToSink) {
  // One arc goes straight from the source (0) to the sink (3); it carries
  // its full capacity (8) on top of the flow through nodes 1 and 2.
  const int kNumNodes = 4;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 1, 2};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 3, 2, 3, 3};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 5, 2, 2};
  const FlowQuantity kExpectedFlow[kNumArcs] = {2, 8, 2, 2, 2};
  const FlowQuantity kExpectedTotalFlow = 12;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({3});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
TYPED_TEST(GenericMaxFlowTest, FlowOnDisconnectedGraph1) {
  // The sink (node 5) has no incoming arc: the max flow is 0 and no arc
  // carries any flow.
  const int kNumNodes = 6;
  const int kNumArcs = 7;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 2, 3};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 3, 4, 3, 4, 4};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 5, 3, 4, 5, 6};
  const FlowQuantity kExpectedFlow[kNumArcs] = {0, 0, 0, 0, 0, 0, 0};
  const FlowQuantity kExpectedTotalFlow = 0;
  std::vector<int> source_cut({0, 1, 2, 3, 4});
  std::vector<int> sink_cut({5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
TYPED_TEST(GenericMaxFlowTest, FlowOnDisconnectedGraph2) {
  // The source component {0, 1, 2} and the sink component {3, 4, 5} are not
  // connected: the max flow is 0.
  const int kNumNodes = 6;
  const int kNumArcs = 5;
  const typename TypeParam::NodeIndex kTail[kNumArcs] = {0, 0, 3, 3, 4};
  const typename TypeParam::NodeIndex kHead[kNumArcs] = {1, 2, 4, 5, 5};
  const FlowQuantity kCapacity[kNumArcs] = {5, 8, 6, 6, 4};
  const FlowQuantity kExpectedFlow[kNumArcs] = {0, 0, 0, 0, 0};
  const FlowQuantity kExpectedTotalFlow = 0;
  std::vector<int> source_cut({0, 1, 2});
  std::vector<int> sink_cut({3, 4, 5});
  EXPECT_EQ(GenericMaxFlow<TypeParam>::OPTIMAL,
            MaxFlowTester<TypeParam>(
                kNumNodes, kNumArcs, kTail, kHead, kCapacity, kExpectedFlow,
                kExpectedTotalFlow, &source_cut, &sink_cut));
}
// Appends a source node (index num_tails + num_heads) connected to every
// "tail" node [0, num_tails), and a sink node (the next index) receiving an
// arc from every "head" node [num_tails, num_tails + num_heads).
template <typename Graph>
void AddSourceAndSink(const typename Graph::NodeIndex num_tails,
                      const typename Graph::NodeIndex num_heads, Graph* graph) {
  const typename Graph::NodeIndex source = num_tails + num_heads;
  const typename Graph::NodeIndex sink = source + 1;
  for (typename Graph::NodeIndex t = 0; t < num_tails; ++t) {
    graph->AddArc(source, t);
  }
  for (typename Graph::NodeIndex h = 0; h < num_heads; ++h) {
    graph->AddArc(num_tails + h, sink);
  }
}
// Builds a complete bipartite graph between num_tails left nodes and
// num_heads right nodes, then hooks it up to a super-source and super-sink
// (see AddSourceAndSink). Storage is reserved up front for the exact node
// and arc counts.
template <typename Graph>
void GenerateCompleteGraph(const typename Graph::NodeIndex num_tails,
                           const typename Graph::NodeIndex num_heads,
                           Graph* graph) {
  typedef typename Graph::NodeIndex NodeIndex;
  // +2 nodes and +(num_tails + num_heads) arcs for the source and sink.
  const typename Graph::ArcIndex total_arcs =
      num_tails * num_heads + num_tails + num_heads;
  graph->Reserve(num_tails + num_heads + 2, total_arcs);
  for (NodeIndex left = 0; left < num_tails; ++left) {
    for (NodeIndex right = 0; right < num_heads; ++right) {
      graph->AddArc(left, num_tails + right);
    }
  }
  AddSourceAndSink(num_tails, num_heads, graph);
}
// Builds a sparse bipartite graph: each of the num_tails left nodes gets
// `degree` arcs whose right endpoints are drawn uniformly at random
// (duplicate arcs are possible), plus a super-source and super-sink.
// The RNG is seeded with a constant so the graph is reproducible.
template <typename Graph>
void GeneratePartialRandomGraph(const typename Graph::NodeIndex num_tails,
                                const typename Graph::NodeIndex num_heads,
                                const typename Graph::NodeIndex degree,
                                Graph* graph) {
  const typename Graph::NodeIndex num_nodes = num_tails + num_heads + 2;
  const typename Graph::ArcIndex num_arcs =
      num_tails * degree + num_tails + num_heads;
  graph->Reserve(num_nodes, num_arcs);
  std::mt19937 randomizer(0);
  for (typename Graph::NodeIndex tail = 0; tail < num_tails; ++tail) {
    for (typename Graph::NodeIndex d = 0; d < degree; ++d) {
      // Random head in [0, num_heads), shifted onto the right side.
      const typename Graph::NodeIndex head =
          absl::Uniform(randomizer, 0, num_heads);
      graph->AddArc(tail, head + num_tails);
    }
  }
  AddSourceAndSink(num_tails, num_heads, graph);
}
// Fills *arc_valuation with one pseudo-random value in [0, max_range) per
// arc of `graph`. The generator is seeded with a constant so that runs are
// reproducible.
template <typename Graph>
void GenerateRandomArcValuations(const Graph& graph, const int64_t max_range,
                                 std::vector<int64_t>* arc_valuation) {
  const typename Graph::ArcIndex num_arcs = graph.num_arcs();
  arc_valuation->resize(num_arcs);
  std::mt19937 randomizer(0);
  // Use the cached count: the original re-queried graph.num_arcs() in the
  // loop condition, inconsistently with the resize above.
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    (*arc_valuation)[arc] = absl::Uniform(randomizer, 0, max_range);
  }
}
// Copies the per-arc capacities from `arc_capacity` into `max_flow`,
// indexed by arc.
template <typename Graph>
void SetUpNetworkData(const std::vector<int64_t>& arc_capacity,
                      GenericMaxFlow<Graph>* max_flow) {
  const typename Graph::ArcIndex num_arcs = max_flow->graph()->num_arcs();
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    max_flow->SetArcCapacity(arc, arc_capacity[arc]);
  }
}
// Solves `max_flow`, expecting an OPTIMAL status, and sanity-checks the
// solution: every reverse arc carries non-positive flow and zero capacity,
// and every direct arc's flow lies in [0, capacity]. Returns the optimal
// flow value.
template <typename Graph>
FlowQuantity SolveMaxFlow(GenericMaxFlow<Graph>* max_flow) {
  EXPECT_TRUE(max_flow->Solve());
  EXPECT_EQ(GenericMaxFlow<Graph>::OPTIMAL, max_flow->status());
  const Graph* graph = max_flow->graph();
  for (typename Graph::ArcIndex arc = 0; arc < graph->num_arcs(); ++arc) {
    EXPECT_LE(max_flow->Flow(Graphs<Graph>::OppositeArc(*graph, arc)), 0)
        << arc;
    EXPECT_EQ(0, max_flow->Capacity(Graphs<Graph>::OppositeArc(*graph, arc)))
        << arc;
    EXPECT_LE(0, max_flow->Flow(arc)) << arc;
    EXPECT_LE(max_flow->Flow(arc), max_flow->Capacity(arc)) << arc;
  }
  return max_flow->GetOptimalFlow();
}
// Solves the same max-flow instance as a linear program with GLOP: one
// variable per arc bounded by the arc capacity, flow conservation at every
// internal node, and an objective that maximizes the flow leaving the
// source (by minimizing its negation, since MPSolver minimizes). Used as a
// reference solution to cross-check GenericMaxFlow. Returns the optimal
// flow rounded to the nearest integer.
template <typename Graph>
FlowQuantity SolveMaxFlowWithLP(GenericMaxFlow<Graph>* max_flow) {
  MPSolver solver("LPSolver", MPSolver::GLOP_LINEAR_PROGRAMMING);
  const double infinity = solver.infinity();
  const Graph* graph = max_flow->graph();
  const typename Graph::NodeIndex num_nodes = graph->num_nodes();
  const typename Graph::ArcIndex num_arcs = graph->num_arcs();
  // By construction (AddSourceAndSink) the source and sink are the last two
  // nodes of the graph.
  const typename Graph::NodeIndex source_index = num_nodes - 2;
  std::unique_ptr<MPConstraint*[]> constraint(new MPConstraint*[num_nodes]);
  for (typename Graph::NodeIndex node = 0; node < graph->num_nodes(); ++node) {
    constraint[node] = solver.MakeRowConstraint();
    if (node < source_index) {  // Node is neither source nor sink.
      constraint[node]->SetBounds(0.0, 0.0);  // Flow is conserved.
    } else {
      constraint[node]->SetBounds(-infinity, +infinity);
    }
  }
  std::unique_ptr<MPVariable*[]> var(new MPVariable*[num_arcs]);
  for (typename Graph::ArcIndex arc = 0; arc < graph->num_arcs(); ++arc) {
    var[arc] = solver.MakeNumVar(0.0, max_flow->Capacity(arc),
                                 absl::StrFormat("v%d", arc));
    constraint[graph->Tail(arc)]->SetCoefficient(var[arc], 1.0);
    constraint[graph->Head(arc)]->SetCoefficient(var[arc], -1.0);
  }
  MPObjective* const objective = solver.MutableObjective();
  for (typename Graph::OutgoingArcIterator arc_it(*graph, source_index);
       arc_it.Ok(); arc_it.Next()) {
    const typename Graph::ArcIndex arc = arc_it.Index();
    objective->SetCoefficient(var[arc], -1.0);
  }
  solver.Solve();
  // The objective is the negated flow; +.5 rounds it to the nearest integer.
  return static_cast<FlowQuantity>(-objective->Value() + .5);
}
template <typename Graph>
struct MaxFlowSolver {
typedef FlowQuantity (*Solver)(GenericMaxFlow<Graph>* max_flow);
};
// Builds a complete bipartite assignment instance with unit capacities and
// checks that solver `f` finds a flow of expected_flow1.
// expected_flow2 is unused here; presumably the parameter exists so that
// the assignment and flow drivers share the signature the test macros
// expect — confirm before removing it.
template <typename Graph>
void FullRandomAssignment(typename MaxFlowSolver<Graph>::Solver f,
                          typename Graph::NodeIndex num_tails,
                          typename Graph::NodeIndex num_heads,
                          FlowQuantity expected_flow1,
                          FlowQuantity expected_flow2) {
  Graph graph;
  GenerateCompleteGraph(num_tails, num_heads, &graph);
  Graphs<Graph>::Build(&graph);
  std::vector<int64_t> arc_capacity(graph.num_arcs(), 1);
  // The source and sink are the last two nodes (see AddSourceAndSink).
  std::unique_ptr<GenericMaxFlow<Graph> > max_flow(new GenericMaxFlow<Graph>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1));
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Like FullRandomAssignment, but on a sparse random bipartite graph where
// each tail node has kDegree random outgoing arcs. expected_flow2 is
// unused (kept, presumably, for macro signature compatibility — confirm).
template <typename Graph>
void PartialRandomAssignment(typename MaxFlowSolver<Graph>::Solver f,
                             typename Graph::NodeIndex num_tails,
                             typename Graph::NodeIndex num_heads,
                             FlowQuantity expected_flow1,
                             FlowQuantity expected_flow2) {
  const typename Graph::NodeIndex kDegree = 10;
  Graph graph;
  GeneratePartialRandomGraph(num_tails, num_heads, kDegree, &graph);
  Graphs<Graph>::Build(&graph);
  CHECK_EQ(graph.num_arcs(), num_tails * kDegree + num_tails + num_heads);
  std::vector<int64_t> arc_capacity(graph.num_arcs(), 1);
  std::unique_ptr<GenericMaxFlow<Graph> > max_flow(new GenericMaxFlow<Graph>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1));
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Resets every arc capacity to its original value lowered by `delta`,
// clamped at zero since capacities cannot be negative.
template <typename Graph>
void ChangeCapacities(const std::vector<int64_t>& arc_capacity,
                      FlowQuantity delta, GenericMaxFlow<Graph>* max_flow) {
  const typename Graph::ArcIndex num_arcs = max_flow->graph()->num_arcs();
  for (typename Graph::ArcIndex arc = 0; arc < num_arcs; ++arc) {
    const int64_t reduced = arc_capacity[arc] - delta;
    max_flow->SetArcCapacity(arc, reduced > 0 ? reduced : int64_t{0});
  }
}
// Solves a sparse random instance with random capacities, then re-solves
// after uniformly lowering every capacity by kCapacityDelta (expecting
// expected_flow2), and once more after restoring the capacities (expecting
// expected_flow1 again), which exercises re-solving a modified network.
template <typename Graph>
void PartialRandomFlow(typename MaxFlowSolver<Graph>::Solver f,
                       typename Graph::NodeIndex num_tails,
                       typename Graph::NodeIndex num_heads,
                       FlowQuantity expected_flow1,
                       FlowQuantity expected_flow2) {
  const typename Graph::NodeIndex kDegree = 10;
  const FlowQuantity kCapacityRange = 10000;
  const FlowQuantity kCapacityDelta = 1000;
  Graph graph;
  GeneratePartialRandomGraph(num_tails, num_heads, kDegree, &graph);
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph, kCapacityRange, &arc_capacity);
  // Build() may permute arc indices; apply the same permutation to keep the
  // capacities aligned with their arcs.
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  util::Permute(permutation, &arc_capacity);
  std::unique_ptr<GenericMaxFlow<Graph> > max_flow(new GenericMaxFlow<Graph>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1));
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
  ChangeCapacities(arc_capacity, kCapacityDelta, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow2, flow);
  ChangeCapacities(arc_capacity, 0, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
// Same protocol as PartialRandomFlow (solve, lower capacities, re-solve,
// restore, re-solve) but on a complete bipartite graph.
template <typename Graph>
void FullRandomFlow(typename MaxFlowSolver<Graph>::Solver f,
                    typename Graph::NodeIndex num_tails,
                    typename Graph::NodeIndex num_heads,
                    FlowQuantity expected_flow1, FlowQuantity expected_flow2) {
  const FlowQuantity kCapacityRange = 10000;
  const FlowQuantity kCapacityDelta = 1000;
  Graph graph;
  GenerateCompleteGraph(num_tails, num_heads, &graph);
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph, kCapacityRange, &arc_capacity);
  // Keep the capacities aligned with the arcs after Build() permutes them.
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  util::Permute(permutation, &arc_capacity);
  std::unique_ptr<GenericMaxFlow<Graph> > max_flow(new GenericMaxFlow<Graph>(
      &graph, graph.num_nodes() - 2, graph.num_nodes() - 1));
  SetUpNetworkData(arc_capacity, max_flow.get());
  FlowQuantity flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
  ChangeCapacities(arc_capacity, kCapacityDelta, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow2, flow);
  ChangeCapacities(arc_capacity, 0, max_flow.get());
  flow = f(max_flow.get());
  EXPECT_EQ(expected_flow1, flow);
}
#define LP_AND_FLOW_TEST(test_name, size, expected_flow1, expected_flow2) \
LP_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
FLOW_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2)
#define LP_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
TEST(LPMaxFlowTest, test_name##size) { \
test_name<StarGraph>(SolveMaxFlowWithLP<StarGraph>, size, size, \
expected_flow1, expected_flow2); \
}
#define FLOW_ONLY_TEST(test_name, size, expected_flow1, expected_flow2) \
TEST(MaxFlowTest, test_name##size) { \
test_name<StarGraph>(SolveMaxFlow, size, size, expected_flow1, \
expected_flow2); \
}
#define FLOW_ONLY_TEST_SG(test_name, size, expected_flow1, expected_flow2) \
TEST(MaxFlowTestStaticGraph, test_name##size) { \
test_name<util::ReverseArcStaticGraph<> >(SolveMaxFlow, size, size, \
expected_flow1, expected_flow2); \
}
// The two trailing values are the expected optimal flows before and after
// the capacity reduction performed by the *Flow drivers (identical for the
// assignment drivers, which solve only once).
LP_AND_FLOW_TEST(FullRandomAssignment, 300, 300, 300);
LP_AND_FLOW_TEST(PartialRandomAssignment, 100, 100, 100);
LP_AND_FLOW_TEST(PartialRandomAssignment, 1000, 1000, 1000);
LP_AND_FLOW_TEST(PartialRandomFlow, 400, 1898664, 1515203);
LP_AND_FLOW_TEST(FullRandomFlow, 100, 482391, 386587);
// LARGE must be defined from the build command line to test larger instances.
#ifdef LARGE
LP_AND_FLOW_TEST(PartialRandomAssignment, 10000, 10000, 10000);
#endif
// Benchmarks one max-flow solve of a kSize x kSize complete bipartite
// assignment problem with unit capacities.
template <typename Graph>
static void BM_FullRandomAssignment(benchmark::State& state) {
  const int kSize = 3000;
  for (auto _ : state) {
    FullRandomAssignment<Graph>(SolveMaxFlow, kSize, kSize, kSize, kSize);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Benchmarks one max-flow solve of a sparse kSize x kSize assignment
// problem (10 random arcs per tail, unit capacities).
template <typename Graph>
static void BM_PartialRandomAssignment(benchmark::State& state) {
  const int kSize = 10100;
  for (auto _ : state) {
    PartialRandomAssignment<Graph>(SolveMaxFlow, kSize, kSize, kSize, kSize);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Benchmarks the solve / reduce-capacities / re-solve / restore / re-solve
// cycle on a sparse random flow instance (expected flows are baked in for
// the fixed kSize and RNG seed).
template <typename Graph>
static void BM_PartialRandomFlow(benchmark::State& state) {
  const int kSize = 800;
  for (auto _ : state) {
    PartialRandomFlow<Graph>(SolveMaxFlow, kSize, kSize, 3884850, 3112123);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Same cycle as BM_PartialRandomFlow but on a complete bipartite graph.
template <typename Graph>
static void BM_FullRandomFlow(benchmark::State& state) {
  const int kSize = 800;
  for (auto _ : state) {
    FullRandomFlow<Graph>(SolveMaxFlow, kSize, kSize, 4000549, 3239512);
  }
  state.SetItemsProcessed(static_cast<int64_t>(state.max_iterations) * kSize);
}
// Register every benchmark for the four graph implementations.
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, StarGraph);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomAssignment, util::ReverseArcMixedGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, StarGraph);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomFlow, util::ReverseArcMixedGraph<>);
// One iteration of each of the following tests is slow.
BENCHMARK_TEMPLATE(BM_FullRandomFlow, StarGraph);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_FullRandomFlow, util::ReverseArcMixedGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, StarGraph);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcListGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcStaticGraph<>);
BENCHMARK_TEMPLATE(BM_PartialRandomAssignment, util::ReverseArcMixedGraph<>);
// The test macros are only needed above; avoid leaking them further.
#undef LP_AND_FLOW_TEST
#undef LP_ONLY_TEST
#undef FLOW_ONLY_TEST
#undef FLOW_ONLY_TEST_SG
// ----------------------------------------------------------
// PriorityQueueWithRestrictedPush tests.
// ----------------------------------------------------------
// Elements must pop in decreasing priority order, and Clear() must leave
// the queue empty.
TEST(PriorityQueueWithRestrictedPushTest, BasicBehavior) {
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  queue.Push("C", 2);
  queue.Push("D", 10);
  queue.Push("E", 9);
  // Pops come out by decreasing priority: 10, 9, 2, 1, 0.
  EXPECT_EQ("D", queue.Pop());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_EQ("C", queue.Pop());
  EXPECT_EQ("A", queue.Pop());
  EXPECT_EQ("B", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  EXPECT_FALSE(queue.IsEmpty());
  queue.Clear();
  EXPECT_TRUE(queue.IsEmpty());
}
// Interleaves Push() and Pop(): pops must still come out in decreasing
// priority order among the elements currently in the queue.
TEST(PriorityQueueWithRestrictedPushTest, BasicBehaviorWithMixedPushPop) {
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("A", 1);
  queue.Push("B", 0);
  queue.Push("C", 2);
  EXPECT_EQ("C", queue.Pop());
  EXPECT_EQ("A", queue.Pop());
  queue.Push("D", 1);
  queue.Push("E", 0);
  // D (priority 1) precedes E and B (priority 0); among equal priorities the
  // later push (E) pops first here.
  EXPECT_EQ("D", queue.Pop());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_EQ("B", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
  queue.Push("E", 1);
  EXPECT_FALSE(queue.IsEmpty());
  EXPECT_EQ("E", queue.Pop());
  EXPECT_TRUE(queue.IsEmpty());
}
// Stress test: pushes 10k elements with random (duplicated, gappy)
// priorities and checks that a random mix of pushes and pops returns the
// elements in the order predicted by a stable sort on priority.
TEST(PriorityQueueWithRestrictedPushTest, RandomPushPop) {
  // Element tagged with a priority; ordering compares priorities only, so
  // equal-priority elements keep their relative order under stable_sort.
  struct ElementWithPriority {
    ElementWithPriority(int _element, int _priority)
        : element(_element), priority(_priority) {}
    bool operator<(const ElementWithPriority& other) const {
      return priority < other.priority;
    }
    int element;
    int priority;
  };
  std::vector<ElementWithPriority> pairs;
  std::mt19937 randomizer(1);
  const int kNumElement = 10000;
  const int kMaxPriority = 10000;  // We want duplicates and gaps.
  for (int i = 0; i < kNumElement; ++i) {
    pairs.push_back(
        ElementWithPriority(i, absl::Uniform(randomizer, 0, kMaxPriority)));
  }
  std::sort(pairs.begin(), pairs.end());
  // Randomly add +1 and push to the queue. size_t indices avoid the
  // signed/unsigned comparisons the original `int` counters produced.
  PriorityQueueWithRestrictedPush<int, int> queue;
  for (size_t i = 0; i < pairs.size(); ++i) {
    pairs[i].priority += absl::Bernoulli(randomizer, 1.0 / 2) ? 1 : 0;
    queue.Push(pairs[i].element, pairs[i].priority);
  }
  // Stable sort the pairs for checking (the queue order is stable).
  std::stable_sort(pairs.begin(), pairs.end());
  // Random Push() and Pop() with more Pop(). `current` is the index just
  // past the element expected from the next Pop().
  size_t current = pairs.size();
  while (!queue.IsEmpty()) {
    EXPECT_GT(current, 0u);
    if (absl::Bernoulli(randomizer, 1.0 / 4) && current < pairs.size()) {
      queue.Push(pairs[current].element, pairs[current].priority);
      ++current;
    } else {
      --current;
      EXPECT_EQ(pairs[current].element, queue.Pop());
    }
  }
}
// Debug-build contract: popping an empty queue dies, and pushing a priority
// below what the restricted-push invariant allows dies as well.
TEST(PriorityQueueWithRestrictedPushDeathTest, DCHECK) {
  // Don't run this test in opt mode.
  if (!DEBUG_MODE) GTEST_SKIP();
  PriorityQueueWithRestrictedPush<std::string, int> queue;
  EXPECT_TRUE(queue.IsEmpty());
  ASSERT_DEATH(queue.Pop(), "");
  queue.Push("A", 10);
  queue.Push("B", 9);
  // Both 4 and 8 are below the allowed range once 10 and 9 were pushed.
  ASSERT_DEATH(queue.Push("C", 4), "");
  ASSERT_DEATH(queue.Push("C", 8), "");
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,975 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/min_cost_flow.h"
#include <algorithm> // For max.
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include <vector>
#include "absl/random/distributions.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
#include "ortools/algorithms/binary_search.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/graphs.h"
#include "ortools/linear_solver/linear_solver.h"
namespace operations_research {
// Shared driver for the typed min-cost-flow tests: builds `graph` from the
// parallel arc arrays, sets unit costs, capacities and node supplies, then
// solves twice (with and without the UpdatePrices option) and checks the
// status. For OPTIMAL instances it also checks the optimal cost and every
// arc flow; for INFEASIBLE instances it checks that the reported feasible
// supply matches the initial supply at every node.
template <typename Graph>
void GenericMinCostFlowTester(
    const typename Graph::NodeIndex num_nodes,
    const typename Graph::ArcIndex num_arcs, const FlowQuantity* node_supply,
    const typename Graph::NodeIndex* tail,
    const typename Graph::NodeIndex* head, const CostValue* cost,
    const FlowQuantity* capacity, const CostValue expected_flow_cost,
    const FlowQuantity* expected_flow,
    const typename GenericMinCostFlow<Graph>::Status expected_status) {
  Graph graph(num_nodes, num_arcs);
  for (int arc = 0; arc < num_arcs; ++arc) {
    graph.AddArc(tail[arc], head[arc]);
  }
  // These tests rely on arc indices being preserved by Build().
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  EXPECT_TRUE(permutation.empty());
  GenericMinCostFlow<Graph> min_cost_flow(&graph);
  for (int arc = 0; arc < num_arcs; ++arc) {
    min_cost_flow.SetArcUnitCost(arc, cost[arc]);
    min_cost_flow.SetArcCapacity(arc, capacity[arc]);
    EXPECT_EQ(min_cost_flow.UnitCost(arc), cost[arc]);
    EXPECT_EQ(min_cost_flow.Capacity(arc), capacity[arc]);
  }
  for (int i = 0; i < num_nodes; ++i) {
    min_cost_flow.SetNodeSupply(i, node_supply[i]);
    EXPECT_EQ(min_cost_flow.Supply(i), node_supply[i]);
  }
  // Bit 0 of `options` toggles the UpdatePrices heuristic.
  for (int options = 0; options < 2; ++options) {
    min_cost_flow.SetUseUpdatePrices(options & 1);
    bool ok = min_cost_flow.Solve();
    typename GenericMinCostFlow<Graph>::Status status = min_cost_flow.status();
    EXPECT_EQ(expected_status, status);
    if (expected_status == GenericMinCostFlow<Graph>::OPTIMAL) {
      EXPECT_TRUE(ok);
      CostValue total_flow_cost = min_cost_flow.GetOptimalCost();
      EXPECT_EQ(expected_flow_cost, total_flow_cost);
      for (int i = 0; i < num_arcs; ++i) {
        EXPECT_EQ(expected_flow[i], min_cost_flow.Flow(i)) << " i = " << i;
      }
    } else if (expected_status == GenericMinCostFlow<Graph>::INFEASIBLE) {
      EXPECT_FALSE(ok);
      for (int node = 0; node < graph.num_nodes(); ++node) {
        FlowQuantity delta = min_cost_flow.InitialSupply(node) -
                             min_cost_flow.FeasibleSupply(node);
        EXPECT_EQ(0, delta) << "at node " << node;
      }
    }
  }
}
// Typed fixture: each TYPED_TEST below runs once per graph implementation
// listed in GraphTypes.
template <typename Graph>
class GenericMinCostFlowTest : public ::testing::Test {};
typedef ::testing::Types<StarGraph, util::ReverseArcListGraph<>,
                         util::ReverseArcStaticGraph<>,
                         util::ReverseArcMixedGraph<>>
    GraphTypes;
TYPED_TEST_SUITE(GenericMinCostFlowTest, GraphTypes);
// Checks that very large arc capacities (near INT64_MAX / 4) do not
// overflow the solver's internal arithmetic.
TYPED_TEST(GenericMinCostFlowTest, CapacityRange) {
  // Check that we can set capacities to large numbers.
  const int kNumNodes = 7;
  const int kNumArcs = 12;
  const FlowQuantity kNodeSupply[kNumNodes] = {20, 10, 25, -11, -13, -17, -14};
  const NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2};
  const NodeIndex kHead[kNumArcs] = {3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6};
  const CostValue kCost[kNumArcs] = {1, 6, 3, 5, 7, 3, 1, 6, 9, 4, 5, 3};
  // Since MinCostFlow stores node excess as a FlowQuantity, one must take care
  // to check that the total flow in/out of a node is less than kint64max. To
  // guarantee this here, we set kCapMax to kint64max / 4 since the maximum
  // degree of a node is 4.
  const int64_t kCapMax = std::numeric_limits<int64_t>::max() / 4;
  const FlowQuantity kCapacity[kNumArcs] = {kCapMax, kCapMax, kCapMax, kCapMax,
                                            kCapMax, kCapMax, kCapMax, kCapMax,
                                            kCapMax, kCapMax, kCapMax, kCapMax};
  const CostValue kExpectedFlowCost = 138;
  const FlowQuantity kExpectedFlow[kNumArcs] = {11, 0, 9, 0, 0, 2,
                                                8,  0, 0, 11, 0, 14};
  GenericMinCostFlowTester<TypeParam>(
      kNumNodes, kNumArcs, kNodeSupply, kTail, kHead, kCost, kCapacity,
      kExpectedFlowCost, kExpectedFlow, GenericMinCostFlow<TypeParam>::OPTIMAL);
}
// Smallest non-trivial instance: a single arc carrying the whole supply.
TYPED_TEST(GenericMinCostFlowTest, Test1) {
  const int kNumNodes = 2;
  const int kNumArcs = 1;
  const FlowQuantity kNodeSupply[kNumNodes] = {12, -12};
  const NodeIndex kTail[kNumArcs] = {0};
  const NodeIndex kHead[kNumArcs] = {1};
  const CostValue kCost[kNumArcs] = {10};
  const FlowQuantity kCapacity[kNumArcs] = {20};
  const CostValue kExpectedFlowCost = 120;  // 12 units at unit cost 10.
  const FlowQuantity kExpectedFlow[kNumArcs] = {12};
  GenericMinCostFlowTester<TypeParam>(
      kNumNodes, kNumArcs, kNodeSupply, kTail, kHead, kCost, kCapacity,
      kExpectedFlowCost, kExpectedFlow, GenericMinCostFlow<TypeParam>::OPTIMAL);
}
// 3 supply nodes x 4 demand nodes with ample capacities; checks the optimal
// cost and the exact arc flows.
TYPED_TEST(GenericMinCostFlowTest, Test2) {
  const int kNumNodes = 7;
  const int kNumArcs = 12;
  const FlowQuantity kNodeSupply[kNumNodes] = {20, 10, 25, -11, -13, -17, -14};
  const NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2};
  const NodeIndex kHead[kNumArcs] = {3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6};
  const CostValue kCost[kNumArcs] = {1, 6, 3, 5, 7, 3, 1, 6, 9, 4, 5, 3};
  const FlowQuantity kCapacity[kNumArcs] = {100, 100, 100, 100, 100, 100,
                                            100, 100, 100, 100, 100, 100};
  const CostValue kExpectedFlowCost = 138;
  const FlowQuantity kExpectedFlow[kNumArcs] = {11, 0, 9, 0, 0, 2,
                                                8,  0, 0, 11, 0, 14};
  GenericMinCostFlowTester<TypeParam>(
      kNumNodes, kNumArcs, kNodeSupply, kTail, kHead, kCost, kCapacity,
      kExpectedFlowCost, kExpectedFlow, GenericMinCostFlow<TypeParam>::OPTIMAL);
}
// Same topology as Test2 but with 0/1 costs chosen so that a zero-cost
// routing of all the supply exists.
TYPED_TEST(GenericMinCostFlowTest, Test3) {
  const int kNumNodes = 7;
  const int kNumArcs = 12;
  const FlowQuantity kNodeSupply[kNumNodes] = {20, 10, 25, -11, -13, -17, -14};
  const NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2};
  const NodeIndex kHead[kNumArcs] = {3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6};
  const CostValue kCost[kNumArcs] = {0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
  const FlowQuantity kCapacity[kNumArcs] = {100, 100, 100, 100, 100, 100,
                                            100, 100, 100, 100, 100, 100};
  const CostValue kExpectedFlowCost = 0;
  const FlowQuantity kExpectedFlow[kNumArcs] = {7,  13, 0, 0, 0, 0,
                                                10, 0,  4, 0, 7, 14};
  GenericMinCostFlowTester<TypeParam>(
      kNumNodes, kNumArcs, kNodeSupply, kTail, kHead, kCost, kCapacity,
      kExpectedFlowCost, kExpectedFlow, GenericMinCostFlow<TypeParam>::OPTIMAL);
}
// An infeasible instance: node 0 wants to ship 20 units to node 5, but only
// 16 can be routed. Checks CheckFeasibility()'s diagnosis, the INFEASIBLE
// solve status, the per-node infeasibility deltas, and that MakeFeasible()
// followed by Solve() reaches OPTIMAL.
TYPED_TEST(GenericMinCostFlowTest, Infeasible) {
  const int kNumNodes = 6;
  const int kNumArcs = 9;
  const FlowQuantity kNodeSupply[kNumNodes] = {20, 0, 0, 0, 0, -20};
  // Excess supply/demand that cannot be routed (4 units at each end).
  const FlowQuantity kNodeInfeasibility[kNumNodes] = {4, 0, 0, 0, 0, -4};
  const NodeIndex kTail[kNumArcs] = {0, 0, 1, 1, 1, 2, 3, 4, 4};
  const NodeIndex kHead[kNumArcs] = {1, 2, 1, 3, 4, 3, 5, 3, 5};
  const CostValue kCost[kNumArcs] = {1, 6, 3, 5, 7, 3, 1, 6, 9};
  const FlowQuantity kCapacity[kNumArcs] = {10, 10, 10, 6, 6, 6, 10, 10, 10};
  const NodeIndex kInfeasibleSupplyNode[] = {0};
  const NodeIndex kInfeasibleDemandNode[] = {5};
  const NodeIndex kFeasibleSupply[] = {16};
  const NodeIndex kFeasibleDemand[] = {-16};
  TypeParam graph(kNumNodes, kNumArcs);
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    graph.AddArc(kTail[arc], kHead[arc]);
  }
  std::vector<ArcIndex> permutation;
  Graphs<TypeParam>::Build(&graph, &permutation);
  EXPECT_TRUE(permutation.empty());
  GenericMinCostFlow<TypeParam> min_cost_flow(&graph);
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    min_cost_flow.SetArcUnitCost(arc, kCost[arc]);
    min_cost_flow.SetArcCapacity(arc, kCapacity[arc]);
  }
  // This loop iterates over nodes; the original used a misleading loop
  // variable named `arc` of type ArcIndex.
  for (NodeIndex node = 0; node < kNumNodes; ++node) {
    min_cost_flow.SetNodeSupply(node, kNodeSupply[node]);
  }
  std::vector<NodeIndex> infeasible_supply_node;
  std::vector<NodeIndex> infeasible_demand_node;
  bool feasible = min_cost_flow.CheckFeasibility(&infeasible_supply_node,
                                                 &infeasible_demand_node);
  EXPECT_FALSE(feasible);
  for (int i = 0; i < infeasible_supply_node.size(); ++i) {
    const NodeIndex node = infeasible_supply_node[i];
    EXPECT_EQ(node, kInfeasibleSupplyNode[i]);
    EXPECT_EQ(min_cost_flow.FeasibleSupply(node), kFeasibleSupply[i]);
  }
  for (int i = 0; i < infeasible_demand_node.size(); ++i) {
    const NodeIndex node = infeasible_demand_node[i];
    EXPECT_EQ(node, kInfeasibleDemandNode[i]);
    EXPECT_EQ(min_cost_flow.FeasibleSupply(node), kFeasibleDemand[i]);
  }
  bool ok = min_cost_flow.Solve();
  EXPECT_FALSE(ok);
  EXPECT_EQ(GenericMinCostFlow<TypeParam>::INFEASIBLE, min_cost_flow.status());
  for (NodeIndex node = 0; node < kNumNodes; ++node) {
    FlowQuantity delta =
        min_cost_flow.InitialSupply(node) - min_cost_flow.FeasibleSupply(node);
    EXPECT_EQ(kNodeInfeasibility[node], delta);
  }
  EXPECT_EQ(min_cost_flow.GetOptimalCost(), 0);
  // MakeFeasible() trims the supplies/demands; the instance becomes solvable.
  min_cost_flow.MakeFeasible();
  ok = min_cost_flow.Solve();
  EXPECT_TRUE(ok);
  EXPECT_EQ(GenericMinCostFlow<TypeParam>::OPTIMAL, min_cost_flow.status());
}
// Test on a 4x4 matrix. Example taken from
// http://www.ee.oulu.fi/~mpa/matreng/eem1_2-1.htm
// A 4x4 assignment problem: one unit of supply per source, one unit of
// demand per target, unit capacities on every source->target arc.
TYPED_TEST(GenericMinCostFlowTest, Small4x4Matrix) {
  const int kNumSources = 4;
  const int kNumTargets = 4;
  const CostValue kCost[kNumSources][kNumTargets] = {{90, 75, 75, 80},
                                                     {35, 85, 55, 65},
                                                     {125, 95, 90, 105},
                                                     {45, 110, 95, 115}};
  const CostValue kExpectedCost = 275;
  TypeParam graph(kNumSources + kNumTargets, kNumSources * kNumTargets);
  for (NodeIndex source = 0; source < kNumSources; ++source) {
    for (NodeIndex target = 0; target < kNumTargets; ++target) {
      graph.AddArc(source, kNumSources + target);
    }
  }
  std::vector<ArcIndex> permutation;
  Graphs<TypeParam>::Build(&graph, &permutation);
  EXPECT_TRUE(permutation.empty());
  GenericMinCostFlow<TypeParam> min_cost_flow(&graph);
  // Arcs were added in row-major (source, target) order above.
  int arc = 0;
  for (NodeIndex source = 0; source < kNumSources; ++source) {
    for (NodeIndex target = 0; target < kNumTargets; ++target) {
      min_cost_flow.SetArcUnitCost(arc, kCost[source][target]);
      min_cost_flow.SetArcCapacity(arc, 1);
      ++arc;
    }
  }
  for (NodeIndex source = 0; source < kNumSources; ++source) {
    min_cost_flow.SetNodeSupply(source, 1);
  }
  for (NodeIndex target = 0; target < kNumTargets; ++target) {
    min_cost_flow.SetNodeSupply(kNumSources + target, -1);
  }
  EXPECT_TRUE(min_cost_flow.Solve());
  EXPECT_EQ(GenericMinCostFlow<TypeParam>::OPTIMAL, min_cost_flow.status());
  CostValue total_flow_cost = min_cost_flow.GetOptimalCost();
  EXPECT_EQ(kExpectedCost, total_flow_cost);
}
// Test that very large flow quantities do not overflow and that the total flow
// cost in cases of overflows stays capped at INT64_MAX.
TYPED_TEST(GenericMinCostFlowTest, TotalFlowCostOverflow) {
  const int kNumNodes = 2;
  const int kNumArcs = 1;
  // 2^61 units flowing at unit cost 10 overflows int64; the solver must
  // still route the flow and cap the reported cost.
  const FlowQuantity kNodeSupply[kNumNodes] = {1LL << 61, -1LL << 61};
  const NodeIndex kTail[kNumArcs] = {0};
  const NodeIndex kHead[kNumArcs] = {1};
  const CostValue kCost[kNumArcs] = {10};
  const FlowQuantity kCapacity[kNumArcs] = {1LL << 61};
  const CostValue kExpectedFlowCost = std::numeric_limits<int64_t>::max();
  const FlowQuantity kExpectedFlow[kNumArcs] = {1LL << 61};
  GenericMinCostFlowTester<TypeParam>(
      kNumNodes, kNumArcs, kNodeSupply, kTail, kHead, kCost, kCapacity,
      kExpectedFlowCost, kExpectedFlow, GenericMinCostFlow<TypeParam>::OPTIMAL);
}
// Extreme capacity combined with an extreme negative unit cost must be
// rejected with BAD_COST_RANGE rather than silently overflowing.
TEST(GenericMinCostFlowTest, OverflowPrevention1) {
  util::ReverseArcListGraph<> graph;
  const int arc = graph.AddArc(0, 1);
  GenericMinCostFlow<util::ReverseArcListGraph<>> mcf(&graph);
  mcf.SetArcCapacity(arc, std::numeric_limits<int64_t>::max() - 1);
  mcf.SetArcUnitCost(arc, -std::numeric_limits<int64_t>::max() + 1);
  mcf.SetNodeSupply(0, std::numeric_limits<int64_t>::max());
  mcf.SetNodeSupply(1, -std::numeric_limits<int64_t>::max());
  EXPECT_FALSE(mcf.Solve());
  EXPECT_EQ(mcf.status(), MinCostFlowBase::BAD_COST_RANGE);
}
// Same overflow guard as OverflowPrevention1, but on a self-loop with no
// supplies at all.
TEST(GenericMinCostFlowTest, OverflowPrevention2) {
  util::ReverseArcListGraph<> graph;
  const int arc = graph.AddArc(0, 0);
  GenericMinCostFlow<util::ReverseArcListGraph<>> mcf(&graph);
  mcf.SetArcCapacity(arc, std::numeric_limits<int64_t>::max() - 1);
  mcf.SetArcUnitCost(arc, -std::numeric_limits<int64_t>::max() + 1);
  EXPECT_FALSE(mcf.Solve());
  EXPECT_EQ(mcf.status(), MinCostFlowBase::BAD_COST_RANGE);
}
// A negative-cost self-loop gets saturated; the resulting cost overflows
// and must be reported as the INT64_MAX sentinel.
TEST(GenericMinCostFlowTest, SelfLoop) {
  util::ReverseArcListGraph<> graph;
  const int arc = graph.AddArc(0, 0);
  GenericMinCostFlow<util::ReverseArcListGraph<>> mcf(&graph);
  const int64_t kMaxCost = std::numeric_limits<int64_t>::max();
  mcf.SetArcCapacity(arc, kMaxCost - 1);
  mcf.SetArcUnitCost(arc, -kMaxCost / 4);
  EXPECT_TRUE(mcf.Solve());
  EXPECT_EQ(mcf.status(), MinCostFlowBase::OPTIMAL);
  EXPECT_EQ(mcf.GetOptimalCost(), kMaxCost);  // Indicate overflow.
  EXPECT_EQ(mcf.Flow(arc), kMaxCost - 1);
}
// An empty problem solves trivially with zero cost and zero flow.
TEST(SimpleMinCostFlowTest, Empty) {
  SimpleMinCostFlow min_cost_flow;
  EXPECT_EQ(SimpleMinCostFlow::OPTIMAL, min_cost_flow.Solve());
  EXPECT_EQ(0, min_cost_flow.NumNodes());
  EXPECT_EQ(0, min_cost_flow.NumArcs());
  EXPECT_EQ(0, min_cost_flow.OptimalCost());
  EXPECT_EQ(0, min_cost_flow.MaximumFlow());
}
// Negative unit costs on a simple path: 8 units over two arcs at -10 each.
TEST(SimpleMinCostFlowTest, NegativeCost) {
  SimpleMinCostFlow min_cost_flow;
  min_cost_flow.AddArcWithCapacityAndUnitCost(0, 1, 10, -10);
  min_cost_flow.AddArcWithCapacityAndUnitCost(1, 2, 10, -10);
  min_cost_flow.SetNodeSupply(0, 8);
  min_cost_flow.SetNodeSupply(2, -8);
  EXPECT_EQ(SimpleMinCostFlow::OPTIMAL, min_cost_flow.Solve());
  EXPECT_EQ(-160, min_cost_flow.OptimalCost());
  EXPECT_EQ(8, min_cost_flow.MaximumFlow());
}
// A capacity-bounded negative cycle should be saturated in addition to
// routing the supply: 10 units around the loop (-300) plus 8 units on
// 0 -> 3 (-80).
TEST(SimpleMinCostFlowTest, NegativeCostWithLoop) {
  SimpleMinCostFlow min_cost_flow;
  // We have a loop 0 -> 1 -> 2 -> 0 with negative cost (but capacity bounded).
  min_cost_flow.AddArcWithCapacityAndUnitCost(0, 1, 10, -10);
  min_cost_flow.AddArcWithCapacityAndUnitCost(1, 2, 10, -10);
  min_cost_flow.AddArcWithCapacityAndUnitCost(2, 0, 10, -10);
  min_cost_flow.AddArcWithCapacityAndUnitCost(0, 3, 10, -10);
  min_cost_flow.SetNodeSupply(0, 8);
  min_cost_flow.SetNodeSupply(3, -8);
  EXPECT_EQ(SimpleMinCostFlow::OPTIMAL, min_cost_flow.Solve());
  EXPECT_EQ(-300 - 80, min_cost_flow.OptimalCost());
  EXPECT_EQ(8, min_cost_flow.MaximumFlow());
}
// Zero-cost self-loops must not disturb the flow on the 0 -> 1 -> 2 path
// (arcs 1 and 3); they carry no flow themselves.
TEST(SimpleMinCostFlowTest, SelfLoop) {
  SimpleMinCostFlow min_cost_flow;
  min_cost_flow.AddArcWithCapacityAndUnitCost(0, 0, 10, 0);
  min_cost_flow.AddArcWithCapacityAndUnitCost(0, 1, 10, 0);
  min_cost_flow.AddArcWithCapacityAndUnitCost(1, 1, 10, 0);
  min_cost_flow.AddArcWithCapacityAndUnitCost(1, 2, 10, 0);
  min_cost_flow.AddArcWithCapacityAndUnitCost(2, 2, 10, 0);
  min_cost_flow.SetNodeSupply(0, 8);
  min_cost_flow.SetNodeSupply(2, -8);
  EXPECT_EQ(SimpleMinCostFlow::OPTIMAL, min_cost_flow.Solve());
  EXPECT_EQ(0, min_cost_flow.OptimalCost());
  EXPECT_EQ(8, min_cost_flow.MaximumFlow());
  EXPECT_EQ(8, min_cost_flow.Flow(1));
  EXPECT_EQ(8, min_cost_flow.Flow(3));
}
// A negative-cost self-loop (arc 2) is saturated at its capacity of 10
// (cost -100) while the path flow of 8 units is unaffected.
TEST(SimpleMinCostFlowTest, SelfLoopWithNegativeCost) {
  SimpleMinCostFlow min_cost_flow;
  min_cost_flow.AddArcWithCapacityAndUnitCost(0, 0, 10, 0);
  min_cost_flow.AddArcWithCapacityAndUnitCost(0, 1, 10, 0);
  min_cost_flow.AddArcWithCapacityAndUnitCost(1, 1, 10, -10);
  min_cost_flow.AddArcWithCapacityAndUnitCost(1, 2, 10, 0);
  min_cost_flow.AddArcWithCapacityAndUnitCost(2, 2, 10, 0);
  min_cost_flow.SetNodeSupply(0, 8);
  min_cost_flow.SetNodeSupply(2, -8);
  EXPECT_EQ(SimpleMinCostFlow::OPTIMAL, min_cost_flow.Solve());
  EXPECT_EQ(-100, min_cost_flow.OptimalCost());
  EXPECT_EQ(8, min_cost_flow.MaximumFlow());
  EXPECT_EQ(8, min_cost_flow.Flow(1));
  EXPECT_EQ(10, min_cost_flow.Flow(2));
  EXPECT_EQ(8, min_cost_flow.Flow(3));
}
// The SimpleMinCostFlow counterpart of Test3: a feasible 3x4 instance with
// 0/1 costs; checks cost, total flow, and every arc flow.
TEST(SimpleMinCostFlowTest, FeasibleProblem) {
  const int kNumNodes = 7;
  const int kNumArcs = 12;
  const FlowQuantity kNodeSupply[kNumNodes] = {20, 10, 25, -11, -13, -17, -14};
  const NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2};
  const NodeIndex kHead[kNumArcs] = {3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6};
  const CostValue kCost[kNumArcs] = {0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
  const FlowQuantity kCapacity[kNumArcs] = {100, 100, 100, 100, 100, 100,
                                            100, 100, 100, 100, 100, 100};
  const CostValue kExpectedFlowCost = 0;
  const CostValue kExpectedFlowVolume = 55;
  const FlowQuantity kExpectedFlow[kNumArcs] = {7,  13, 0, 0, 0, 0,
                                                10, 0,  4, 0, 7, 14};
  SimpleMinCostFlow min_cost_flow;
  for (NodeIndex node = 0; node < kNumNodes; ++node) {
    min_cost_flow.SetNodeSupply(node, kNodeSupply[node]);
  }
  // AddArcWithCapacityAndUnitCost returns consecutive arc indices.
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    EXPECT_EQ(arc, min_cost_flow.AddArcWithCapacityAndUnitCost(
                       kTail[arc], kHead[arc], kCapacity[arc], kCost[arc]));
  }
  SimpleMinCostFlow::Status status = min_cost_flow.Solve();
  EXPECT_EQ(SimpleMinCostFlow::OPTIMAL, status);
  EXPECT_EQ(kExpectedFlowCost, min_cost_flow.OptimalCost());
  EXPECT_EQ(kExpectedFlowVolume, min_cost_flow.MaximumFlow());
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    EXPECT_EQ(kExpectedFlow[arc], min_cost_flow.Flow(arc))
        << " for Arc #" << arc << ": " << kTail[arc] << "->" << kHead[arc];
  }
}
// Total supply (55) exceeds the 12 units the unit-capacity arcs can carry,
// so Solve() is INFEASIBLE; SolveMaxFlowWithMinCost() instead routes as
// much as possible, saturating every arc at minimum cost.
TEST(SimpleMinCostFlowTest, InfeasibleProblem) {
  const int kNumNodes = 7;
  const int kNumArcs = 12;
  const FlowQuantity kNodeSupply[kNumNodes] = {20, 10, 25, -11, -13, -17, -14};
  const NodeIndex kTail[kNumArcs] = {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2};
  const NodeIndex kHead[kNumArcs] = {3, 4, 5, 6, 3, 4, 5, 6, 3, 4, 5, 6};
  const CostValue kCost[kNumArcs] = {0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
  SimpleMinCostFlow min_cost_flow;
  for (NodeIndex node = 0; node < kNumNodes; ++node) {
    min_cost_flow.SetNodeSupply(node, kNodeSupply[node]);
  }
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    // The capacity is an integral FlowQuantity; the original passed the
    // double literal 1.0 and relied on an implicit conversion.
    min_cost_flow.AddArcWithCapacityAndUnitCost(kTail[arc], kHead[arc],
                                                /*capacity=*/1, kCost[arc]);
  }
  EXPECT_EQ(SimpleMinCostFlow::INFEASIBLE, min_cost_flow.Solve());
  EXPECT_EQ(SimpleMinCostFlow::OPTIMAL,
            min_cost_flow.SolveMaxFlowWithMinCost());
  // There should be unit flow through all the arcs we added.
  EXPECT_EQ(6, min_cost_flow.OptimalCost());
  EXPECT_EQ(12, min_cost_flow.MaximumFlow());
  for (ArcIndex arc = 0; arc < kNumArcs; ++arc) {
    EXPECT_EQ(1, min_cost_flow.Flow(arc))
        << " for Arc #" << arc << ": " << kTail[arc] << "->" << kHead[arc];
  }
}
// Create a single path graph with large arc unit cost.
// Note that the capacity does not directly influence the max usable cost.
TEST(SimpleMinCostFlowTest, OverflowCostBound) {
  const int n = 100;
  const int64_t kCapacity = 1'000'000;
  const int64_t kint64Max = std::numeric_limits<int64_t>::max();
  // Binary-search for the smallest divisor such that a path of n - 1 arcs,
  // each with unit cost kint64Max / divisor, still solves without tripping
  // the solver's internal cost-overflow guard (BAD_COST_RANGE).
  const int64_t safe_divisor =
      BinarySearch<int64_t>(kint64Max, 1, [](int64_t divisor) {
        SimpleMinCostFlow min_cost_flow;
        const int64_t kMaxCost = kint64Max / divisor;
        for (int i = 0; i + 1 < n; ++i) {
          min_cost_flow.AddArcWithCapacityAndUnitCost(i, i + 1, kCapacity,
                                                      kMaxCost);
        }
        min_cost_flow.SetNodeSupply(0, kCapacity);
        min_cost_flow.SetNodeSupply(n - 1, -kCapacity);
        const auto status = min_cost_flow.Solve();
        if (status == SimpleMinCostFlow::OPTIMAL) return true;
        // Any failure here must be the cost-range guard, nothing else.
        CHECK_EQ(status, SimpleMinCostFlow::BAD_COST_RANGE);
        return false;
      });
  // On a single path graph, the threshold is around n ^ 2.
  EXPECT_EQ(safe_divisor, 11009);
}
template <typename Graph>
void GenerateCompleteGraph(const NodeIndex num_sources,
const NodeIndex num_targets, Graph* graph) {
const NodeIndex num_nodes = num_sources + num_targets;
const ArcIndex num_arcs = num_sources * num_targets;
graph->Reserve(num_nodes, num_arcs);
for (NodeIndex source = 0; source < num_sources; ++source) {
for (NodeIndex target = 0; target < num_targets; ++target) {
graph->AddArc(source, target + num_sources);
}
}
}
template <typename Graph>
void GeneratePartialRandomGraph(const NodeIndex num_sources,
const NodeIndex num_targets,
const NodeIndex degree, Graph* graph) {
const NodeIndex num_nodes = num_sources + num_targets;
const ArcIndex num_arcs = num_sources * degree;
graph->Reserve(num_nodes, num_arcs);
std::mt19937 randomizer(12345);
for (NodeIndex source = 0; source < num_sources; ++source) {
// For each source, we create degree - 1 random arcs.
for (NodeIndex d = 0; d < degree - 1; ++d) {
NodeIndex target = absl::Uniform(randomizer, 0, num_targets);
graph->AddArc(source, target + num_sources);
}
}
// Make sure that each target has at least one corresponding source.
for (NodeIndex target = 0; target < num_targets; ++target) {
NodeIndex source = absl::Uniform(randomizer, 0, num_sources);
graph->AddArc(source, target + num_sources);
}
}
void GenerateRandomSupply(const NodeIndex num_sources,
const NodeIndex num_targets,
const NodeIndex num_generations, const int64_t range,
std::vector<int64_t>* supply) {
const NodeIndex num_nodes = num_sources + num_targets;
supply->resize(num_nodes, 0);
std::mt19937 randomizer(12345);
for (int64_t i = 0; i < num_sources * num_generations; ++i) {
FlowQuantity q = absl::Uniform(randomizer, 0, range);
int supply_index = absl::Uniform(randomizer, 0, num_sources);
int demand_index = absl::Uniform(randomizer, 0, num_targets) + num_sources;
(*supply)[supply_index] += q;
(*supply)[demand_index] -= q;
}
}
// Supply vector for an assignment problem: +1 for each of the first
// num_sources entries, -1 for the following num_targets entries.
void GenerateAssignmentSupply(const NodeIndex num_sources,
                              const NodeIndex num_targets,
                              std::vector<int64_t>* supply) {
  supply->resize(num_sources + num_targets);
  std::fill(supply->begin(), supply->begin() + num_sources, int64_t{1});
  std::fill(supply->begin() + num_sources, supply->end(), int64_t{-1});
}
// Fills `arc_valuation` with num_arcs independent uniform draws from
// [0, max_range). Deterministic (fixed RNG seed).
void GenerateRandomArcValuations(const ArcIndex num_arcs,
                                 const int64_t max_range,
                                 std::vector<int64_t>* arc_valuation) {
  arc_valuation->resize(num_arcs);
  std::mt19937 rng(12345);
  for (int64_t& value : *arc_valuation) {
    value = absl::Uniform(rng, 0, max_range);
  }
}
// Loads node supplies and per-arc costs/capacities into `min_cost_flow`.
// `permutation` maps original arc indices to their post-Build() indices;
// arcs past its end kept their original index (see graph.h).
template <typename Graph>
void SetUpNetworkData(absl::Span<const ArcIndex> permutation,
                      absl::Span<const int64_t> supply,
                      absl::Span<const int64_t> arc_cost,
                      absl::Span<const int64_t> arc_capacity, Graph* graph,
                      GenericMinCostFlow<Graph>* min_cost_flow) {
  for (NodeIndex node = 0; node < graph->num_nodes(); ++node) {
    min_cost_flow->SetNodeSupply(node, supply[node]);
  }
  for (ArcIndex arc = 0; arc < graph->num_arcs(); ++arc) {
    const ArcIndex image = arc < permutation.size() ? permutation[arc] : arc;
    min_cost_flow->SetArcUnitCost(image, arc_cost[arc]);
    min_cost_flow->SetArcCapacity(image, arc_capacity[arc]);
  }
}
// Runs the solver; on an OPTIMAL outcome, cross-checks the reported cost
// against a cost recomputed from the individual arc flows (also verifying
// each flow respects its capacity) and returns it. Returns 0 otherwise.
template <typename Graph>
CostValue SolveMinCostFlow(GenericMinCostFlow<Graph>* min_cost_flow) {
  if (!min_cost_flow->Solve() ||
      min_cost_flow->status() != GenericMinCostFlow<Graph>::OPTIMAL) {
    return 0;
  }
  const CostValue reported_cost = min_cost_flow->GetOptimalCost();
  CostValue recomputed_cost = 0;
  for (ArcIndex arc = 0; arc < min_cost_flow->graph()->num_arcs(); ++arc) {
    const FlowQuantity flow = min_cost_flow->Flow(arc);
    EXPECT_GE(min_cost_flow->Capacity(arc), flow);
    recomputed_cost += min_cost_flow->UnitCost(arc) * flow;
  }
  EXPECT_EQ(reported_cost, recomputed_cost);
  return reported_cost;
}
// Reference implementation: solves the same min-cost-flow instance as a
// linear program with CLP and returns the rounded objective value.
template <typename Graph>
CostValue SolveMinCostFlowWithLP(GenericMinCostFlow<Graph>* min_cost_flow) {
  MPSolver solver("LPSolver", MPSolver::CLP_LINEAR_PROGRAMMING);
  const Graph* graph = min_cost_flow->graph();
  const NodeIndex num_nodes = graph->num_nodes();
  const ArcIndex num_arcs = graph->num_arcs();
  // One flow-conservation equality per node: net outflow == supply.
  std::unique_ptr<MPConstraint*[]> constraint(new MPConstraint*[num_nodes]);
  for (NodeIndex node = 0; node < graph->num_nodes(); ++node) {
    constraint[node] = solver.MakeRowConstraint();
    FlowQuantity supply = min_cost_flow->Supply(node);
    constraint[node]->SetBounds(supply, supply);
  }
  // One variable per arc, bounded by the arc capacity; +1 at the tail and
  // -1 at the head of its conservation constraints.
  std::unique_ptr<MPVariable*[]> var(new MPVariable*[num_arcs]);
  MPObjective* const objective = solver.MutableObjective();
  for (ArcIndex arc = 0; arc < graph->num_arcs(); ++arc) {
    var[arc] = solver.MakeNumVar(0.0, min_cost_flow->Capacity(arc),
                                 absl::StrFormat("v%d", arc));
    constraint[graph->Tail(arc)]->SetCoefficient(var[arc], 1.0);
    constraint[graph->Head(arc)]->SetCoefficient(var[arc], -1.0);
    objective->SetCoefficient(var[arc], min_cost_flow->UnitCost(arc));
  }
  solver.Solve();
  // + .5 rounds the floating-point objective to the nearest integer
  // (assumes a nonnegative objective — true for the instances above).
  return static_cast<CostValue>(objective->Value() + .5);
}
// Sanity check: every node with nonzero supply/demand must have at least
// one incident arc (otherwise its balance could never be met). Failures are
// reported through EXPECT; the function itself always returns true.
template <typename Graph>
bool CheckAssignmentFeasibility(const Graph& graph,
                                absl::Span<const int64_t> supply) {
  for (NodeIndex node = 0; node < graph.num_nodes(); ++node) {
    if (supply[node] == 0) continue;
    typename Graph::OutgoingOrOppositeIncomingArcIterator it(graph, node);
    EXPECT_TRUE(it.Ok()) << node << " has no incident arc";
  }
  return true;
}
// Holder for the solver-callback type shared by the test drivers below
// (SolveMinCostFlow / SolveMinCostFlowWithLP are both used through it).
// NOTE(review): the typedef's return type is FlowQuantity while both
// solvers return CostValue — presumably the same 64-bit integer type;
// confirm if the two typedefs ever diverge.
template <typename Graph>
struct MinCostFlowSolver {
  typedef FlowQuantity (*Solver)(GenericMinCostFlow<Graph>* min_cost_flow);
};
// Solves a random num_sources x num_targets assignment problem on a
// complete bipartite graph with solver `f` and checks the optimal cost
// against expected_cost1. expected_cost2 is unused here; the parameter is
// kept so all drivers share the signature the *_TEST macros below expect.
template <typename Graph>
void FullRandomAssignment(typename MinCostFlowSolver<Graph>::Solver f,
                          NodeIndex num_sources, NodeIndex num_targets,
                          CostValue expected_cost1, CostValue expected_cost2) {
  const CostValue kCostRange = 1000;
  Graph graph;
  GenerateCompleteGraph(num_sources, num_targets, &graph);
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  std::vector<int64_t> supply;
  GenerateAssignmentSupply(num_sources, num_targets, &supply);
  EXPECT_TRUE(CheckAssignmentFeasibility(graph, supply));
  // Unit capacities make the flow problem an assignment problem.
  std::vector<int64_t> arc_capacity(graph.num_arcs(), 1);
  std::vector<int64_t> arc_cost(graph.num_arcs());
  GenerateRandomArcValuations(graph.num_arcs(), kCostRange, &arc_cost);
  GenericMinCostFlow<Graph> min_cost_flow(&graph);
  SetUpNetworkData(permutation, supply, arc_cost, arc_capacity, &graph,
                   &min_cost_flow);
  CostValue cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost1, cost);
}
// Like FullRandomAssignment, but on a sparse random bipartite graph where
// each source has kDegree outgoing arcs. expected_cost2 is unused; the
// parameter is kept for macro-signature uniformity.
template <typename Graph>
void PartialRandomAssignment(typename MinCostFlowSolver<Graph>::Solver f,
                             NodeIndex num_sources, NodeIndex num_targets,
                             CostValue expected_cost1,
                             CostValue expected_cost2) {
  const NodeIndex kDegree = 10;
  const CostValue kCostRange = 1000;
  Graph graph;
  GeneratePartialRandomGraph(num_sources, num_targets, kDegree, &graph);
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  std::vector<int64_t> supply;
  GenerateAssignmentSupply(num_sources, num_targets, &supply);
  EXPECT_TRUE(CheckAssignmentFeasibility(graph, supply));
  // The generator emits exactly kDegree arcs per source.
  EXPECT_EQ(graph.num_arcs(), num_sources * kDegree);
  std::vector<int64_t> arc_capacity(graph.num_arcs(), 1);
  std::vector<int64_t> arc_cost(graph.num_arcs());
  GenerateRandomArcValuations(graph.num_arcs(), kCostRange, &arc_cost);
  GenericMinCostFlow<Graph> min_cost_flow(&graph);
  SetUpNetworkData(permutation, supply, arc_cost, arc_capacity, &graph,
                   &min_cost_flow);
  CostValue cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost1, cost);
}
// For each arc: with the given probability, shrink its capacity by `delta`
// (clamped at zero); otherwise restore its original capacity. With
// probability 1.0 and delta 0 this restores all original capacities.
// Deterministic (fixed RNG seed). `permutation` maps original arc indices
// to post-Build() indices, as in SetUpNetworkData.
template <typename Graph>
void ChangeCapacities(absl::Span<const ArcIndex> permutation,
                      absl::Span<const int64_t> arc_capacity,
                      FlowQuantity delta,
                      GenericMinCostFlow<Graph>* min_cost_flow,
                      float probability) {
  std::mt19937 rng(12345);
  const ArcIndex num_arcs = min_cost_flow->graph()->num_arcs();
  for (ArcIndex arc = 0; arc < num_arcs; ++arc) {
    const ArcIndex image = arc < permutation.size() ? permutation[arc] : arc;
    const int64_t new_capacity =
        absl::Bernoulli(rng, probability)
            ? std::max(arc_capacity[arc] - delta, int64_t{0})
            : arc_capacity[arc];
    min_cost_flow->SetArcCapacity(image, new_capacity);
  }
}
// General min-cost-flow driver on a sparse random graph. Solves three
// times: with the original capacities (expected_cost1), after randomly
// shrinking ~90% of the capacities by kCapacityDelta (expected_cost2), and
// after restoring the original capacities (expected_cost1 again) — which
// also exercises incremental re-solving after capacity changes.
template <typename Graph>
void PartialRandomFlow(typename MinCostFlowSolver<Graph>::Solver f,
                       NodeIndex num_sources, NodeIndex num_targets,
                       CostValue expected_cost1, CostValue expected_cost2) {
  const NodeIndex kDegree = 15;
  const FlowQuantity kSupplyRange = 500;
  const int64_t kSupplyGens = 15;
  const FlowQuantity kCapacityRange = 10000;
  const CostValue kCostRange = 1000;
  const FlowQuantity kCapacityDelta = 500;
  const float kProbability = 0.9;
  Graph graph;
  GeneratePartialRandomGraph(num_sources, num_targets, kDegree, &graph);
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  std::vector<int64_t> supply;
  GenerateRandomSupply(num_sources, num_targets, kSupplyGens, kSupplyRange,
                       &supply);
  EXPECT_TRUE(CheckAssignmentFeasibility(graph, supply));
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph.num_arcs(), kCapacityRange, &arc_capacity);
  std::vector<int64_t> arc_cost(graph.num_arcs());
  GenerateRandomArcValuations(graph.num_arcs(), kCostRange, &arc_cost);
  GenericMinCostFlow<Graph> min_cost_flow(&graph);
  SetUpNetworkData(permutation, supply, arc_cost, arc_capacity, &graph,
                   &min_cost_flow);
  CostValue cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost1, cost);
  // Shrink most capacities and re-solve.
  ChangeCapacities(permutation, arc_capacity, kCapacityDelta, &min_cost_flow,
                   kProbability);
  cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost2, cost);
  // Restore all original capacities: the original optimum must come back.
  ChangeCapacities(permutation, arc_capacity, 0, &min_cost_flow, 1.0);
  cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost1, cost);
}
// Same three-phase solve/perturb/restore driver as PartialRandomFlow, but
// on a complete bipartite graph with its own supply/capacity constants.
template <typename Graph>
void FullRandomFlow(typename MinCostFlowSolver<Graph>::Solver f,
                    NodeIndex num_sources, NodeIndex num_targets,
                    CostValue expected_cost1, CostValue expected_cost2) {
  const FlowQuantity kSupplyRange = 1000;
  const int64_t kSupplyGens = 10;
  const FlowQuantity kCapacityRange = 10000;
  const CostValue kCostRange = 1000;
  const FlowQuantity kCapacityDelta = 1000;
  const float kProbability = 0.9;
  Graph graph;
  GenerateCompleteGraph(num_sources, num_targets, &graph);
  std::vector<typename Graph::ArcIndex> permutation;
  Graphs<Graph>::Build(&graph, &permutation);
  std::vector<int64_t> supply;
  GenerateRandomSupply(num_sources, num_targets, kSupplyGens, kSupplyRange,
                       &supply);
  EXPECT_TRUE(CheckAssignmentFeasibility(graph, supply));
  std::vector<int64_t> arc_capacity(graph.num_arcs());
  GenerateRandomArcValuations(graph.num_arcs(), kCapacityRange, &arc_capacity);
  std::vector<int64_t> arc_cost(graph.num_arcs());
  GenerateRandomArcValuations(graph.num_arcs(), kCostRange, &arc_cost);
  GenericMinCostFlow<Graph> min_cost_flow(&graph);
  SetUpNetworkData(permutation, supply, arc_cost, arc_capacity, &graph,
                   &min_cost_flow);
  CostValue cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost1, cost);
  // Shrink most capacities and re-solve.
  ChangeCapacities(permutation, arc_capacity, kCapacityDelta, &min_cost_flow,
                   kProbability);
  cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost2, cost);
  // Restore all original capacities: the original optimum must come back.
  ChangeCapacities(permutation, arc_capacity, 0, &min_cost_flow, 1.0);
  cost = f(&min_cost_flow);
  EXPECT_EQ(expected_cost1, cost);
}
// Instantiates one test per solver/graph combination for a given driver
// function (e.g. FullRandomFlow) and size.
#define LP_AND_FLOW_TEST(test_name, size, expected_cost1, expected_cost2) \
  LP_ONLY_TEST(test_name, size, expected_cost1, expected_cost2)           \
  FLOW_ONLY_TEST(test_name, size, expected_cost1, expected_cost2)         \
  FLOW_ONLY_TEST_SG(test_name, size, expected_cost1, expected_cost2)
// Driver checked against the LP reference solver, on StarGraph.
#define LP_ONLY_TEST(test_name, size, expected_cost1, expected_cost2)        \
  TEST(LPMinCostFlowTest, test_name##size) {                                 \
    test_name<StarGraph>(SolveMinCostFlowWithLP, size, size, expected_cost1, \
                         expected_cost2);                                    \
  }
// Driver run with GenericMinCostFlow, on StarGraph.
#define FLOW_ONLY_TEST(test_name, size, expected_cost1, expected_cost2) \
  TEST(MinCostFlowTest, test_name##size) {                              \
    test_name<StarGraph>(SolveMinCostFlow, size, size, expected_cost1,  \
                         expected_cost2);                               \
  }
// Driver run with GenericMinCostFlow, on ReverseArcStaticGraph.
#define FLOW_ONLY_TEST_SG(test_name, size, expected_cost1, expected_cost2) \
  TEST(MinCostFlowTestStaticGraph, test_name##size) {                      \
    test_name<util::ReverseArcStaticGraph<>>(SolveMinCostFlow, size, size, \
                                             expected_cost1, expected_cost2); \
  }
// The times indicated below are in opt mode.
// The figures indicate the time with the LP solver and with MinCostFlow,
// respectively. _ indicates "N/A".
// Each instantiation passes: driver, size, expected cost of the first
// solve, expected cost after the random capacity reduction (0 when the
// driver ignores it).
LP_AND_FLOW_TEST(FullRandomAssignment, 100, 1653, 0);  // 0.070s / 0.007s
LP_AND_FLOW_TEST(FullRandomAssignment, 300, 1487, 0);  // 0.5s / 0.121s
LP_AND_FLOW_TEST(PartialRandomFlow, 10, 9195615, 10720774);
LP_AND_FLOW_TEST(PartialRandomFlow, 100, 80098192, 95669398);  // 12ms / 8ms
LP_AND_FLOW_TEST(PartialRandomFlow, 1000, 770743566, 936886845);
// 1.6s / 0.094s
LP_AND_FLOW_TEST(FullRandomFlow, 100, 40998962, 81814978);   // 0.085s / 0.025s
LP_AND_FLOW_TEST(FullRandomFlow, 300, 67301515, 173406965);  // 0.7s / 0.412s
LP_AND_FLOW_TEST(PartialRandomAssignment, 100, 15418, 0);    // 0.012s/0.003s
LP_AND_FLOW_TEST(PartialRandomAssignment, 1000, 155105, 0);  // 0.416s/0.041s
// LARGE must be defined from the build command line to test larger instances.
#ifdef LARGE
LP_AND_FLOW_TEST(FullRandomAssignment, 1000, 1142, 0);  // 7.2s / 5.809s
FLOW_ONLY_TEST(FullRandomAssignment, 3000, 392, 0);     // 800s / 93.9s
FLOW_ONLY_TEST_SG(FullRandomAssignment, 3000, 392, 0);  // 40s
LP_AND_FLOW_TEST(PartialRandomAssignment, 10000, 3649506, 0);  // 22s / 0.953s
FLOW_ONLY_TEST(PartialRandomAssignment, 100000, 36722363, 0);  // 4740s / 23s
FLOW_ONLY_TEST_SG(PartialRandomAssignment, 100000, 36722363, 0);  // 4740s / 23s
FLOW_ONLY_TEST(PartialRandomAssignment, 1000000, 367732438, 0);   // _ / 430s
FLOW_ONLY_TEST_SG(PartialRandomAssignment, 1000000, 367732438, 0);  // 336s
LP_AND_FLOW_TEST(PartialRandomFlow, 2000, 3040966812, 3072394992);
// 7.15s / 0.269s
LP_AND_FLOW_TEST(FullRandomFlow, 800, 10588600, 12057369);
LP_AND_FLOW_TEST(FullRandomFlow, 1000, 9491720, 10994039);  // 14.4s / 13.183s
FLOW_ONLY_TEST(FullRandomFlow, 3000, 5588622, 7140712);     // 1460s / 488s
FLOW_ONLY_TEST_SG(FullRandomFlow, 3000, 5588622, 7140712);  // 230s
#endif  // LARGE
#undef LP_AND_FLOW_TEST
#undef LP_ONLY_TEST
#undef FLOW_ONLY_TEST
#undef FLOW_ONLY_TEST_SG
// Benchmark inspired from the existing problem of matching Youtube ads channels
// to Youtube users, maximizing the expected revenue:
// - Each channel needs an exact number of users assigned to it.
// - Each user has an upper limit on the number of channels they can be assigned
//   to, with a guarantee that this upper limit won't prevent the channels to
//   get their required number of users.
// - Each pair (user, channel) has a known expected revenue, which is modeled
//   as a small-ish integer (<3K). Using larger ranges can slightly impact
//   performance, and you should look for a good trade-off with the accuracy.
//
// IMPORTANT: don't run this with default flags! Use:
// blaze run -c opt --linkopt=-static [--run_under=perflab] --
//   ortools/graph/min_cost_flow_test --benchmarks=all
//   --benchmark_min_iters=1 --heap_check= --benchmark_memory_usage
static void BM_MinCostFlowOnMultiMatchingProblem(benchmark::State& state) {
  std::mt19937 my_random(12345);
  const int kNumChannels = 20000;
  const int kNumUsers = 20000;
  // Average probability of a user-channel pair being matched.
  const double kDensity = 1.0 / 200;
  const int kMaxChannelsPerUser = 5 * static_cast<int>(kDensity * kNumChannels);
  const int kAverageNumUsersPerChannels =
      static_cast<int>(kDensity * kNumUsers);
  // Each channel demands between 1 and 2 * average - 1 users.
  std::vector<int> num_users_per_channel(kNumChannels, -1);
  int total_demand = 0;
  for (int i = 0; i < kNumChannels; ++i) {
    num_users_per_channel[i] =
        1 + absl::Uniform(my_random, 0, 2 * kAverageNumUsersPerChannels - 1);
    total_demand += num_users_per_channel[i];
  }
  // User #j, when assigned to channel #i, is expected to generate
  // -expected_cost_per_channel_user[kNumUsers * i + j]: since MinCostFlow
  // only *minimizes* costs, and doesn't maximize revenue, we just set
  // cost = -revenue.
  // To stress the algorithm, we generate a cost matrix that is highly skewed
  // and that would probably challenge greedy approaches: each user gets
  // a random coefficient, each channel as well, and then the expected revenue
  // of a (user, channel) pairing is the product of these coefficient, plus a
  // small (per-cell) random perturbation.
  std::vector<int16_t> expected_cost_per_channel_user(kNumChannels * kNumUsers);
  {
    std::vector<int16_t> channel_coeff(kNumChannels);
    for (int i = 0; i < kNumChannels; ++i) {
      channel_coeff[i] = absl::Uniform(my_random, 0, 48);
    }
    std::vector<int16_t> user_coeff(kNumUsers);
    for (int j = 0; j < kNumUsers; ++j) {
      user_coeff[j] = absl::Uniform(my_random, 0, 48);
    }
    for (int i = 0; i < kNumChannels; ++i) {
      for (int j = 0; j < kNumUsers; ++j) {
        expected_cost_per_channel_user[kNumUsers * i + j] =
            -channel_coeff[i] * user_coeff[j] - absl::Uniform(my_random, 0, 10);
      }
    }
  }
  // The guarantee promised in the header comment: user capacity suffices.
  CHECK_LE(total_demand, kNumUsers * kMaxChannelsPerUser);
  for (auto _ : state) {
    // We don't have more than 65536 nodes, so we use 16-bit integers to spare
    // memory (and potentially speed up the code). Arc indices must be 32 bits
    // though, since we have much more.
    typedef util::ReverseArcStaticGraph<
        /*NodeIndexType*/ uint16_t, /*ArcIndexType*/ int32_t>
        Graph;
    Graph graph(/*num_nodes=*/kNumUsers + kNumChannels + 1,
                /*arc_capacity=*/kNumChannels * kNumUsers + kNumUsers);
    // We model this problem as a graph (on which we'll do a min-cost flow):
    // - Each channel #i is a source node (index #i + 1) with supply
    //   num_users_per_channel[i].
    // - There is a global sink node (index 0) with a demand equal to the
    //   sum of num_users_per_channel.
    // - Each user #j is an intermediate node (index 1 + kNumChannels + j)
    //   with no supply or demand, but with an arc of capacity
    //   kMaxChannelsPerUser towards the global sink node (and of unit cost 0).
    // - There is an arc from each channel #i to each user #j, with capacity 1
    //   and unit cost expected_cost_per_channel_user[kNumUsers * i + j].
    for (int i = 0; i < kNumChannels; ++i) {
      for (int j = 0; j < kNumUsers; ++j) {
        graph.AddArc(/*tail=*/i + 1, /*head=*/kNumChannels + 1 + j);
      }
    }
    for (int j = 0; j < kNumUsers; ++j) {
      graph.AddArc(/*tail=*/kNumChannels + 1 + j, /*head=*/0);
    }
    std::vector<Graph::ArcIndex> permutation;
    graph.Build(&permutation);
    // To spare memory, we added arcs in the right order, so that no permutation
    // is needed. See graph.h.
    CHECK(permutation.empty());
    // To spare memory, the types are chosen as small as possible.
    GenericMinCostFlow<Graph,
                       /*ArcFlowType=*/int16_t,
                       /*ArcScaledCostType=*/int32_t>
        min_cost_flow(&graph);
    // We also disable the feasibility check which takes a huge amount of
    // memory.
    min_cost_flow.SetCheckFeasibility(false);
    min_cost_flow.SetNodeSupply(/*node=*/0, /*supply=*/-total_demand);
    // Now, set the arcs capacity and cost, in the same order as we created them
    // above.
    Graph::ArcIndex arc_index = 0;
    for (int i = 0; i < kNumChannels; ++i) {
      min_cost_flow.SetNodeSupply(
          /*node=*/i + 1, /*supply=*/num_users_per_channel[i]);
      for (int j = 0; j < kNumUsers; ++j) {
        min_cost_flow.SetArcUnitCost(
            arc_index, expected_cost_per_channel_user[kNumUsers * i + j]);
        min_cost_flow.SetArcCapacity(arc_index, 1);
        arc_index++;
      }
    }
    for (int j = 0; j < kNumUsers; ++j) {
      min_cost_flow.SetArcUnitCost(arc_index, 0);
      min_cost_flow.SetArcCapacity(arc_index, kMaxChannelsPerUser);
      arc_index++;
    }
    const bool solved_ok = min_cost_flow.Solve();
    CHECK(solved_ok);
    LOG(INFO) << "Maximum revenue: " << -min_cost_flow.GetOptimalCost();
  }
}
BENCHMARK(BM_MinCostFlowOnMultiMatchingProblem);
} // namespace operations_research

View File

@@ -0,0 +1,324 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/minimum_spanning_tree.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>
#include "absl/base/macros.h"
#include "absl/random/distributions.h"
#include "absl/types/span.h"
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/base/types.h"
#include "ortools/graph/graph.h"
namespace operations_research {
namespace {
using ::testing::UnorderedElementsAreArray;
using ::util::CompleteGraph;
using ::util::ListGraph;
TEST(MSTTest, EmptyGraph) {
  // A graph with no nodes and no arcs yields an empty spanning tree.
  ListGraph<int, int> graph(0, 0);
  std::vector<int64_t> costs;
  const auto arc_cost_less = [&costs](int a, int b) {
    return costs[a] < costs[b];
  };
  const std::vector<int> mst =
      BuildKruskalMinimumSpanningTree<ListGraph<int, int>>(graph,
                                                           arc_cost_less);
  EXPECT_TRUE(mst.empty());
}
TEST(MSTTest, NoArcGraph) {
  // Nodes without any arcs: the spanning "tree" has no arcs at all.
  ListGraph<int, int> graph(5, 0);
  std::vector<int64_t> costs;
  const auto arc_cost_less = [&costs](int a, int b) {
    return costs[a] < costs[b];
  };
  const std::vector<int> mst =
      BuildKruskalMinimumSpanningTree<ListGraph<int, int>>(graph,
                                                           arc_cost_less);
  EXPECT_TRUE(mst.empty());
}
// Helper function to check the expected MST is obtained with Kruskal.
// Exercises both entry points: the comparator-based
// BuildKruskalMinimumSpanningTree and the pre-sorted-arcs variant, which
// must agree with `expected_arcs` and with each other (order-insensitive).
void CheckMSTWithKruskal(const ListGraph<int, int>& graph,
                         absl::Span<const int64_t> costs,
                         const std::vector<int>& expected_arcs) {
  const auto ByCost = [costs](int a, int b) {
    if (costs[a] != costs[b]) {
      return costs[a] < costs[b];
    }
    // for the sake of stability, preserve the order of arcs with the same cost
    return a < b;
  };
  const std::vector<int> mst = BuildKruskalMinimumSpanningTree(graph, ByCost);
  EXPECT_THAT(expected_arcs, UnorderedElementsAreArray(mst));
  // Feed the same arcs, pre-sorted by ByCost, to the sorted-arcs variant.
  std::vector<int> sorted_arcs(graph.num_arcs());
  for (const int arc : graph.AllForwardArcs()) {
    sorted_arcs[arc] = arc;
  }
  std::sort(sorted_arcs.begin(), sorted_arcs.end(), ByCost);
  EXPECT_THAT(mst, UnorderedElementsAreArray(
                       BuildKruskalMinimumSpanningTreeFromSortedArcs(
                           graph, sorted_arcs)));
}
// Checks that Prim's algorithm selects exactly the expected set of arcs
// (order does not matter).
void CheckMSTWithPrim(const ListGraph<int, int>& graph,
                      const std::vector<int64_t>& costs,
                      const std::vector<int>& expected_arcs) {
  const auto arc_length = [&costs](int arc) { return costs[arc]; };
  const std::vector<int> prim_mst =
      BuildPrimMinimumSpanningTree(graph, arc_length);
  EXPECT_THAT(expected_arcs, UnorderedElementsAreArray(prim_mst));
}
// Testing Kruskal MST on a small undirected graph:
// - original graph:
// 0 -(1)- 1 -(2)- 2
//         |       |
//        (1)     (1)
//         |       |
//         4 -(4)- 3
//
// - minimum spanning tree:
// 0 ----> 1 ----> 2
//         |       |
//         |       |
//         v       v
//         4       3
//
TEST(MSTTest, SmallGraph) {
  const int kArcs[][2] = {{0, 1}, {1, 2}, {1, 4}, {2, 3}, {3, 4}};
  const int64_t kCosts[] = {1, 2, 1, 1, 4};
  const int kNodes = 5;
  ListGraph<int, int> graph(kNodes, ABSL_ARRAYSIZE(kArcs) * 2);
  // Each undirected edge is modeled as two opposite directed arcs with the
  // same cost; edge i gets arc ids 2*i (forward) and 2*i + 1 (backward).
  std::vector<int64_t> costs(ABSL_ARRAYSIZE(kArcs) * 2, 0);
  for (int i = 0; i < ABSL_ARRAYSIZE(kArcs); ++i) {
    costs[graph.AddArc(kArcs[i][0], kArcs[i][1])] = kCosts[i];
    costs[graph.AddArc(kArcs[i][1], kArcs[i][0])] = kCosts[i];
  }
  // {0, 4, 6, 2} are the forward arcs of edges 0-1, 1-4, 2-3 and 1-2.
  CheckMSTWithKruskal(graph, costs, {0, 4, 6, 2});
  CheckMSTWithPrim(graph, costs, {0, 4, 6, 2});
}
// Arc costs equal to the maximum int64 value must not overflow either
// algorithm.
TEST(MSTTest, SmallGraphWithMaxValueArcs) {
  constexpr int kNumNodes = 3;
  constexpr int kNumDirectedArcs = 4;  // 2 undirected edges, both directions.
  const int kEdges[][2] = {{0, 1}, {1, 2}};
  const int64_t kMaxCost = std::numeric_limits<int64_t>::max();
  ListGraph<int, int> graph(kNumNodes, kNumDirectedArcs);
  std::vector<int64_t> costs(kNumDirectedArcs, 0);
  for (const auto& edge : kEdges) {
    costs[graph.AddArc(edge[0], edge[1])] = kMaxCost;
    costs[graph.AddArc(edge[1], edge[0])] = kMaxCost;
  }
  CheckMSTWithKruskal(graph, costs, {0, 2});
  CheckMSTWithPrim(graph, costs, {0, 2});
}
// Testing Kruskal MST on a small directed graph:
// - original graph:
// 0 <-(1)- 1 <-(2)- 2
// ^        \        |
// (1)      (0)     (1)
// |          \      |
// |           >     v
// 4 -(4)->          3
//
// - minimum spanning tree:
// 0 <---- 1         2
// ^        \        |
// |         \       |
// |          \      |
// |           >v    |
// 4            3
//
TEST(MSTTest, SmallDirectedGraph) {
  // Arcs are added in this order, so arc i below has id i.
  const int kArcs[][2] = {{1, 0}, {2, 1}, {4, 1}, {2, 3}, {4, 3}, {1, 3}};
  const int64_t kCosts[] = {1, 2, 1, 1, 4, 0};
  const int kNodes = 5;
  ListGraph<int, int> graph(kNodes, ABSL_ARRAYSIZE(kArcs));
  std::vector<int64_t> costs(ABSL_ARRAYSIZE(kArcs), 0);
  for (int i = 0; i < ABSL_ARRAYSIZE(kArcs); ++i) {
    costs[graph.AddArc(kArcs[i][0], kArcs[i][1])] = kCosts[i];
  }
  // {5, 0, 2, 3} = arcs 1->3 (cost 0), 1->0, 4->1 and 2->3 (cost 1 each).
  CheckMSTWithKruskal(graph, costs, {5, 0, 2, 3});
}
// Testing Kruskal MST on a small disconnected graph:
// - original graph:
// 0 -(1)- 1         2
//         |         |
//        (1)       (1)
//         |         |
//         4         3
//
// - minimum spanning tree (a spanning forest, since the graph is
//   disconnected):
// 0 ----> 1         2
//         |         |
//         |         |
//         v         v
//         4         3
//
TEST(MSTTest, SmallDisconnectedGraph) {
  const int kArcs[][2] = {{0, 1}, {1, 4}, {2, 3}};
  const int64_t kCosts[] = {1, 1, 1};
  const int kNodes = 5;
  ListGraph<int, int> graph(kNodes, ABSL_ARRAYSIZE(kArcs) * 2);
  // Each undirected edge is two opposite directed arcs with the same cost.
  std::vector<int64_t> costs(ABSL_ARRAYSIZE(kArcs) * 2, 0);
  for (int i = 0; i < ABSL_ARRAYSIZE(kArcs); ++i) {
    costs[graph.AddArc(kArcs[i][0], kArcs[i][1])] = kCosts[i];
    costs[graph.AddArc(kArcs[i][1], kArcs[i][0])] = kCosts[i];
  }
  // {0, 2, 4} are the forward copies of the three edges.
  CheckMSTWithKruskal(graph, costs, {0, 2, 4});
}
// Benchmark on a grid graph with random arc costs; 'size' corresponds to the
// number of nodes on a row/column of the grid.
// NOTE(review): the GraphType template parameter is unused — the body
// hardcodes ListGraph<int, int>; presumably it was meant to let several
// graph implementations be benchmarked. Confirm before adding more
// BENCHMARK_TEMPLATE instantiations.
template <typename GraphType>
void BM_KruskalMinimimumSpanningTreeOnGrid(benchmark::State& state) {
  int size = state.range(0);
  const int64_t kCostLimit = 1000000;
  std::mt19937 randomizer(0);
  const int num_nodes = size * size;
  // Two directed arcs per undirected edge: grid edges plus the extra
  // row/column edges added below.
  const int num_edges = 2 * (2 * size * (size - 1) + 2 * size - 4);
  std::vector<int64_t> edge_costs(num_edges, 0);
  ListGraph<int, int> graph(num_nodes, num_edges);
  for (int i = 0; i < size; ++i) {
    for (int j = 0; j < size; ++j) {
      if (j < size - 1) {
        const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
        edge_costs[graph.AddArc(i * size + j, i * size + j + 1)] = cost;
        edge_costs[graph.AddArc(i * size + j + 1, i * size + j)] = cost;
      }
      if (i < size - 1) {
        const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
        edge_costs[graph.AddArc(i * size + j, (i + 1) * size + j)] = cost;
        edge_costs[graph.AddArc((i + 1) * size + j, i * size + j)] = cost;
      }
    }
  }
  // Extra edges joining the first and last column of each interior row...
  for (int i = 1; i < size - 1; ++i) {
    const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
    edge_costs[graph.AddArc(i * size, i * size + size - 1)] = cost;
    edge_costs[graph.AddArc(i * size + size - 1, i * size)] = cost;
  }
  // ... and the first and last row of each interior column.
  for (int i = 1; i < size - 1; ++i) {
    const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
    edge_costs[graph.AddArc(i, (size - 1) * size + i)] = cost;
    edge_costs[graph.AddArc((size - 1) * size + i, i)] = cost;
  }
  for (auto _ : state) {
    const std::vector<int> mst = BuildKruskalMinimumSpanningTree(
        graph,
        [&edge_costs](int a, int b) { return edge_costs[a] < edge_costs[b]; });
    EXPECT_EQ(num_nodes - 1, mst.size());
  }
}
BENCHMARK_TEMPLATE(BM_KruskalMinimimumSpanningTreeOnGrid, ListGraph<>)
    ->Range(2, 1 << 8);
// Same grid construction as BM_KruskalMinimimumSpanningTreeOnGrid, timed
// with Prim's algorithm instead.
// NOTE(review): as above, the GraphType template parameter is unused — the
// body hardcodes ListGraph<int, int>; confirm intent.
template <typename GraphType>
void BM_PrimMinimimumSpanningTreeOnGrid(benchmark::State& state) {
  int size = state.range(0);
  const int64_t kCostLimit = 1000000;
  std::mt19937 randomizer(0);
  const int num_nodes = size * size;
  // Two directed arcs per undirected edge, as in the Kruskal benchmark.
  const int num_edges = 2 * (2 * size * (size - 1) + 2 * size - 4);
  std::vector<int64_t> edge_costs(num_edges, 0);
  ListGraph<int, int> graph(num_nodes, num_edges);
  for (int i = 0; i < size; ++i) {
    for (int j = 0; j < size; ++j) {
      if (j < size - 1) {
        const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
        edge_costs[graph.AddArc(i * size + j, i * size + j + 1)] = cost;
        edge_costs[graph.AddArc(i * size + j + 1, i * size + j)] = cost;
      }
      if (i < size - 1) {
        const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
        edge_costs[graph.AddArc(i * size + j, (i + 1) * size + j)] = cost;
        edge_costs[graph.AddArc((i + 1) * size + j, i * size + j)] = cost;
      }
    }
  }
  // Extra edges joining the first and last column of each interior row...
  for (int i = 1; i < size - 1; ++i) {
    const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
    edge_costs[graph.AddArc(i * size, i * size + size - 1)] = cost;
    edge_costs[graph.AddArc(i * size + size - 1, i * size)] = cost;
  }
  // ... and the first and last row of each interior column.
  for (int i = 1; i < size - 1; ++i) {
    const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
    edge_costs[graph.AddArc(i, (size - 1) * size + i)] = cost;
    edge_costs[graph.AddArc((size - 1) * size + i, i)] = cost;
  }
  for (auto _ : state) {
    const std::vector<int> mst = BuildPrimMinimumSpanningTree(
        graph, [&edge_costs](int arc) { return edge_costs[arc]; });
    EXPECT_EQ(num_nodes - 1, mst.size());
  }
}
BENCHMARK_TEMPLATE(BM_PrimMinimimumSpanningTreeOnGrid, ListGraph<>)
    ->Range(2, 1 << 8);
// Benchmark on complete graph with random arc costs.
void BM_KruskalMinimimumSpanningTreeOnCompleteGraph(benchmark::State& state) {
  int num_nodes = state.range(0);
  const int64_t kCostLimit = 1000000;
  std::mt19937 randomizer(0);
  CompleteGraph<int, int> graph(num_nodes);
  // One independent random cost per (directed) arc.
  std::vector<int64_t> edge_costs(graph.num_arcs(), 0);
  for (const int node : graph.AllNodes()) {
    for (const auto arc : graph.OutgoingArcs(node)) {
      const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
      edge_costs[arc] = cost;
    }
  }
  for (auto _ : state) {
    const std::vector<int> mst = BuildKruskalMinimumSpanningTree(
        graph,
        [&edge_costs](int a, int b) { return edge_costs[a] < edge_costs[b]; });
    EXPECT_EQ(num_nodes - 1, mst.size());
  }
}
BENCHMARK(BM_KruskalMinimimumSpanningTreeOnCompleteGraph)->Range(2, 1 << 10);
// Same complete-graph setup as above, timed with Prim's algorithm.
void BM_PrimMinimimumSpanningTreeOnCompleteGraph(benchmark::State& state) {
  int num_nodes = state.range(0);
  const int64_t kCostLimit = 1000000;
  std::mt19937 randomizer(0);
  CompleteGraph<int, int> graph(num_nodes);
  // One independent random cost per (directed) arc.
  std::vector<int64_t> edge_costs(graph.num_arcs(), 0);
  for (const int node : graph.AllNodes()) {
    for (const auto arc : graph.OutgoingArcs(node)) {
      const int64_t cost = absl::Uniform(randomizer, 0, kCostLimit);
      edge_costs[arc] = cost;
    }
  }
  for (auto _ : state) {
    const std::vector<int> mst = BuildPrimMinimumSpanningTree(
        graph, [&edge_costs](int arc) { return edge_costs[arc]; });
    EXPECT_EQ(num_nodes - 1, mst.size());
  }
}
BENCHMARK(BM_PrimMinimimumSpanningTreeOnCompleteGraph)->Range(2, 1 << 10);
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,272 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/multi_dijkstra.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <numeric>
#include <random>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/random/distributions.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/base/map_util.h"
#include "ortools/base/types.h"
#include "ortools/graph/connected_components.h"
#include "ortools/graph/graph.h"
#include "ortools/graph/random_graph.h"
#include "ortools/graph/util.h"
namespace operations_research {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(MultiDijkstraTest, SmallTest) {
  // Runs MultiDijkstra with 5 source sets on a small hand-built graph, and
  // checks the exhaustive {node -> (distance, parent arc)} map returned for
  // each set. A parent arc of -1 marks a search root.
  //
  // On the following graph (with lengths divided by 10, to test non-integer).
  //
  // .--------------[6]---------------.
  // |                                v
  // 0 --[3]--> 1 --[0]--> 2 --[2]--> 4
  //            ^          |
  //            |          |
  //           [0]        [0]
  //            |          |
  //            '--- 3 <---'
  //
  // Arcs are indexed 0..5 in the insertion order below.
  util::Graph graph;
  std::vector<double> arc_lengths;
  graph.AddArc(0, 1);  // arc #0
  arc_lengths.push_back(0.3);
  graph.AddArc(0, 4);  // arc #1
  arc_lengths.push_back(0.6);
  graph.AddArc(1, 2);  // arc #2
  arc_lengths.push_back(0.0);
  graph.AddArc(2, 4);  // arc #3
  arc_lengths.push_back(0.2);
  graph.AddArc(2, 3);  // arc #4
  arc_lengths.push_back(0.0);
  graph.AddArc(3, 1);  // arc #5
  arc_lengths.push_back(0.0);
  EXPECT_THAT(
      MultiDijkstra<double>(
          graph, [&arc_lengths](int arc) { return arc_lengths[arc]; },
          // The 5 source sets: a set may hold several sources ({1, 2},
          // {3, 4}) or none at all ({}).
          {{0}, {1, 2}, {3, 4}, {4}, {}},
          // Settled-node callback always returns false: never stop a search.
          [](int, int, double) -> bool { return false; }),
      // One expected {node -> (distance, parent arc)} map per source set, in
      // the same order as the sets above.
      ElementsAre(
          UnorderedElementsAre(Pair(0, DistanceAndParentArc<double>{0, -1}),
                               Pair(1, DistanceAndParentArc<double>{.3, 0}),
                               Pair(2, DistanceAndParentArc<double>{.3, 2}),
                               Pair(3, DistanceAndParentArc<double>{.3, 4}),
                               Pair(4, DistanceAndParentArc<double>{.5, 3})),
          UnorderedElementsAre(Pair(1, DistanceAndParentArc<double>{0, -1}),
                               Pair(2, DistanceAndParentArc<double>{0, -1}),
                               Pair(3, DistanceAndParentArc<double>{0, 4}),
                               Pair(4, DistanceAndParentArc<double>{.2, 3})),
          UnorderedElementsAre(Pair(3, DistanceAndParentArc<double>{0, -1}),
                               Pair(1, DistanceAndParentArc<double>{0, 5}),
                               Pair(2, DistanceAndParentArc<double>{0, 2}),
                               Pair(4, DistanceAndParentArc<double>{0, -1})),
          UnorderedElementsAre(Pair(4, DistanceAndParentArc<double>{0, -1})),
          IsEmpty()));
}
TEST(MultiDijkstraTest, RandomizedStressTest) {
  // Verify on random graphs that a few invariants are respected.
  // Non-exhaustive list:
  // - the output looks good: all nodes and arcs are valid integers, etc.
  //   Also, the parent arcs and their length is consistent with the
  //   node distances.
  // - the arc_length_functor is called at most once on each arc for each
  //   source, and was called for all of the returned "parent arcs".
  // - the settled_node_callback is called at most once on each (node, source)
  //   pair, and with a distance corresponding to the node's distance in the
  //   returned search tree from that source.
  // - the settled node callback may not be called on a source that has stopped
  //   its search.
  // - when a dijkstra search hasn't been stopped, verify that the set of
  //   reached nodes corresponds to that source's connected component.
  auto random = std::mt19937(1234);
  // Fewer trials in debug builds, which run much slower.
  const int num_trials = DEBUG_MODE ? 1000 : 10000;
  const int max_num_nodes = 100;
  const int max_num_arcs = 200;
  // -1 is a sentinel meaning "pick uniformly in [1, num_nodes]".
  const std::vector<int> kNumSources = {0, 1, 3, -1};
  for (int trial = 0; trial < num_trials; ++trial) {
    // Set up the input graph.
    const int num_nodes = absl::Uniform(random, 0, max_num_nodes);
    const int num_arcs =
        num_nodes == 0 ? 0 : absl::Uniform(random, 0, max_num_arcs);
    std::unique_ptr<util::StaticGraph<>> graph = util::GenerateRandomMultiGraph(
        num_nodes, num_arcs, /*finalized=*/true, random);
    // Set up the input source sets.
    int num_sources =
        kNumSources[absl::Uniform<int>(random, 0, kNumSources.size())];
    if (num_sources < 0) {
      num_sources = absl::Uniform(random, 1, num_nodes + 1);
    }
    std::vector<std::vector<int>> source_sets(num_sources);
    // Each source set gets 0 to 3 random nodes, not necessarily distinct.
    // Then, with 50% probability, we'll pick two random source sets and append
    // either Uniform(num_nodes) random nodes to them (not necessarily distinct)
    // or all nodes (distinct).
    for (std::vector<int>& source_set : source_sets) {
      const int size = num_nodes == 0 ? 0 : absl::Uniform(random, 0, 4);
      while (source_set.size() < size) {
        source_set.push_back(absl::Uniform(random, 0, num_nodes));
      }
    }
    if (num_sources > 0 && absl::Bernoulli(random, 0.5)) {
      for (int i = 0; i < 2; ++i) {
        const int source = absl::Uniform(random, 0, num_sources);
        if (absl::Bernoulli(random, 0.5)) {
          // Append Uniform(num_nodes) random nodes, with repetitions.
          const int num = absl::Uniform(random, 0, num_nodes);
          for (int j = 0; j < num; ++j) {
            source_sets[source].push_back(absl::Uniform(random, 0, num_nodes));
          }
        } else {
          // Append all nodes (shuffled).
          std::vector<int> shuffled_nodes(num_nodes, 0);
          std::iota(shuffled_nodes.begin(), shuffled_nodes.end(), 0);
          std::shuffle(shuffled_nodes.begin(), shuffled_nodes.end(), random);
          source_sets[source].insert(source_sets[source].end(),
                                     shuffled_nodes.begin(),
                                     shuffled_nodes.end());
        }
      }
    }
    // Set up the (tracked) arc length functor and settled node callbacks.
    // Each search may randomly decide to stop: give each source its own stop
    // probability (0, or ~1/Uniform[1, num_nodes]).
    std::vector<bool> search_was_stopped(num_sources, false);
    std::vector<double> search_stop_probability(num_sources, 0.0);
    for (double& stop_proba : search_stop_probability) {
      if (absl::Bernoulli(random, 0.5)) {
        stop_proba = 1.0 / absl::Uniform<int>(random, 1, num_nodes + 1);
      }
    }
    absl::flat_hash_map<int, int> num_arc_length_functor_calls;
    absl::flat_hash_map<int, int64_t> arc_length;
    std::vector<absl::flat_hash_map<int, int64_t>> settled_node_distance(
        num_sources);
    // Arc lengths are drawn lazily in [0, 1e12) and memoized in `arc_length`,
    // so every search observes the same length for a given arc; the call
    // counter lets us verify the "at most once per source" invariant below.
    auto arc_length_functor = [&](int arc) -> int64_t {
      CHECK_GE(arc, 0);
      CHECK_LT(arc, graph->num_arcs());
      ++num_arc_length_functor_calls[arc];
      return gtl::LookupOrInsert(&arc_length, arc,
                                 absl::Uniform<int64_t>(random, 0, 1e12));
    };
    // Records each settlement (must be unique per (source, node)) and
    // randomly stops this source's search with its chosen probability.
    auto settled_node_callback = [&](int node, int source_index,
                                     int64_t distance) -> bool {
      CHECK(!search_was_stopped[source_index]);
      CHECK_GE(node, 0);
      CHECK_LT(node, num_nodes);
      CHECK_GE(source_index, 0);
      CHECK_LT(source_index, num_sources);
      CHECK(settled_node_distance[source_index].insert({node, distance}).second)
          << "In search #" << source_index << ", node #" << node
          << " was settled twice!";
      const bool stop =
          absl::Bernoulli(random, search_stop_probability[source_index]);
      if (stop) {
        search_was_stopped[source_index] = true;
      }
      return stop;
    };
    // Run the Dijkstra!
    const std::vector<absl::flat_hash_map<int, DistanceAndParentArc<int64_t>>>
        reached = MultiDijkstra<int64_t>(*graph, arc_length_functor,
                                         source_sets, settled_node_callback);
    // Verify the output.
    ASSERT_EQ(reached.size(), num_sources);
    for (int source_index = 0; source_index < num_sources; ++source_index) {
      // Verify that "reached[source_index]" forms a shortest path tree:
      // either a node is a root (parent arc -1, distance 0), or its distance
      // equals its parent's distance plus the parent arc's length.
      for (const auto& p : reached[source_index]) {
        const int node = p.first;
        const int parent_arc = p.second.parent_arc;
        const int64_t distance = p.second.distance;
        ASSERT_GE(node, 0);
        ASSERT_LT(node, num_nodes);
        if (parent_arc == -1) {
          ASSERT_EQ(distance, 0);
        } else {
          ASSERT_GE(parent_arc, 0);
          ASSERT_LT(parent_arc, graph->num_arcs());
          ASSERT_TRUE(arc_length.contains(parent_arc));
          const int parent_node = graph->Tail(parent_arc);
          ASSERT_TRUE(reached[source_index].contains(parent_node));
          ASSERT_EQ(gtl::FindOrDie(reached[source_index], parent_node).distance,
                    distance - gtl::FindOrDie(arc_length, parent_arc));
        }
      }
      // Every settled node must appear in the output, at the same distance.
      for (const auto& p : settled_node_distance[source_index]) {
        ASSERT_TRUE(reached[source_index].contains(p.first));
        ASSERT_EQ(gtl::FindOrDie(reached[source_index], p.first).distance,
                  p.second);
      }
      if (!search_was_stopped[source_index]) {
        if (source_sets[source_index].empty()) {
          ASSERT_EQ(reached[source_index].size(), 0);
        } else {
          // All sources have been settled with distance 0.
          for (const int source : source_sets[source_index]) {
            ASSERT_TRUE(settled_node_distance[source_index].contains(source));
            ASSERT_EQ(
                gtl::FindOrDie(settled_node_distance[source_index], source), 0);
          }
          // All reached nodes have been settled.
          ASSERT_EQ(reached[source_index].size(),
                    settled_node_distance[source_index].size());
          // Run a BFS from the source set and verify that we reach the same
          // number of nodes.
          std::vector<int> bfs_queue;
          std::vector<bool> touched(num_nodes, false);
          for (const int src : source_sets[source_index]) {
            if (!touched[src]) {
              touched[src] = true;
              bfs_queue.push_back(src);
            }
          }
          int num_visited = 0;
          while (num_visited < bfs_queue.size()) {
            const int node = bfs_queue[num_visited++];
            for (const int neigh : (*graph)[node]) {
              if (!touched[neigh]) {
                touched[neigh] = true;
                bfs_queue.push_back(neigh);
              }
            }
          }
          ASSERT_EQ(reached[source_index].size(), bfs_queue.size());
        }
      }
    }
    // Each arc's length may have been queried at most once per source.
    for (const auto& p : num_arc_length_functor_calls) {
      ASSERT_LE(p.second, num_sources);
    }
  }
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,95 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/one_tree_lower_bound.h"
#include <cstdint>
#include <cstdlib>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "gtest/gtest.h"
#include "ortools/base/path.h"
#include "ortools/base/types.h"
#include "ortools/routing/tsplib_parser.h"
namespace operations_research {
namespace {
TEST(OneTreeLBTest, VolgenantJonkerEmpty) {
  // A 0-node instance must yield a zero bound without ever querying costs.
  const auto forbidden_cost = [](int from, int to) {
    ADD_FAILURE();  // Making sure the function is not being called.
    return 0;
  };
  EXPECT_EQ(0, ComputeOneTreeLowerBound(0, forbidden_cost));
}
TEST(OneTreeLBTest, HeldWolfeCrowderEmpty) {
  // A 0-node instance with the Held-Wolfe-Crowder variant: zero bound, and
  // the cost functor must never be invoked.
  TravelingSalesmanLowerBoundParameters params;
  params.algorithm = TravelingSalesmanLowerBoundParameters::HeldWolfeCrowder;
  const auto forbidden_cost = [](int from, int to) {
    ADD_FAILURE();  // Making sure the function is not being called.
    return 0;
  };
  EXPECT_EQ(0, ComputeOneTreeLowerBoundWithParameters(0, forbidden_cost,
                                                      params));
}
TEST(OneTreeLBTest, VolgenantJonkerOneNode) {
  // A single-node instance has a trivial zero bound.
  EXPECT_EQ(0, ComputeOneTreeLowerBound(1, [](int from, int to) { return 0; }));
}
TEST(OneTreeLBTest, HeldWolfeCrowderOneNode) {
  // Single-node instance, Held-Wolfe-Crowder variant: zero bound.
  TravelingSalesmanLowerBoundParameters params;
  params.algorithm = TravelingSalesmanLowerBoundParameters::HeldWolfeCrowder;
  EXPECT_EQ(0, ComputeOneTreeLowerBoundWithParameters(
                   1, [](int from, int to) { return 0; }, params));
}
TEST(OneTreeLBTest, VolgenantJonkerTwoNodes) {
  // Two nodes with unit distance: the bound is 2 (there and back).
  const auto unit_cost = [](int from, int to) { return 1; };
  EXPECT_EQ(2, ComputeOneTreeLowerBound(2, unit_cost));
}
TEST(OneTreeLBTest, HeldWolfeCrowderTwoNodes) {
  // Two nodes with unit distance, Held-Wolfe-Crowder variant: bound is 2.
  TravelingSalesmanLowerBoundParameters params;
  params.algorithm = TravelingSalesmanLowerBoundParameters::HeldWolfeCrowder;
  const auto unit_cost = [](int from, int to) { return 1; };
  EXPECT_EQ(2, ComputeOneTreeLowerBoundWithParameters(2, unit_cost, params));
}
TEST(OneTreeLBTest, Small) {
  // Nine points on a 3x3 grid, with Manhattan distances; the expected lower
  // bound is 9.
  const std::vector<std::pair<int, int>> points = {
      {0, 0}, {0, 1}, {0, 2}, {1, 0}, {1, 1}, {1, 2}, {2, 0}, {2, 1}, {2, 2}};
  const double bound = ComputeOneTreeLowerBound(9, [&points](int from, int to) {
    return std::abs(points[from].first - points[to].first) +
           std::abs(points[from].second - points[to].second);
  });
  EXPECT_EQ(9, bound);
}
} // namespace
} // namespace operations_research

View File

@@ -0,0 +1,356 @@
// Copyright 2010-2024 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ortools/graph/perfect_matching.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>
#include "absl/random/random.h"
#include "absl/types/span.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ortools/linear_solver/linear_solver.pb.h"
#include "ortools/linear_solver/solve_mp_model.h"
namespace operations_research {
namespace {
TEST(MinCostPerfectMatchingTest, Empty) {
  // A 0-node instance is trivially "matched" at zero cost.
  MinCostPerfectMatching matcher(0);
  ASSERT_EQ(MinCostPerfectMatching::OPTIMAL, matcher.Solve());
  EXPECT_EQ(0, matcher.OptimalCost());
  EXPECT_TRUE(matcher.Matches().empty());
}
TEST(MinCostPerfectMatchingTest, OptimumMatching) {
  // Bipartite 2x2 instance; the optimum pairs 0<->2 and 1<->3 for a cost of 4.
  MinCostPerfectMatching matcher(4);
  const struct { int tail; int head; int64_t cost; } kEdges[] = {
      {0, 2, 0}, {0, 3, 2}, {1, 2, 3}, {1, 3, 4}};
  for (const auto& e : kEdges) matcher.AddEdgeWithCost(e.tail, e.head, e.cost);
  ASSERT_EQ(MinCostPerfectMatching::OPTIMAL, matcher.Solve());
  EXPECT_EQ(4, matcher.OptimalCost());
  EXPECT_EQ(4, matcher.Matches().size());
  const int kExpectedMatch[] = {2, 3, 0, 1};
  for (int node = 0; node < 4; ++node) {
    EXPECT_EQ(kExpectedMatch[node], matcher.Match(node));
  }
}
TEST(MinCostPerfectMatchingTest, BipartiteInfeasibleProblem) {
  // Nodes 0 and 1 both connect only to node 3 (via duplicate, parallel
  // edges), and node 2 has no incident edge at all, so no perfect matching
  // exists.
  // NOTE(review): the duplicated (0,3)/(1,3) edges look like they may have
  // been meant as (0,2)/(1,2) — but that would make the instance feasible; as
  // written, the test exercises parallel edges plus infeasibility. Confirm
  // the intent.
  MinCostPerfectMatching matcher(4);
  matcher.AddEdgeWithCost(0, 3, 2);
  matcher.AddEdgeWithCost(0, 3, 10);
  matcher.AddEdgeWithCost(1, 3, 3);
  matcher.AddEdgeWithCost(1, 3, 20);
  ASSERT_EQ(matcher.Solve(), MinCostPerfectMatching::INFEASIBLE);
}
TEST(MinCostPerfectMatchingTest, LargerBipartiteInfeasibleProblem) {
  // Nodes {0, 1, 2} only connect to {5, 6} (3 nodes for 2 slots), so by
  // Hall's condition no perfect matching exists.
  MinCostPerfectMatching matcher(10);
  const struct { int tail; int head; int64_t cost; } kEdges[] = {
      {0, 5, 0}, {0, 6, 2}, {1, 5, 3}, {1, 6, 4}, {2, 5, 4}, {2, 6, 4},
      {3, 7, 4}, {3, 8, 4}, {3, 9, 4}, {4, 7, 4}, {4, 8, 4}, {4, 9, 4}};
  for (const auto& e : kEdges) matcher.AddEdgeWithCost(e.tail, e.head, e.cost);
  ASSERT_EQ(MinCostPerfectMatching::INFEASIBLE, matcher.Solve());
}
TEST(MinCostPerfectMatchingTest, IntegerOverflow) {
  // Edge costs at the int64 max must be reported as an overflow, not solved.
  constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
  MinCostPerfectMatching matcher(4);
  const struct { int tail; int head; } kPairs[] = {
      {0, 2}, {0, 3}, {1, 2}, {1, 3}};
  for (const auto& e : kPairs) matcher.AddEdgeWithCost(e.tail, e.head, kMax);
  ASSERT_EQ(MinCostPerfectMatching::INTEGER_OVERFLOW, matcher.Solve());
}
TEST(MinCostPerfectMatchingTest, CostOverflow) {
  // Costs of max/3 per edge trigger the cost-overflow detection, and the
  // reported cost saturates at the int64 max.
  constexpr int64_t kThird = std::numeric_limits<int64_t>::max() / 3;
  MinCostPerfectMatching matcher(4);
  const struct { int tail; int head; } kPairs[] = {
      {0, 2}, {0, 3}, {1, 2}, {1, 3}};
  for (const auto& e : kPairs) matcher.AddEdgeWithCost(e.tail, e.head, kThird);
  ASSERT_EQ(MinCostPerfectMatching::COST_OVERFLOW, matcher.Solve());
  EXPECT_EQ(std::numeric_limits<int64_t>::max(), matcher.OptimalCost());
}
// Parameterized by the Machol-Wien problem size n.
class MacholWienTest : public ::testing::TestWithParam<int> {};
// The following test computes bi-partite assignments on the instances
// described in Robert E. Machol and Michael Wien, "Errata: A Hard Assignment
// Problem", Operations Research, vol. 25, p. 364, 1977.
// http://www.jstor.org/stable/169842
//
// Such instances are proven difficult for the Hungarian method. Note that
// since this is a bi-partite problem, this doesn't exercise the
// Shrink()/Expand() methods.
TEST_P(MacholWienTest, SolveHardProblem) {
  const int n = GetParam();
  MinCostPerfectMatching matcher(2 * n);
  // Left nodes are 0..n-1, right nodes are n..2n-1; assigning i to j costs
  // i * j.
  for (int left = 0; left < n; ++left) {
    for (int right = 0; right < n; ++right) {
      matcher.AddEdgeWithCost(left, n + right, /*cost=*/left * right);
    }
  }
  ASSERT_EQ(MinCostPerfectMatching::OPTIMAL, matcher.Solve());
  // The optimum matches left node i with right node n - 1 - i (global index
  // 2n - 1 - i), for a total cost of sum_i i * (n - 1 - i).
  int64_t expected_cost = 0;
  for (int i = 0; i < n; ++i) {
    expected_cost += i * (n - 1 - i);
    EXPECT_EQ(2 * n - 1 - i, matcher.Match(i));
  }
  EXPECT_EQ(expected_cost, matcher.OptimalCost());
}
// Even with -c opt, a 1000x1000 Machol-Wien problem currently takes too long
// to solve, so the largest instances run in optimized builds only.
#ifdef NDEBUG
INSTANTIATE_TEST_SUITE_P(MacholWienProblems, MacholWienTest,
                         ::testing::Values(10, 50, 100, 200));
#else
INSTANTIATE_TEST_SUITE_P(MacholWienProblems, MacholWienTest,
                         ::testing::Values(10, 50));
#endif
using NodeIndex = BlossomGraph::NodeIndex;
using EdgeIndex = BlossomGraph::EdgeIndex;
using CostValue = BlossomGraph::CostValue;
// Tests on a basic complete graph on 4 nodes: checks the duals, matches and
// slacks produced by Initialize(), then one round of primal updates and one
// common dual update.
TEST(BlossomGraphTest, Initialization) {
  const int num_nodes = 4;
  BlossomGraph graph(num_nodes);
  // Edge costs are 1, 2, 3, ... in the insertion order (0,1), (0,2), (0,3),
  // (1,2), (1,3), (2,3); edges get indices 0..5 in that same order.
  // NOTE(review): this relies on a default-constructed CostValue starting at
  // 0 — confirm that its default constructor value-initializes.
  CostValue increasing_cost;
  for (NodeIndex a(0); a < num_nodes; ++a) {
    for (NodeIndex b(a + 1); b < num_nodes; ++b) {
      graph.AddEdge(a, b, ++increasing_cost);
    }
  }
  CHECK(graph.Initialize());
  CHECK(graph.DebugDualsAreFeasible());
  // Initial duals chosen by Initialize().
  EXPECT_EQ(graph.Dual(graph.GetNode(0)), CostValue(2));
  EXPECT_EQ(graph.Dual(graph.GetNode(1)), CostValue(0));
  EXPECT_EQ(graph.Dual(graph.GetNode(2)), CostValue(2));
  EXPECT_EQ(graph.Dual(graph.GetNode(3)), CostValue(4));
  // We don't have a perfect matching yet. Only 1 pair is matched.
  EXPECT_EQ(graph.Match(NodeIndex(0)), NodeIndex(1));
  EXPECT_EQ(graph.Match(NodeIndex(1)), NodeIndex(0));
  EXPECT_EQ(graph.Slack(graph.GetEdge(0)), CostValue(0));  // edge 0 <-> 1.
  EXPECT_FALSE(!graph.NodeIsMatched(NodeIndex(0)));
  EXPECT_FALSE(!graph.NodeIsMatched(NodeIndex(1)));
  // We have two unmatched nodes, which are tree roots.
  EXPECT_EQ(graph.Match(NodeIndex(2)), NodeIndex(2));
  EXPECT_EQ(graph.Match(NodeIndex(3)), NodeIndex(3));
  EXPECT_TRUE(!graph.NodeIsMatched(NodeIndex(2)));
  EXPECT_TRUE(!graph.NodeIsMatched(NodeIndex(3)));
  // The edge 2 <-> 3 (cost 6) is not tight: its reported slack is 6.
  // NOTE(review): cost(6) - dual(2) - dual(3) = 0 with the values above, so
  // the reported quantities are presumably at twice the input scale
  // (2*6 - 2 - 4 = 6) — confirm against BlossomGraph's internal
  // representation.
  EXPECT_EQ(graph.Slack(graph.GetEdge(5)), CostValue(6));
  // A zero delta means primal operations are still available before any
  // common dual change can happen; PrimalUpdates() performs them.
  EXPECT_EQ(graph.ComputeMaxCommonTreeDualDeltaAndResetPrimalEdgeQueue(),
            CostValue(0));
  graph.PrimalUpdates();
  VLOG(2) << graph.DebugString();
  const CostValue delta =
      graph.ComputeMaxCommonTreeDualDeltaAndResetPrimalEdgeQueue();
  EXPECT_EQ(delta, 3);
  graph.UpdateAllTrees(delta);
  // Duals after applying the common delta of 3 to all trees.
  EXPECT_EQ(graph.Dual(graph.GetNode(0)), CostValue(-1));
  EXPECT_EQ(graph.Dual(graph.GetNode(1)), CostValue(3));
  EXPECT_EQ(graph.Dual(graph.GetNode(2)), CostValue(5));
  EXPECT_EQ(graph.Dual(graph.GetNode(3)), CostValue(7));
  VLOG(2) << graph.DebugString();
  graph.PrimalUpdates();
}
// An undirected edge with its cost, as fed to
// MinCostPerfectMatching::AddEdgeWithCost().
struct Edge {
  int node1;  // First endpoint.
  int node2;  // Second endpoint.
  int64_t cost;  // Edge cost.
};
// Generates a random instance on `num_nodes` nodes (must be even) that is
// guaranteed to contain a perfect matching, loads it into `matcher`, and
// returns its edge list.
//
// The guarantee comes from first pairing up all nodes with random edges (a
// hidden perfect matching of num_nodes / 2 edges), then adding up to
// `num_arcs - num_nodes / 2` extra random edges (self-loops are skipped, so
// slightly fewer may be added). Costs are uniform in [0, 1000].
std::vector<Edge> GenerateAndLoadRandomProblem(
    int num_nodes, int num_arcs, MinCostPerfectMatching* matcher) {
  CHECK_EQ(num_nodes % 2, 0);
  absl::BitGen random;
  std::uniform_int_distribution<int> random_cost(0, 1000);
  std::vector<Edge> all_edges;
  // Starts by making sure there is a matching: repeatedly extract two random
  // (distinct) nodes from the pool and connect them.
  std::vector<int> all_nodes;
  for (int i = 0; i < num_nodes; ++i) all_nodes.push_back(i);
  while (!all_nodes.empty()) {
    std::vector<int> edge_nodes;
    for (int i = 0; i < 2; ++i) {
      // absl::Uniform(gen, lo, hi) samples the half-open range [lo, hi), so
      // the bound must be size() — with size() - 1 the last slot could never
      // be picked directly, biasing the selection.
      const int index =
          absl::Uniform(random, 0, static_cast<int>(all_nodes.size()));
      edge_nodes.push_back(all_nodes[index]);
      all_nodes[index] = all_nodes.back();  // Swap-with-back removal.
      all_nodes.pop_back();
    }
    all_edges.push_back({edge_nodes[0], edge_nodes[1], random_cost(random)});
  }
  // Now just add random arcs; the matching above already accounts for the
  // first num_nodes / 2 of the requested `num_arcs`.
  for (int i = num_nodes / 2; i < num_arcs; ++i) {
    const int node1 = absl::Uniform(random, 0, num_nodes);
    const int node2 = absl::Uniform(random, 0, num_nodes);
    if (node1 != node2) {
      all_edges.push_back({node1, node2, random_cost(random)});
    }
  }
  matcher->Reset(num_nodes);
  for (const Edge edge : all_edges) {
    matcher->AddEdgeWithCost(edge.node1, edge.node2, edge.cost);
  }
  return all_edges;
}
// We check that the returned matching is a valid matching with the correct
// costs.
//
// TODO(user): We could theoretically recover the dual and check the optimality
// condition if really needed. This is a bit involved though, and with the MIP
// tests below, we should have a good enough confidence in the code already.
void CheckOptimalSolution(const MinCostPerfectMatching& matcher,
                          const std::vector<Edge>& edges) {
  const std::vector<int>& matches = matcher.Matches();
  // First pass: Matches() must encode a permutation made only of 2-cycles
  // covering all nodes, i.e. matches[matches[i]] == i and matches[i] != i.
  std::vector<bool> seen(matches.size(), false);
  int num_seen = 0;
  for (int i = 0; i < matches.size(); ++i) {
    const int m = matches[i];
    ASSERT_NE(m, i);
    ASSERT_GE(m, 0);
    ASSERT_LT(m, matches.size());
    ASSERT_EQ(matches[m], i);
    if (m < i) continue;  // Process each matched pair once, from its low end.
    ASSERT_FALSE(seen[i]);
    ASSERT_FALSE(seen[m]);
    seen[i] = true;
    seen[m] = true;
    num_seen += 2;
  }
  EXPECT_EQ(num_seen, matches.size());
  // Check that the matching returned has the correct cost.
  // The instance may contain parallel edges, so for each matched pair we keep
  // the cheapest edge connecting it (std::min below): costs[] holds that
  // cheapest cost at the pair's smaller-index node, and 0 at the larger one.
  std::vector<int64_t> costs(matches.size(),
                             std::numeric_limits<int64_t>::max());
  for (const Edge e : edges) {
    if (matches[e.node1] == e.node2) {
      const int rep = std::min(e.node1, e.node2);
      const int other = std::max(e.node1, e.node2);
      costs[rep] = std::min(costs[rep], e.cost);
      costs[other] = 0;
    }
  }
  int64_t actual_cost = 0;
  for (int i = 0; i < costs.size(); ++i) {
    // Every matched pair must be backed by at least one input edge.
    CHECK_NE(costs[i], std::numeric_limits<int64_t>::max());
    actual_cost += costs[i];
  }
  EXPECT_EQ(matcher.OptimalCost(), actual_cost);
}
TEST(BlossomGraphTest, RandomSmallGraph) {
  // Solves random instances of various sizes and densities, and validates
  // each solution with CheckOptimalSolution().
  const int kSizes[] = {10, 40, 100, 1000};
  const int kEdgeFactors[] = {1, 10, 100};
  for (const int num_nodes : kSizes) {
    for (const int factor : kEdgeFactors) {
      MinCostPerfectMatching matcher;
      const std::vector<Edge> edges = GenerateAndLoadRandomProblem(
          num_nodes, num_nodes * factor, &matcher);
      ASSERT_EQ(MinCostPerfectMatching::OPTIMAL, matcher.Solve())
          << "Size: " << num_nodes << ", Edge factor: " << factor;
      CheckOptimalSolution(matcher, edges);
    }
  }
}
TEST(BlossomGraphTest, RandomLargeGraph) {
  // 10k nodes / 100k edges: only worth running in optimized builds.
  if (DEBUG_MODE) GTEST_SKIP() << "Too slow in non-opt";
  MinCostPerfectMatching matcher;
  const std::vector<Edge> random_edges =
      GenerateAndLoadRandomProblem(10000, 100000, &matcher);
  ASSERT_EQ(MinCostPerfectMatching::OPTIMAL, matcher.Solve());
  CheckOptimalSolution(matcher, random_edges);
}
// Solves the perfect-matching instance `edges` as a MIP — one 0/1 variable
// per edge, one "exactly one incident edge" constraint per node — and returns
// the optimal objective value rounded to the nearest integer.
int64_t SolveWithMip(absl::Span<const Edge> edges) {
  MPModelRequest request;
  request.set_solver_type(MPModelRequest::SAT_INTEGER_PROGRAMMING);
  std::vector<MPConstraintProto*> exactly_ones;
  // Use an explicit int bound to avoid implicit signed/unsigned comparisons
  // (edge and node indices are ints throughout this file).
  const int num_edges = static_cast<int>(edges.size());
  for (int i = 0; i < num_edges; ++i) {
    auto* var_proto = request.mutable_model()->add_variable();
    var_proto->set_lower_bound(0.0);
    var_proto->set_upper_bound(1.0);
    var_proto->set_is_integer(true);
    var_proto->set_objective_coefficient(edges[i].cost);
    for (const int node : {edges[i].node1, edges[i].node2}) {
      // Create the node's constraint if needed: nodes are discovered lazily,
      // so grow the constraint list up to the largest node index seen.
      while (node >= static_cast<int>(exactly_ones.size())) {
        exactly_ones.push_back(request.mutable_model()->add_constraint());
        exactly_ones.back()->set_lower_bound(1.0);
        exactly_ones.back()->set_upper_bound(1.0);
      }
      // Add the edge's variable to the node's "exactly one" constraint.
      exactly_ones[node]->add_var_index(i);
      exactly_ones[node]->add_coefficient(1.0);
    }
  }
  const MPSolutionResponse response = SolveMPModel(request);
  return static_cast<int64_t>(std::round(response.objective_value()));
}
// Parameterized only to repeat the cross-check on several random instances.
class AgreeWithMipTest : public ::testing::TestWithParam<int> {};
TEST_P(AgreeWithMipTest, CompareOptimalObjective) {
  // Cross-checks the blossom solver against a MIP formulation on a random
  // 50-node / ~100-edge instance.
  MinCostPerfectMatching matcher;
  const std::vector<Edge> random_edges =
      GenerateAndLoadRandomProblem(50, 100, &matcher);
  ASSERT_EQ(MinCostPerfectMatching::OPTIMAL, matcher.Solve());
  EXPECT_EQ(SolveWithMip(random_edges), matcher.OptimalCost());
}
INSTANTIATE_TEST_SUITE_P(RandomInstances, AgreeWithMipTest,
                         ::testing::Range(0, 10));
} // namespace
} // namespace operations_research

View File

@@ -11,24 +11,34 @@
# See the License for the specific language governing permissions and
# limitations under the License.
load(":code_samples.bzl", "code_sample_cc_py", "code_sample_java")
code_sample_cc_py(name = "assignment_linear_sum_assignment")
code_sample_cc_py(name = "assignment_min_flow")
code_sample_cc_py(name = "balance_min_flow")
code_sample_cc_py(name = "simple_max_flow_program")
code_sample_cc_py(name = "simple_min_cost_flow_program")
load(":code_samples.bzl", "code_sample_cc", "code_sample_cc_py", "code_sample_java")
code_sample_java(name = "AssignmentLinearSumAssignment")
code_sample_cc_py(name = "assignment_linear_sum_assignment")
code_sample_java(name = "AssignmentMinFlow")
code_sample_cc_py(name = "assignment_min_flow")
code_sample_java(name = "BalanceMinFlow")
code_sample_cc_py(name = "balance_min_flow")
code_sample_cc(name = "bfs_directed")
code_sample_cc(name = "bfs_one_to_all")
code_sample_cc(name = "bfs_undirected")
code_sample_cc(name = "dag_shortest_path_one_to_all")
code_sample_cc(name = "dag_shortest_path_sequential")
code_sample_cc(name = "dag_simple_shortest_path")
code_sample_cc(name = "dijkstra_all_pairs_shortest_paths")
code_sample_cc(name = "dijkstra_directed")
code_sample_cc(name = "dijkstra_one_to_all")
code_sample_cc(name = "dijkstra_sequential")
code_sample_cc(name = "dijkstra_undirected")
code_sample_java(name = "SimpleMaxFlowProgram")
code_sample_cc_py(name = "simple_max_flow_program")
code_sample_java(name = "SimpleMinCostFlowProgram")
code_sample_cc_py(name = "simple_min_cost_flow_program")

View File

@@ -21,11 +21,17 @@ def code_sample_cc(name):
srcs = [name + ".cc"],
deps = [
"//ortools/base",
"//ortools/base:status_macros",
"//ortools/base:top_n",
"//ortools/graph:assignment",
"//ortools/graph:bounded_dijkstra",
"//ortools/graph:bfs",
"//ortools/graph:dag_shortest_path",
"//ortools/graph:ebert_graph",
"//ortools/graph:linear_assignment",
"//ortools/graph:max_flow",
"//ortools/graph:min_cost_flow",
"@com_google_absl//absl/random",
],
)
@@ -36,11 +42,17 @@ def code_sample_cc(name):
deps = [
":" + name + "_cc",
"//ortools/base",
"//ortools/base:status_macros",
"//ortools/base:top_n",
"//ortools/graph:assignment",
"//ortools/graph:bounded_dijkstra",
"//ortools/graph:bfs",
"//ortools/graph:dag_shortest_path",
"//ortools/graph:ebert_graph",
"//ortools/graph:linear_assignment",
"//ortools/graph:max_flow",
"//ortools/graph:min_cost_flow",
"@com_google_absl//absl/random",
],
)