From 52e4c24a9e494751216dd50f3ab2ef7a018a8df5 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Tue, 10 Jun 2025 16:29:34 +0200 Subject: [PATCH 01/81] fix #4677 --- ortools/sat/scheduling_cuts.cc | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index e51929b16d..2af85e168a 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -1102,26 +1102,25 @@ std::string CompletionTimeEvent::DebugString() const { void CtExhaustiveHelper::Init( const absl::Span events, Model* model) { + max_task_index_ = 0; + if (events.empty() || events.size() > 100) return; + BinaryRelationsMaps* binary_relations = model->GetOrCreate(); - max_task_index_ = 0; - for (const auto& event : events) { - max_task_index_ = std::max(max_task_index_, event.task_index); - } + + std::vector sorted_events(events.begin(), events.end()); + std::sort(sorted_events.begin(), sorted_events.end(), + [](const CompletionTimeEvent& a, const CompletionTimeEvent& b) { + return a.task_index < b.task_index; + }); + max_task_index_ = sorted_events.back().task_index; predecessors_.reserve(max_task_index_ + 1); for (const auto& e1 : events) { - CHECK_LE(predecessors_.size(), e1.task_index); - while (predecessors_.size() <= e1.task_index) { - predecessors_.Add({}); - } - - // Cap the number of precedences to avoid O(n^2) time complexity. 
- if (predecessors_.num_entries() > 20000) break; - for (const auto& e2 : events) { if (e2.task_index == e1.task_index) continue; if (binary_relations->GetLevelZeroPrecedenceStatus(e2.end, e1.start) == RelationStatus::IS_TRUE) { + while (predecessors_.size() <= e1.task_index) predecessors_.Add({}); predecessors_.AppendToLastVector(e2.task_index); } } @@ -1138,6 +1137,7 @@ bool CtExhaustiveHelper::PermutationIsCompatibleWithPrecedences( visited_.assign(max_task_index_ + 1, false); for (int i = permutation.size() - 1; i >= 0; --i) { const CompletionTimeEvent& event = events[permutation[i]]; + if (event.task_index >= predecessors_.size()) continue; for (const int predecessor : predecessors_[event.task_index]) { if (visited_[predecessor]) return false; } @@ -1328,9 +1328,11 @@ CompletionTimeExplorationStatus ComputeMinSumOfWeightedEndMins( helper.task_to_index_[events[i].task_index] = i; } helper.valid_permutation_iterator_.Reset(events.size()); + const auto& predecessors = helper.predecessors(); for (int i = 0; i < events.size(); ++i) { const int task_i = events[i].task_index; - for (const int task_j : helper.predecessors()[task_i]) { + if (task_i >= predecessors.size()) continue; + for (const int task_j : predecessors[task_i]) { const int j = helper.task_to_index_[task_j]; if (j != -1) { helper.valid_permutation_iterator_.AddArc(j, i); @@ -1456,6 +1458,7 @@ ABSL_MUST_USE_RESULT bool GenerateShortCompletionTimeCutsWithExactBound( helper, min_sum_of_ends, min_sum_of_weighted_ends, cut_use_precedences, exploration_limit); if (status == CompletionTimeExplorationStatus::NO_VALID_PERMUTATION) { + // TODO(user): We should return false here but there is a bug. 
break; } else if (status == CompletionTimeExplorationStatus::ABORTED) { break; @@ -1846,6 +1849,7 @@ CutGenerator CreateCumulativeCompletionTimeCutGenerator( auto generate_cuts = [integer_trail, sat_solver, model, manager, helper, demands_helper, capacity](bool time_is_forward) -> bool { + DCHECK_EQ(sat_solver->CurrentDecisionLevel(), 0); if (!helper->SynchronizeAndSetTimeDirection(time_is_forward)) { return false; } From 03ef7e19a4342dabb02b7ce9b4dd3c8fca2d4546 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Jun 2025 14:45:18 +0000 Subject: [PATCH 02/81] build(deps): bump actions/setup-java in the github-actions group Bumps the github-actions group with 1 update: [actions/setup-java](https://github.com/actions/setup-java). Updates `actions/setup-java` from 3 to 4 - [Release notes](https://github.com/actions/setup-java/releases) - [Commits](https://github.com/actions/setup-java/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-java dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major dependency-group: github-actions ... 
Signed-off-by: dependabot[bot] --- .github/workflows/amd64_macos_bazel.yml | 2 +- .github/workflows/amd64_windows_bazel.yml | 2 +- .github/workflows/arm64_macos_bazel.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/amd64_macos_bazel.yml b/.github/workflows/amd64_macos_bazel.yml index 633e8ef6be..216a6b1c66 100644 --- a/.github/workflows/amd64_macos_bazel.yml +++ b/.github/workflows/amd64_macos_bazel.yml @@ -24,7 +24,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set Java to OpenJDK 17 (Temurin) - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' diff --git a/.github/workflows/amd64_windows_bazel.yml b/.github/workflows/amd64_windows_bazel.yml index de68fd82cf..62a56707fd 100644 --- a/.github/workflows/amd64_windows_bazel.yml +++ b/.github/workflows/amd64_windows_bazel.yml @@ -23,7 +23,7 @@ jobs: runs-on: windows-2022 steps: - uses: actions/checkout@v4 - - uses: actions/setup-java@v3 + - uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' diff --git a/.github/workflows/arm64_macos_bazel.yml b/.github/workflows/arm64_macos_bazel.yml index 8e5acceaa4..969756652b 100644 --- a/.github/workflows/arm64_macos_bazel.yml +++ b/.github/workflows/arm64_macos_bazel.yml @@ -24,7 +24,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Set Java to OpenJDK 17 (Temurin) - uses: actions/setup-java@v3 + uses: actions/setup-java@v4 with: distribution: 'temurin' java-version: '17' From 92f78981b7d06c02e8cd6d90a7f1480a88950213 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 08:13:52 +0200 Subject: [PATCH 03/81] tools/release: cleanup --- tools/release/publish_delivery_linux.sh | 4 +- .../publish_delivery_manylinux_amd64.sh | 5 +- .../publish_delivery_manylinux_arm64.sh | 5 +- tools/release/publish_delivery_meta.sh | 126 ++++++++++++++++++ tools/release/publish_delivery_win.cmd | 4 +- 5 files changed, 136 insertions(+), 8 
deletions(-) create mode 100755 tools/release/publish_delivery_meta.sh diff --git a/tools/release/publish_delivery_linux.sh b/tools/release/publish_delivery_linux.sh index fcd132729e..a082bb3dc2 100755 --- a/tools/release/publish_delivery_linux.sh +++ b/tools/release/publish_delivery_linux.sh @@ -76,9 +76,9 @@ function publish_java() { if [[ -x "$(command -v openssl11)" ]]; then OPENSSL_PRG=openssl11 fi - command -v $OPENSSL_PRG | xargs echo "openssl: " | tee -a build.log + command -v $OPENSSL_PRG | xargs echo "openssl: " | tee -a publish.log command -v gpg - command -v gpg | xargs echo "gpg: " | tee -a build.log + command -v gpg | xargs echo "gpg: " | tee -a publish.log echo -n "Publish native Java..." | tee -a publish.log cmake --build temp_java --target java_native_deploy -v diff --git a/tools/release/publish_delivery_manylinux_amd64.sh b/tools/release/publish_delivery_manylinux_amd64.sh index 9f67999de1..17236e7688 100755 --- a/tools/release/publish_delivery_manylinux_amd64.sh +++ b/tools/release/publish_delivery_manylinux_amd64.sh @@ -109,7 +109,7 @@ function main() { local -r RELEASE_DIR="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" echo "RELEASE_DIR: '${RELEASE_DIR}'" | tee -a publish.log - (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a build.log) + (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a publish.log) local -r ORTOOLS_BRANCH=$(git rev-parse --abbrev-ref HEAD) local -r ORTOOLS_SHA1=$(git rev-parse --verify HEAD) @@ -124,8 +124,9 @@ function main() { "publish_$1" exit ;; all) + #publish_dotnet publish_java - publish_python + #publish_python exit ;; *) >&2 echo "Target '${1}' unknown" diff --git a/tools/release/publish_delivery_manylinux_arm64.sh b/tools/release/publish_delivery_manylinux_arm64.sh index 5c444b2356..7c1e655f32 100755 --- a/tools/release/publish_delivery_manylinux_arm64.sh +++ b/tools/release/publish_delivery_manylinux_arm64.sh @@ -109,7 +109,7 @@ function main() { local -r RELEASE_DIR="$(cd -P -- "$(dirname -- "$0")" 
&& pwd -P)" echo "RELEASE_DIR: '${RELEASE_DIR}'" | tee -a publish.log - (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a build.log) + (cd "${ROOT_DIR}" && make print-OR_TOOLS_VERSION | tee -a publish.log) local -r ORTOOLS_BRANCH=$(git rev-parse --abbrev-ref HEAD) local -r ORTOOLS_SHA1=$(git rev-parse --verify HEAD) @@ -124,8 +124,9 @@ function main() { "publish_$1" exit ;; all) + #publish_dotnet publish_java - publish_python + #publish_python exit ;; *) >&2 echo "Target '${1}' unknown" diff --git a/tools/release/publish_delivery_meta.sh b/tools/release/publish_delivery_meta.sh new file mode 100755 index 0000000000..14df6f295f --- /dev/null +++ b/tools/release/publish_delivery_meta.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# Copyright 2010-2025 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +function help() { + local -r NAME=$(basename "$0") + local -r BOLD="\e[1m" + local -r RESET="\e[0m" + local -r help=$(cat << EOF +${BOLD}NAME${RESET} +\t$NAME - Publish delivery using the ${BOLD}local host system${RESET}. +${BOLD}SYNOPSIS${RESET} +\t$NAME [-h|--help] [java] +${BOLD}DESCRIPTION${RESET} +\tPublish Google OR-Tools deliveries. +\tYou ${BOLD}MUST${RESET} define the following variables before running this script: +\t* ORTOOLS_TOKEN: secret use to decrypt key to sign Java package. 
+ +${BOLD}OPTIONS${RESET} +\t-h --help: display this help text +\tjava: publish the Java runtime packages +\tall: publish everything (default) + +${BOLD}EXAMPLES${RESET} +Using export to define the ${BOLD}ORTOOLS_TOKEN${RESET} env and only publishing the Java packages: +export ORTOOLS_TOKEN=SECRET +$0 java + +note: the 'export ORTOOLS_TOKEN=...' should be placed in your bashrc to avoid any leak +of the secret in your bash history +EOF +) + echo -e "$help" +} + +function assert_defined(){ + if [[ -z "${!1}" ]]; then + >&2 echo "Variable '${1}' must be defined" + exit 1 + fi +} + +# Java publish +function publish_java() { + if echo "${ORTOOLS_BRANCH} ${ORTOOLS_SHA1}" | cmp --silent "${ROOT_DIR}/export_meta/meta_java_publish" -; then + echo "publish Java up to date!" + return 0 + fi + + # maven require JAVA_HOME + if [[ -z "${JAVA_HOME}" ]]; then + echo "JAVA_HOME: not found !" | tee publish.log + exit 1 + else + echo "JAVA_HOME: ${JAVA_HOME}" | tee -a publish.log + command -v mvn + command -v mvn | xargs echo "mvn: " | tee -a publish.log + java -version 2>&1 | tee -a publish.log + java -version 2>&1 | head -n 1 | grep -q "1.8" + fi + # Maven central need gpg sign and we store the release key encoded using openssl + local OPENSSL_PRG=openssl + if [[ -x "$(command -v openssl11)" ]]; then + OPENSSL_PRG=openssl11 + fi + command -v $OPENSSL_PRG | xargs echo "openssl: " | tee -a publish.log + command -v gpg + command -v gpg | xargs echo "gpg: " | tee -a publish.log + + echo -n "Publish native Java..." 
| tee -a publish.log + cmake --build temp_meta_java --config Release --target java_deploy -v + echo "DONE" | tee -a publish.log + + echo "${ORTOOLS_BRANCH} ${ORTOOLS_SHA1}" > "${ROOT_DIR}/export_meta/meta_java_publish" +} + +# Main +function main() { + case ${1} in + -h | --help) + help; exit ;; + esac + + assert_defined ORTOOLS_TOKEN + echo "ORTOOLS_TOKEN: FOUND" | tee publish.log + make print-OR_TOOLS_VERSION | tee -a publish.log + + local -r ROOT_DIR="$(cd -P -- "$(dirname -- "$0")/../.." && pwd -P)" + echo "ROOT_DIR: '${ROOT_DIR}'" + + local -r RELEASE_DIR="$(cd -P -- "$(dirname -- "$0")" && pwd -P)" + echo "RELEASE_DIR: '${RELEASE_DIR}'" + + local -r ORTOOLS_BRANCH=$(git rev-parse --abbrev-ref HEAD) + local -r ORTOOLS_SHA1=$(git rev-parse --verify HEAD) + local -r PLATFORM=$(uname -m) + + mkdir -p export + case ${1} in + java) + "publish_$1" + exit ;; + all) + publish_java + exit ;; + *) + >&2 echo "Target '${1}' unknown" + exit 1 + esac + exit 0 +} + +main "${1:-all}" + diff --git a/tools/release/publish_delivery_win.cmd b/tools/release/publish_delivery_win.cmd index 4af40864b2..644871aba2 100644 --- a/tools/release/publish_delivery_win.cmd +++ b/tools/release/publish_delivery_win.cmd @@ -83,9 +83,9 @@ which.exe mvn || exit 1 which.exe mvn | tee.exe -a publish.log which.exe gpg || exit 1 -which.exe gpg | tee.exe -a build.log +which.exe gpg | tee.exe -a publish.log which.exe openssl || exit 1 -which.exe openssl | tee.exe -a build.log +which.exe openssl | tee.exe -a publish.log echo Publish native Java... 
| tee.exe -a publish.log cmake --build temp_java --config Release --target java_native_deploy -v From 4456cd5516694fe250271565ae677e19a7768cc6 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 08:20:50 +0200 Subject: [PATCH 04/81] bazel: Updates `requests` from 2.32.3 to 2.32.4 - [Release notes](https://github.com/psf/requests/releases) - [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md) - [Commits](https://github.com/psf/requests/compare/v2.32.3...v2.32.4) --- updated-dependencies: - dependency-name: requests dependency-version: 2.32.4 dependency-type: direct:production dependency-group: pip ... --- bazel/notebook_requirements.in | 2 +- bazel/notebook_requirements.txt | 2 +- bazel/ortools_requirements.in | 2 +- bazel/ortools_requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index d7c30c0201..c557b3cb6c 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -3,7 +3,7 @@ absl-py==2.2.2 immutabledict==4.2.1 numpy==2.2.0 protobuf==6.31.0 -requests==2.32.3 +requests==2.32.4 scipy==1.14.1 typing-extensions==4.13.1 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index b8a7a5aa7a..b76f09dba1 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -256,7 +256,7 @@ referencing==0.36.2 # jsonschema # jsonschema-specifications # jupyter-events -requests==2.32.3 +requests==2.32.4 # via # -r bazel/notebook_requirements.in # jupyterlab-server diff --git a/bazel/ortools_requirements.in b/bazel/ortools_requirements.in index 0b4c89ab40..3d3a8acfe2 100644 --- a/bazel/ortools_requirements.in +++ b/bazel/ortools_requirements.in @@ -3,7 +3,7 @@ absl-py==2.2.2 immutabledict==4.2.1 numpy==2.2.0 protobuf==6.31.0 -requests==2.32.3 +requests==2.32.4 scipy==1.14.1 typing-extensions==4.13.1 diff --git a/bazel/ortools_requirements.txt b/bazel/ortools_requirements.txt 
index 820668e036..2c99af6946 100644 --- a/bazel/ortools_requirements.txt +++ b/bazel/ortools_requirements.txt @@ -53,7 +53,7 @@ python-dateutil==2.8.2 # via pandas pytz==2022.7.1 # via pandas -requests==2.32.3 +requests==2.32.4 # via -r bazel/ortools_requirements.in scipy==1.14.1 # via -r bazel/ortools_requirements.in From aa726710d0b7c428a2379fca384d5f7a63cdb964 Mon Sep 17 00:00:00 2001 From: galabovaa Date: Fri, 6 Jun 2025 15:04:13 +0300 Subject: [PATCH 05/81] cmake: HiGHS v1.11.0 (#4670) * no longer needs patch ref: https://github.com/ERGO-Code/HiGHS/releases/tag/v1.11.0 --- cmake/dependencies/CMakeLists.txt | 4 +- patches/highs-v1.10.0.patch | 169 ------------------------------ 2 files changed, 1 insertion(+), 172 deletions(-) delete mode 100644 patches/highs-v1.10.0.patch diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index bdd1d4362c..2b461eacd3 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -285,10 +285,8 @@ if(BUILD_HIGHS) FetchContent_Declare( highs GIT_REPOSITORY "https://github.com/ERGO-Code/HiGHS.git" - GIT_TAG "v1.10.0" + GIT_TAG "v1.11.0" GIT_SHALLOW TRUE - PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/../../patches/highs-v1.10.0.patch" ) FetchContent_MakeAvailable(highs) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/patches/highs-v1.10.0.patch b/patches/highs-v1.10.0.patch deleted file mode 100644 index e7f58d1962..0000000000 --- a/patches/highs-v1.10.0.patch +++ /dev/null @@ -1,169 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index ffaf5290..bf7d1f56 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -194,11 +194,11 @@ if (BUILD_CXX) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) - # for multi-config build system (e.g. 
xcode) -- foreach(OUTPUTCONFIG IN LISTS CMAKE_CONFIGURATION_TYPES) -- string(TOUPPER ${OUTPUTCONFIG} OUTPUTCONFIG) -- set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_LIBDIR}) -- set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_LIBDIR}) -- set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -+ foreach(OutputConfig IN LISTS CMAKE_CONFIGURATION_TYPES) -+ string(TOUPPER ${OutputConfig} OUTPUTCONFIG) -+ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_LIBDIR}) -+ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_LIBDIR}) -+ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) - endforeach() - else() - option(BUILD_SHARED_LIBS "Build shared libraries (.dll)." OFF) -@@ -206,14 +206,11 @@ if (BUILD_CXX) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) - # for multi-config builds (e.g. 
msvc) -- foreach(OUTPUTCONFIG IN LISTS CMAKE_CONFIGURATION_TYPES) -- string(TOLOWER ${OUTPUTCONFIG} OUTPUTCONFIG) -- set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}/${OUTPUTCONFIG}) -- set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}/${OUTPUTCONFIG}) -- set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}/${OUTPUTCONFIG}) -- # set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -- # set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -- # set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OUTPUTCONFIG}/${CMAKE_INSTALL_BINDIR}) -+ foreach(OutputConfig IN LISTS CMAKE_CONFIGURATION_TYPES) -+ string(TOUPPER ${OutputConfig} OUTPUTCONFIG) -+ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) -+ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) -+ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_BINDIR}) - endforeach() - endif() - -diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt -index e390ac4b..0e2c470c 100644 ---- a/src/CMakeLists.txt -+++ b/src/CMakeLists.txt -@@ -1,7 +1,7 @@ - if (NOT BUILD_CXX) - return() - endif() -- -+ - # Define library. 
- include(sources) - set(sources ${highs_sources} ${cupdlp_sources} ${ipx_sources} ${basiclu_sources}) -@@ -84,7 +84,7 @@ if(NOT FAST_BUILD) - # target_compile_options(libipx PRIVATE "-Wno-sign-compare") - # target_compile_options(libipx PRIVATE "-Wno-logical-op-parentheses") - endif() -- -+ - install(TARGETS libhighs EXPORT highs-targets - LIBRARY - ARCHIVE -@@ -154,8 +154,6 @@ else() - # $ - ) - -- target_include_directories(highs PUBLIC "${CMAKE_CUDA_PATH}/include") -- - # target_include_directories(highs PRIVATE - # $ - # $ -@@ -180,8 +178,8 @@ else() - # $) - - target_sources(highs PRIVATE ${sources} ${headers} ${win_version_file}) -- -- # Optional Cuda -+ -+ # Optional Cuda - if (CUPDLP_GPU) - # enable_language(CXX CUDA) - # target_sources(highs PRIVATE ${cuda_sources}) -@@ -189,9 +187,11 @@ else() - # set_target_properties(highs PROPERTIES CUDA_SEPARABLE_COMPILATION ON) - - # target_link_libraries(highs ${CUDA_LIBRARY} m) -- -+ - # target_include_directories(highs PUBLIC "/usr/local/include") - -+ target_include_directories(highs PUBLIC -+ $) - set(CUPDLP_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/src/pdlp/cupdlp/") - - add_subdirectory(pdlp/cupdlp/cuda) -@@ -201,7 +201,7 @@ else() - else() - target_link_libraries(highs cudalin ${CUDA_LIBRARY} m) - endif() -- -+ - set_target_properties(highs PROPERTIES CUDA_SEPARABLE_COMPILATION ON) - - endif() -@@ -257,13 +257,13 @@ else() - $ - ) - target_link_libraries(highs ZLIB::ZLIB) -- set(CONF_DEPS -+ set(CONF_DEPS - "include(CMakeFindDependencyMacro)\nfind_dependency(Threads)\nfind_dependency(ZLIB)") - set(CONF_DEPENDENCIES ${CONF_DEPS}) -- else() -+ else() - set(CONF_DEPENDENCIES "include(CMakeFindDependencyMacro)\nfind_dependency(Threads)") - endif() -- -+ - - # # on UNIX system the 'lib' prefix is automatically added - # set_target_properties(highs PROPERTIES -@@ -274,7 +274,7 @@ else() - # set_target_properties(highs PROPERTIES - # LIBRARY_OUTPUT_DIRECTORY "${HIGHS_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}") - # endif() -- -+ 
- # set_target_properties(highs PROPERTIES PUBLIC_HEADER "src/Highs.h;src/lp_data/HighsLp.h;src/lp_data/HighsLpSolverObject.h") - - # install the header files of highs -@@ -291,7 +291,7 @@ else() - - # target_compile_options(highs PRIVATE "-Wall") - # target_compile_options(highs PRIVATE "-Wunused") -- -+ - if (UNIX) - target_compile_options(highs PRIVATE "-Wno-unused-variable") - target_compile_options(highs PRIVATE "-Wno-unused-const-variable") -@@ -324,7 +324,7 @@ else() - - - if (BUILD_DOTNET) -- -+ - # see: https://docs.microsoft.com/en-us/dotnet/core/rid-catalog - if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)") - set(DOTNET_PLATFORM arm64) -@@ -355,8 +355,8 @@ else() - set(TARGET_FILE_NAME "highs.dll") - endif() - -- add_custom_command(TARGET highs POST_BUILD -- COMMAND "${CMAKE_COMMAND}" -E copy -+ add_custom_command(TARGET highs POST_BUILD -+ COMMAND "${CMAKE_COMMAND}" -E copy - "$" - ${DOTNET_PROJECT_DIR}/runtimes/${DOTNET_RID}/native/${TARGET_FILE_NAME} - COMMENT "Copying to output directory") -@@ -375,7 +375,7 @@ if(FORTRAN_FOUND) - target_link_libraries(FortranHighs PUBLIC highs) - endif() - -- install(TARGETS FortranHighs -+ install(TARGETS FortranHighs - LIBRARY - ARCHIVE - RUNTIME From 3bac8f9fdfa93ebae8864d604b87e3b581494649 Mon Sep 17 00:00:00 2001 From: galabovaa Date: Fri, 6 Jun 2025 15:05:39 +0300 Subject: [PATCH 06/81] bazel: update HiGHS to v1.11.0 ref: Update highs bazelbuild/bazel-central-registry#4770 --- MODULE.bazel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MODULE.bazel b/MODULE.bazel index a6cafbe1e8..df19d71a17 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -24,7 +24,7 @@ bazel_dep(name = "gazelle", version = "0.43.0") bazel_dep(name = "glpk", version = "5.0.bcr.4") bazel_dep(name = "google_benchmark", version = "1.9.2") bazel_dep(name = "googletest", version = "1.17.0") -bazel_dep(name = "highs", version = "1.10.0") +bazel_dep(name = "highs", version = "1.11.0") bazel_dep(name = "platforms", version = 
"0.0.11") bazel_dep(name = "protobuf", version = "31.0") bazel_dep(name = "pybind11_abseil", version = "202402.0") From 474b5c337f5a4ed7a7a5e87f1cae2de7dc311b1e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Jun 2025 14:09:15 +0200 Subject: [PATCH 07/81] HiGHS cleanup --- Dependencies.txt | 2 +- patches/BUILD.bazel | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Dependencies.txt b/Dependencies.txt index b1dc1ed9b7..f0813f2c1d 100644 --- a/Dependencies.txt +++ b/Dependencies.txt @@ -9,7 +9,7 @@ Clp=1.17.10 Cgl=0.60.9 Cbc=2.10.12 GLPK=5.0 -HiGHS=v1.10.0 +HiGHS=v1.11.0 Scip=v922 # Python pybind11=v2.13.6 diff --git a/patches/BUILD.bazel b/patches/BUILD.bazel index 22f2795c84..28b25b4abe 100644 --- a/patches/BUILD.bazel +++ b/patches/BUILD.bazel @@ -13,7 +13,6 @@ exports_files([ "abseil-cpp-20250512.0.patch", - "highs-v1.10.patch", "protobuf-v31.0.patch", "pybind11_bazel.patch", "pybind11_abseil.patch", From 0ca1d9b8d8dfb19c215c9be8a6e78e1e18f90dc9 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 11 Jun 2025 12:48:56 +0200 Subject: [PATCH 08/81] proper fix --- ortools/sat/scheduling_cuts.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ortools/sat/scheduling_cuts.cc b/ortools/sat/scheduling_cuts.cc index 2af85e168a..066fc486d0 100644 --- a/ortools/sat/scheduling_cuts.cc +++ b/ortools/sat/scheduling_cuts.cc @@ -1103,7 +1103,13 @@ std::string CompletionTimeEvent::DebugString() const { void CtExhaustiveHelper::Init( const absl::Span events, Model* model) { max_task_index_ = 0; - if (events.empty() || events.size() > 100) return; + if (events.empty()) return; + // We compute the max_task_index_ from the events early to avoid sorting + // the events if there are too many of them. 
+ for (const auto& event : events) { + max_task_index_ = std::max(max_task_index_, event.task_index); + } + if (events.size() > 100) return; BinaryRelationsMaps* binary_relations = model->GetOrCreate(); @@ -1113,10 +1119,9 @@ void CtExhaustiveHelper::Init( [](const CompletionTimeEvent& a, const CompletionTimeEvent& b) { return a.task_index < b.task_index; }); - max_task_index_ = sorted_events.back().task_index; predecessors_.reserve(max_task_index_ + 1); - for (const auto& e1 : events) { - for (const auto& e2 : events) { + for (const auto& e1 : sorted_events) { + for (const auto& e2 : sorted_events) { if (e2.task_index == e1.task_index) continue; if (binary_relations->GetLevelZeroPrecedenceStatus(e2.end, e1.start) == RelationStatus::IS_TRUE) { From a56a50aa006ef0a368511c6d458aba22d4acfabe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Tue, 10 Jun 2025 14:53:23 +0200 Subject: [PATCH 09/81] cmake: Add support for custom protoc executable via OR_TOOLS_PROTOC_EXECUTABLE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow users to specify a custom protoc executable by setting the OR_TOOLS_PROTOC_EXECUTABLE variable, which takes precedence over the default cross-compilation and system protoc detection logic. Signed-off-by: Clément Péron --- cmake/host.cmake | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmake/host.cmake b/cmake/host.cmake index fe303362fe..f95949cb90 100644 --- a/cmake/host.cmake +++ b/cmake/host.cmake @@ -11,6 +11,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+if (OR_TOOLS_PROTOC_EXECUTABLE) + set(PROTOC_PRG ${OR_TOOLS_PROTOC_EXECUTABLE}) + return() +endif() + if(NOT CMAKE_CROSSCOMPILING) set(PROTOC_PRG protobuf::protoc) return() From abb4301ea2023f07514db1229eb8ec5b74811344 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Wed, 11 Jun 2025 15:04:28 +0200 Subject: [PATCH 10/81] [CP-SAT] minor python improvements (handling of None; remove all names from a model) --- ortools/sat/python/cp_model.py | 8 ++++++++ ortools/sat/python/cp_model_helper.cc | 16 ++++++++++------ ortools/sat/python/cp_model_helper_test.py | 3 +++ ortools/sat/python/cp_model_test.py | 21 ++++++++++++++++++++- 4 files changed, 41 insertions(+), 7 deletions(-) diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 0a8780d1c3..435be0f7ba 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -2306,6 +2306,14 @@ class CpModel: """ return cmh.CpSatHelper.write_model_to_file(self.__model, file) + def remove_all_names(self) -> None: + """Removes all names from the model.""" + self.__model.ClearField("name") + for v in self.__model.variables: + v.ClearField("name") + for c in self.__model.constraints: + c.ClearField("name") + @overload def add_hint(self, var: IntVar, value: int) -> None: ... 
diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index c2aae7fd4c..ef40d8161e 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -503,8 +503,10 @@ PYBIND11_MODULE(cp_model_helper, m) { py::class_(m, "ResponseWrapper") .def("best_objective_bound", &ResponseWrapper::BestObjectiveBound) - .def("boolean_value", &ResponseWrapper::BooleanValue, py::arg("lit")) - .def("boolean_value", &ResponseWrapper::FixedBooleanValue, py::arg("lit")) + .def("boolean_value", &ResponseWrapper::BooleanValue, + py::arg("lit").none(false)) + .def("boolean_value", &ResponseWrapper::FixedBooleanValue, + py::arg("lit").none(false)) .def("deterministic_time", &ResponseWrapper::DeterministicTime) .def("num_binary_propagations", &ResponseWrapper::NumBinaryPropagations) .def("num_booleans", &ResponseWrapper::NumBooleans) @@ -520,10 +522,12 @@ PYBIND11_MODULE(cp_model_helper, m) { .def("sufficient_assumptions_for_infeasibility", &ResponseWrapper::SufficientAssumptionsForInfeasibility) .def("user_time", &ResponseWrapper::UserTime) - .def("float_value", &ResponseWrapper::FloatValue, py::arg("expr")) - .def("float_value", &ResponseWrapper::FixedFloatValue, py::arg("value")) - .def("value", &ResponseWrapper::Value, py::arg("expr")) - .def("value", &ResponseWrapper::FixedValue, py::arg("value")) + .def("float_value", &ResponseWrapper::FloatValue, + py::arg("expr").none(false)) + .def("float_value", &ResponseWrapper::FixedFloatValue, + py::arg("value").none(false)) + .def("value", &ResponseWrapper::Value, py::arg("expr").none(false)) + .def("value", &ResponseWrapper::FixedValue, py::arg("value").none(false)) .def("wall_time", &ResponseWrapper::WallTime); py::class_(m, "SolveWrapper") diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index e8ee7c4695..46cd288225 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ 
-187,6 +187,9 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(cp_model_pb2.OPTIMAL, response_wrapper.status()) self.assertEqual(30.0, response_wrapper.objective_value()) self.assertEqual(30.0, response_wrapper.best_objective_bound()) + self.assertRaises(TypeError, response_wrapper.value, None) + self.assertRaises(TypeError, response_wrapper.float_value, None) + self.assertRaises(TypeError, response_wrapper.boolean_value, None) def test_solution_callback(self): model_string = """ diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 7d16abcf42..bbed30ffa0 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -272,6 +272,18 @@ class CpModelTest(absltest.TestCase): self.assertEqual(10, solver.value(x)) self.assertEqual(-5, solver.value(y)) + def test_none_argument(self) -> None: + model = cp_model.CpModel() + x = model.new_int_var(-10, 10, "x") + y = model.new_int_var(-10, 10, "y") + model.add_linear_constraint(x + 2 * y, 0, 10) + model.minimize(y) + solver = cp_model.CpSolver() + self.assertEqual(cp_model.OPTIMAL, solver.solve(model)) + self.assertRaises(TypeError, solver.value, None) + self.assertRaises(TypeError, solver.float_value, None) + self.assertRaises(TypeError, solver.boolean_value, None) + def test_linear_constraint(self) -> None: model = cp_model.CpModel() model.add_linear_constraint(5, 0, 10) @@ -443,12 +455,19 @@ class CpModelTest(absltest.TestCase): self.assertEqual(-4, model.proto.constraints[2].enforcement_literal[0]) self.assertEqual(2, model.proto.constraints[2].enforcement_literal[1]) - def test_constraint_with_name(self) -> None: + def test_names(self) -> None: model = cp_model.CpModel() + model.name = "test_model" x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") ct = model.add_linear_constraint(x + 2 * y, 0, 10).with_name("test_constraint") + self.assertEqual(model.name, "test_model") + self.assertEqual(x.name, "x") 
self.assertEqual("test_constraint", ct.name) + model.remove_all_names() + self.assertEmpty(model.name) + self.assertEmpty(x.name) + self.assertEmpty(ct.name) def test_natural_api_minimize(self) -> None: model = cp_model.CpModel() From 7a8e52dde6031657d6b0f3c622dbf84d87e7030d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 15:23:46 +0200 Subject: [PATCH 11/81] make: Enable HiGHS support by default --- makefiles/Makefile.cpp.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefiles/Makefile.cpp.mk b/makefiles/Makefile.cpp.mk index 3d3e656ba0..d26ede715f 100644 --- a/makefiles/Makefile.cpp.mk +++ b/makefiles/Makefile.cpp.mk @@ -35,7 +35,7 @@ endif BUILD_TYPE ?= Release USE_COINOR ?= ON USE_GLPK ?= OFF -USE_HIGHS ?= OFF +USE_HIGHS ?= ON USE_PDLP := ON # OFF not supported USE_SCIP ?= ON USE_CPLEX ?= OFF From 97dfc039af58f02a356a09f7103a5658589f0543 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 10:33:02 +0200 Subject: [PATCH 12/81] tools/release: Detect /Users path in libortools.dylib (#4674) --- tools/release/build_delivery_macos.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 31aa8d1d47..c3052fb932 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -94,6 +94,9 @@ function build_dotnet() { echo -n "Build .Net..." | tee -a build.log cmake -S. -Btemp_dotnet -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_DOTNET=ON cmake --build temp_dotnet -j8 -v + echo " Check libortools.dylib..." | tee -a build.log + otool -L temp_dotnet/lib/libortools.dylib | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_dotnet --target test #echo "cmake test: DONE" | tee -a build.log @@ -181,6 +184,9 @@ function build_java() { cmake -S. 
-Btemp_java -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF \ -DBUILD_JAVA=ON -DSKIP_GPG=OFF ${GPG_EXTRA} cmake --build temp_java -j8 -v + echo " Check libortools.dylib..." | tee -a build.log + otool -L temp_java/lib/libortools.dylib | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_java --target test #echo "cmake test: DONE" | tee -a build.log @@ -272,6 +278,9 @@ function build_python() { echo -n "Build Python ${PY_VERSION}..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" cmake --build "temp_python${PY_VERSION}" -j8 -v + echo " Check libortools.dylib..." | tee -a build.log + otool -L "temp_python${PY_VERSION}/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_python${PY_VERSION} --target test #echo "cmake test_python${PY_VERSION}: DONE" | tee -a build.log @@ -321,14 +330,23 @@ function build_archive() { echo -n "Make cpp archive..." | tee -a build.log make archive_cpp + echo " Check libortools.dylib..." | tee -a build.log + otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log echo -n "Make dotnet archive..." | tee -a build.log make archive_dotnet + echo " Check libortools.dylib..." | tee -a build.log + otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log echo -n "Make java archive..." | tee -a build.log make archive_java + echo " Check libortools.dylib..." 
| tee -a build.log + otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" + echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log # move archive to export @@ -372,6 +390,7 @@ function reset() { cd "${ROOT_DIR}" || exit 2 make clean + rm -rf temp_cpp rm -rf temp_dotnet rm -rf temp_java rm -rf temp_python* From 5587f01a23ad710dd588ff0986062e00806d361a Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 11 Jun 2025 11:16:43 +0200 Subject: [PATCH 13/81] dependencies: Fix MACOSX_RPATH usage (#4674) 1. This is a boolean property which must be set to TRUE or FALSE If TRUE, the default, cmake will use @rpath as directory portion (aka prefix) of the install_name (otool LC_ID_DYLIB) note: CMP0042 set it to TRUE by default 2. To change this prefix you must use INSTALL_NAME_DIR 3. To change the INSTALL_RPATH (otool LC_RPATH) (e.g. to set it to @loader_path) you must use the INSTALL_RPATH property. ref: https://cmake.org/cmake/help/latest/variable/CMAKE_MACOSX_RPATH.html https://cmake.org/cmake/help/latest/prop_tgt/MACOSX_RPATH.html https://cmake.org/cmake/help/latest/prop_tgt/INSTALL_RPATH.html https://cmake.org/cmake/help/latest/policy/CMP0042.html --- cmake/dependencies/CMakeLists.txt | 2 + patches/highs-v1.11.0.patch | 276 ++++++++++++++++++++++++++++++ patches/scip-v922.patch | 42 ++++- patches/soplex-v7.1.3.patch | 28 ++- 4 files changed, 338 insertions(+), 10 deletions(-) create mode 100644 patches/highs-v1.11.0.patch diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 2b461eacd3..926e51c047 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -287,6 +287,8 @@ if(BUILD_HIGHS) GIT_REPOSITORY "https://github.com/ERGO-Code/HiGHS.git" GIT_TAG "v1.11.0" GIT_SHALLOW TRUE + PATCH_COMMAND git apply --ignore-whitespace + "${CMAKE_CURRENT_LIST_DIR}/../../patches/highs-v1.11.0.patch" ) FetchContent_MakeAvailable(highs) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git 
a/patches/highs-v1.11.0.patch b/patches/highs-v1.11.0.patch new file mode 100644 index 0000000000..ce02101076 --- /dev/null +++ b/patches/highs-v1.11.0.patch @@ -0,0 +1,276 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 661aa078..2606e08d 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -57,7 +57,7 @@ endif() + # message("CMAKE_CXX_COMPILER_ID is ${CMAKE_CXX_COMPILER_ID}") + if (CMAKE_CXX_COMPILER_ID STREQUAL "IntelLLVM") + message(STATUS "Compiler is IntelLLVM") +- if (CMAKE_HOST_WIN32 AND CMAKE_VERSION VERSION_LESS "3.23.0") ++ if (CMAKE_HOST_WIN32 AND CMAKE_VERSION VERSION_LESS "3.23.0") + message(FATAL_ERROR "Need at least CMake 3.23 for IntelLLVM support of IntelDPCPP package on Windows") + elseif(CMAKE_VERSION VERSION_LESS "3.23.0") + message(FATAL_ERROR "CMake 3.20.5 is the minimum recommended for IntelLLVM on Linux") +@@ -121,9 +121,9 @@ endif() + + option(HIGHS_COVERAGE "Activate the code coverage compilation" OFF) + +-# Address | Thread | Leak ++# Address | Thread | Leak + # Linux atm +-# Only Debug is theted atm ++# Only Debug is theted atm + # See below for RelWithDeb info, todo test wip + set(DEBUG_MEMORY "Off" CACHE STRING "Sanitizers") + +@@ -137,7 +137,7 @@ message(STATUS "Build pdlp with GPU: ${CUPDLP_GPU}") + option(CUPDLP_FIND_CUDA "Build pdlp with GPU" OFF) + message(STATUS "Use FindCUDAConf: ${CUPDLP_FIND_CUDA}") + +-if(CUPDLP_GPU AND CMAKE_VERSION VERSION_LESS "3.25.0") ++if(CUPDLP_GPU AND CMAKE_VERSION VERSION_LESS "3.25.0") + message("CUPDLP FindCUDAConf requires CMake version minumum 3.24. Please use a higher version of CMake.") + endif() + +@@ -158,11 +158,11 @@ if (CUPDLP_GPU) + # With FindCUDAConf.cmake + # Need to have the CUDA_HOME environment variable set. 
+ include(FindCUDAConf) +- else() ++ else() + # Without FindCUDAConf.cmake + enable_language(CUDA) + find_package(CUDAToolkit REQUIRED) +- ++ + set(CUDA_LIBRARY-NOTFOUND, OFF) + set(CUDA_LIBRARY CUDA::cudart CUDA::cublas CUDA::cusparse) + endif() +@@ -205,7 +205,7 @@ if (BUILD_CXX) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) + set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_BINDIR}) +- # for multi-config build system (e.g. xcode) ++ # for multi-config build system (e.g. xcode) + foreach(OutputConfig IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER ${OutputConfig} OUTPUTCONFIG) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_${OUTPUTCONFIG} ${CMAKE_BINARY_DIR}/${OutputConfig}/${CMAKE_INSTALL_LIBDIR}) +@@ -244,14 +244,14 @@ if (BUILD_CXX) + option(STDCALL "Build highs with the __stdcall convention" OFF) + endif() + +- if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR +- CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR +- CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") ++ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR ++ CMAKE_CXX_COMPILER_ID STREQUAL "Clang" OR ++ CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +- # elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") ++ # elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # not recognised by cl +- # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++11") +- endif() ++ # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++11") ++ endif() + + # Basic type + include(CMakePushCheckState) +@@ -275,7 +275,7 @@ if (BUILD_CXX) + check_type_size("int *" SIZEOF_INT_P LANGUAGE CXX) + message(STATUS "Found int * size: ${SIZEOF_INT_P}") + cmake_pop_check_state() +- ++ + # Use current CMAKE_C_FLAGS and CMAKE_CXX_FLAGS when checking for IPO support, + # instead of defaults: https://cmake.org/cmake/help/latest/policy/CMP0138.html + if(MSVC AND BUILD_SHARED_LIBS) +@@ -293,7 +293,7 
@@ if (BUILD_CXX) + set(ipo_supported NO) + message(STATUS "IPO / LTO not currently supported building HiGHS on MinGW") + else() +- if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") ++ if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") + cmake_policy(SET CMP0138 NEW) + endif() + +@@ -371,19 +371,8 @@ else() + HIGHS_HAVE_BUILTIN_CLZ) + endif() + +-set(CMAKE_MACOSX_RPATH ON) +- +-if (BUILD_DOTNET) +- set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) +-else() +- # use, i.e. don't skip the full RPATH for the build tree +- set(CMAKE_SKIP_BUILD_RPATH FALSE) +- +- # when building, don't use the install RPATH already +- # (but later on when installing) +- set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) +- set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) +-endif() ++# set the correct rpath for OS X ++set(CMAKE_MACOSX_RPATH TRUE) + + if(NOT FAST_BUILD) + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${HIGHS_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) +@@ -428,7 +417,7 @@ endif() + + # For debug of cuda locally + +-# does not work with older CMake ++# does not work with older CMake + # add_compile_options("$<$,$>:-G>") + + # add_compile_options("$<$:-G>") +@@ -453,7 +442,7 @@ if(MSVC) + add_compile_options("$<$:-D_CRT_SECURE_NO_WARNINGS>") + add_compile_options("$<$:/MP>") + +- # Try to split large pdb files into objects. ++ # Try to split large pdb files into objects. 
+ # https://github.com/tensorflow/tensorflow/issues/31610 + # add_compile_options("/Z7") + # add_link_options("/DEBUG:FASTLINK") +@@ -611,11 +600,11 @@ if(FAST_BUILD AND HIGHS_COVERAGE) + message(STATUS "Building in coverage mode") + + # Enable coverage flags +- add_compile_options(-O0) +- add_compile_options(--coverage) +- add_compile_options(-fprofile-update=atomic) ++ add_compile_options(-O0) ++ add_compile_options(--coverage) ++ add_compile_options(-fprofile-update=atomic) + +- add_link_options(-O0) ++ add_link_options(-O0) + add_link_options(--coverage) # Ensure coverage data is linked correctly + + find_program(GCOV_PATH gcov) +diff --git a/highs/CMakeLists.txt b/highs/CMakeLists.txt +index 50301433..f7b982fb 100644 +--- a/highs/CMakeLists.txt ++++ b/highs/CMakeLists.txt +@@ -1,7 +1,7 @@ + if (NOT BUILD_CXX) + return() + endif() +- ++ + # Define library. + include(sources) + set(sources ${highs_sources} ${cupdlp_sources} ${ipx_sources} ${basiclu_sources}) +@@ -43,7 +43,7 @@ if(NOT FAST_BUILD) + set_target_properties(libhighs PROPERTIES + OUTPUT_NAME "highs" + PDB_NAME "libhighs" +- MACOSX_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ ) + + if(ZLIB AND ZLIB_FOUND) + target_link_libraries(libhighs ZLIB::ZLIB) +@@ -51,8 +51,11 @@ if(NOT FAST_BUILD) + endif() + + # set the install rpath to the installed destination +- set_target_properties(libhighs PROPERTIES INSTALL_RPATH +- "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ if(APPLE) ++ set_target_properties(libhighs PROPERTIES INSTALL_RPATH "@loader_path") ++ elseif (UNIX) ++ set_target_properties(libhighs PROPERTIES INSTALL_RPATH "$ORIGIN") ++ endif() + + # install the header files of highs + foreach(file ${headers}) +@@ -84,7 +87,7 @@ if(NOT FAST_BUILD) + # target_compile_options(libipx PRIVATE "-Wno-sign-compare") + # target_compile_options(libipx PRIVATE "-Wno-logical-op-parentheses") + endif() +- ++ + install(TARGETS libhighs EXPORT highs-targets + LIBRARY + ARCHIVE +@@ -150,8 +153,8 @@ 
else() + + + target_sources(highs PRIVATE ${sources} ${headers} ${win_version_file}) +- +- # Optional Cuda ++ ++ # Optional Cuda + if (CUPDLP_GPU) + + target_include_directories(highs PUBLIC "$") +@@ -164,7 +167,7 @@ else() + else() + target_link_libraries(highs cudalin ${CUDA_LIBRARY} m) + endif() +- ++ + set_target_properties(highs PROPERTIES CUDA_SEPARABLE_COMPILATION ON) + + endif() +@@ -221,7 +224,7 @@ else() + ) + target_link_libraries(highs ZLIB::ZLIB) + endif() +- ++ + # install the header files of highs + foreach(file ${headers}) + get_filename_component(dir ${file} DIRECTORY) +@@ -236,9 +239,9 @@ else() + + # target_compile_options(highs PRIVATE "-Wall") + # target_compile_options(highs PRIVATE "-Wunused") +- ++ + if (UNIX) +- if ( CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") ++ if ( CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + target_compile_options(highs PRIVATE "-Wall") + target_compile_options(highs PRIVATE "-Wreturn-type") + target_compile_options(highs PRIVATE "-Wmissing-declarations") +@@ -248,7 +251,7 @@ else() + target_compile_options(highs PRIVATE "-Wno-comment") + target_compile_options(highs PRIVATE "-Wno-unused-label") + +- if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") ++ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + target_compile_options(highs PRIVATE "-Wno-unused-lambda-capture") + endif() + +@@ -267,7 +270,7 @@ else() + endif() + + if (BUILD_DOTNET) +- ++ + # see: https://docs.microsoft.com/en-us/dotnet/core/rid-catalog + if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)") + set(DOTNET_PLATFORM arm64) +@@ -298,8 +301,8 @@ else() + set(TARGET_FILE_NAME "highs.dll") + endif() + +- add_custom_command(TARGET highs POST_BUILD +- COMMAND "${CMAKE_COMMAND}" -E copy ++ add_custom_command(TARGET highs POST_BUILD ++ COMMAND "${CMAKE_COMMAND}" -E copy + "$" + ${DOTNET_PROJECT_DIR}/runtimes/${DOTNET_RID}/native/${TARGET_FILE_NAME} + COMMENT "Copying to output directory") +@@ 
-318,7 +321,7 @@ if(FORTRAN_FOUND) + target_link_libraries(FortranHighs PUBLIC highs) + endif() + +- install(TARGETS FortranHighs ++ install(TARGETS FortranHighs + LIBRARY + ARCHIVE + RUNTIME diff --git a/patches/scip-v922.patch b/patches/scip-v922.patch index 7a92254118..b1093c9e34 100644 --- a/patches/scip-v922.patch +++ b/patches/scip-v922.patch @@ -1,5 +1,5 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 8492dc75..4c12a9bf 100644 +index 38ac7845..9b0d4fcb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,9 +38,11 @@ set(CPACK_PACKAGE_VENDOR "Zuse Institute Berlin") @@ -17,6 +17,15 @@ index 8492dc75..4c12a9bf 100644 if(SCIPOptSuite_BINARY_DIR) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${SCIPOptSuite_BINARY_DIR}/bin) +@@ -239,7 +241,7 @@ if(DEBUGSOL) + endif() + + #set the correct rpath for OS X +-set(CMAKE_MACOSX_RPATH ON) ++set(CMAKE_MACOSX_RPATH TRUE) + + #set defines for Windows + if(WIN32) @@ -412,22 +414,11 @@ endif() #search the selected LP solver library message(STATUS "Finding Solver \"${LPS}\"") @@ -96,10 +105,35 @@ index 559552f9..682ac40a 100644 set(SCIP_INCLUDE_DIRS "@CONF_INCLUDE_DIRS@") set(SCIP_FOUND TRUE) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt -index e6fda2d5..2d04b845 100644 +index d6dd3acf..a146ddec 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt -@@ -1149,17 +1149,8 @@ install(TARGETS scip libscip EXPORT scip-targets +@@ -5,8 +5,8 @@ include(GNUInstallDirs) + + function(setLibProperties targetname outputname) + set_target_properties(${targetname} PROPERTIES +- OUTPUT_NAME ${outputname} +- MACOSX_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ OUTPUT_NAME ${outputname} ++ ) + endfunction(setLibProperties) + + set(CMAKE_C_STANDARD 99) +@@ -1112,6 +1112,13 @@ target_link_libraries(scip + add_dependencies(libscip scip_update_githash) + add_dependencies(scip scip_update_githash) + ++if(APPLE) ++ set_target_properties(libscip PROPERTIES ++ INSTALL_RPATH "@loader_path") ++elseif(UNIX) ++ 
set_target_properties(libscip PROPERTIES ++ INSTALL_RPATH "$ORIGIN") ++endif() + set_target_properties(libscip PROPERTIES + VERSION ${SCIP_VERSION_MAJOR}.${SCIP_VERSION_MINOR}.${SCIP_VERSION_PATCH}.${SCIP_VERSION_SUB} + SOVERSION ${SCIP_VERSION_MAJOR}.${SCIP_VERSION_MINOR} +@@ -1150,17 +1157,8 @@ install(TARGETS scip libscip EXPORT scip-targets INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) # Add all targets to the build-tree export set @@ -119,7 +153,7 @@ index e6fda2d5..2d04b845 100644 # configure the config file for the build tree set(CONF_INCLUDE_DIRS "${PROJECT_SOURCE_DIR}/src" "${PROJECT_BINARY_DIR}") -@@ -1175,18 +1166,16 @@ ${PROJECT_BINARY_DIR}/scip-config-version.cmake +@@ -1176,18 +1174,16 @@ ${PROJECT_BINARY_DIR}/scip-config-version.cmake #configure the config file for the install set(CONF_INCLUDE_DIRS "\${CMAKE_CURRENT_LIST_DIR}/../../../include") diff --git a/patches/soplex-v7.1.3.patch b/patches/soplex-v7.1.3.patch index 06b629ec98..2df6a36841 100644 --- a/patches/soplex-v7.1.3.patch +++ b/patches/soplex-v7.1.3.patch @@ -1,5 +1,5 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index 0b21f5a..ddf1536 100644 +index 0b21f5a..6f08341 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,6 +27,10 @@ set(CPACK_PACKAGE_VERSION_PATCH "${SOPLEX_VERSION_PATCH}") @@ -34,7 +34,12 @@ index 0b21f5a..ddf1536 100644 # for colorized output if(NOT WIN32) -@@ -69,6 +79,8 @@ set(CMAKE_MACOSX_RPATH ON) +@@ -65,10 +75,12 @@ if(NOT CMAKE_BUILD_TYPE) + endif() + + # set the correct rpath for OS X +-set(CMAKE_MACOSX_RPATH ON) ++set(CMAKE_MACOSX_RPATH TRUE) # use C++14 standard set(CMAKE_CXX_STANDARD 14) @@ -131,9 +136,20 @@ index 0b21f5a..ddf1536 100644 + add_subdirectory(check) +endif() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt -index 84ec5a5..6f5d4ef 100644 +index 84ec5a5..4552300 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt +@@ -3,8 +3,8 @@ + # + function(setLibProperties targetname outputname) + set_target_properties(${targetname} PROPERTIES 
+- OUTPUT_NAME ${outputname} +- MACOSX_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") ++ OUTPUT_NAME ${outputname} ++ ) + endfunction(setLibProperties) + + include(GNUInstallDirs) @@ -193,24 +193,28 @@ target_link_libraries(libsoplexshared libsoplex ${libs}) set_target_properties(libsoplexshared PROPERTIES CXX_VISIBILITY_PRESET default) @@ -143,11 +159,11 @@ index 84ec5a5..6f5d4ef 100644 +if(SOPLEX_SOPLEX) + add_executable(soplex EXCLUDE_FROM_ALL soplexmain.cpp) + target_link_libraries(soplex PRIVATE libsoplex ${Boost_LIBRARIES}) - --if(EMSCRIPTEN AND EMSCRIPTEN_HTML) ++ + # set the install rpath to the installed destination + set_target_properties(soplex PROPERTIES INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") -+ + +-if(EMSCRIPTEN AND EMSCRIPTEN_HTML) + if(EMSCRIPTEN AND EMSCRIPTEN_HTML) set_target_properties(soplex PROPERTIES LINK_DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/soplex_webdemo_shell.html) set(CMAKE_EXECUTABLE_SUFFIX ".html") From 9748fe87a8bfc9b27f2cd27fbeec66b615915e60 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 12 Jun 2025 10:02:27 +0200 Subject: [PATCH 14/81] tools/release: cleanup usage --- tools/release/build_delivery_linux.sh | 1 + tools/release/build_delivery_macos.sh | 1 + tools/release/build_delivery_manylinux_amd64.sh | 1 + tools/release/build_delivery_manylinux_arm64.sh | 1 + tools/release/build_delivery_win.cmd | 2 +- 5 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/release/build_delivery_linux.sh b/tools/release/build_delivery_linux.sh index 1a9431662a..668cc04649 100755 --- a/tools/release/build_delivery_linux.sh +++ b/tools/release/build_delivery_linux.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages diff --git a/tools/release/build_delivery_macos.sh 
b/tools/release/build_delivery_macos.sh index c3052fb932..79498d58cd 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages diff --git a/tools/release/build_delivery_manylinux_amd64.sh b/tools/release/build_delivery_manylinux_amd64.sh index ab0a580856..892745755e 100755 --- a/tools/release/build_delivery_manylinux_amd64.sh +++ b/tools/release/build_delivery_manylinux_amd64.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages diff --git a/tools/release/build_delivery_manylinux_arm64.sh b/tools/release/build_delivery_manylinux_arm64.sh index d87d4b7bdf..81961b19d4 100755 --- a/tools/release/build_delivery_manylinux_arm64.sh +++ b/tools/release/build_delivery_manylinux_arm64.sh @@ -30,6 +30,7 @@ ${BOLD}DESCRIPTION${RESET} ${BOLD}OPTIONS${RESET} \t-h --help: display this help text +\tarchive: build all (C++, .Net, Java) archives \tdotnet: build all .Net packages \tjava: build all Java packages \tpython: build all Pyhon packages diff --git a/tools/release/build_delivery_win.cmd b/tools/release/build_delivery_win.cmd index d50a83f831..68a0665244 100644 --- a/tools/release/build_delivery_win.cmd +++ b/tools/release/build_delivery_win.cmd @@ -93,7 +93,7 @@ echo help: show this help text (default) echo dotnet: Build dotnet packages echo java: Build java packages echo python: Build python packages -echo archive: Build archive +echo archive: Build all (C++, .Net, Java) archives echo examples: Build examples archives echo all: build everything echo reset: delete all artifacts and suppress 
cache file From 105835ab9527dcfd83ce353ecdead63728f9fe2e Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:11:12 +0200 Subject: [PATCH 15/81] [CP-SAT] improve python layer for += and -= operators --- ortools/sat/python/cp_model_helper.cc | 60 +++++++++++++++++++++++++++ ortools/sat/python/cp_model_test.py | 14 +++++++ ortools/sat/python/linear_expr.cc | 5 +-- 3 files changed, 76 insertions(+), 3 deletions(-) diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index ef40d8161e..371a87b7a2 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -960,6 +960,36 @@ PYBIND11_MODULE(cp_model_helper, m) { }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddFloat)) + .def( + "__iadd__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other); + return expr; + }, + py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, Add)) + .def( + "__iadd__", + [](py::object self, int64_t cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddIntInPlace(cst); + return expr; + }, + DOC(operations_research, sat, python, LinearExpr, AddInt)) + .def( + "__iadd__", + [](py::object self, double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(cst); + return expr; + }, + py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, AddFloat)) .def( "__sub__", [](py::object self, @@ -1003,6 +1033,36 @@ PYBIND11_MODULE(cp_model_helper, m) { }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubFloat)) + .def( + "__isub__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other->MulInt(-1)); + return expr; + }, + py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, 
Sub)) + .def( + "__isub__", + [](py::object self, int64_t cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddIntInPlace(-cst); + return expr; + }, + DOC(operations_research, sat, python, LinearExpr, SubInt)) + .def( + "__isub__", + [](py::object self, double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(-cst); + return expr; + }, + py::arg("other").none(false), + DOC(operations_research, sat, python, LinearExpr, SubFloat)) .def_property_readonly("num_exprs", &SumArray::num_exprs) .def_property_readonly("int_offset", &SumArray::int_offset) .def_property_readonly("double_offset", &SumArray::double_offset); diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index bbed30ffa0..2add07de02 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -2447,6 +2447,20 @@ TRFM""" x = [model.new_int_var(0, 10, f"x{i}") for i in range(100000)] model.add(sum(x) == 10) + def test_large_iadd(self): + model = cp_model.CpModel() + s = 0 + for _ in range(300000): + s += model.new_bool_var("") + model.add(s == 10) + + def test_large_isub(self): + model = cp_model.CpModel() + s = 0 + for _ in range(300000): + s -= model.new_bool_var("") + model.add(s == 10) + def test_simplification1(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") diff --git a/ortools/sat/python/linear_expr.cc b/ortools/sat/python/linear_expr.cc index b158fb9343..42077ef46f 100644 --- a/ortools/sat/python/linear_expr.cc +++ b/ortools/sat/python/linear_expr.cc @@ -89,9 +89,8 @@ std::shared_ptr LinearExpr::AddFloat(double cst) { std::shared_ptr LinearExpr::Sub(std::shared_ptr other) { std::vector> exprs; exprs.push_back(shared_from_this()); - exprs.push_back(other); - const std::vector coeffs = {1, -1}; - return std::make_shared(exprs, coeffs, 0); + exprs.push_back(other->MulInt(-1)); + return std::make_shared(exprs); } std::shared_ptr 
LinearExpr::SubInt(int64_t cst) { From 6e24da8ea4020a1b9776af20506e6cdcba6952bd Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:18:26 +0200 Subject: [PATCH 16/81] fix --- ortools/sat/python/cp_model_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 2add07de02..47f32d28dd 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -252,7 +252,7 @@ class CpModelTest(absltest.TestCase): y = model.NewIntVar(0, 2, "y") z = model.NewIntVar(0, 3, "z") expr = x - y - 2 * z - self.assertEqual(str(expr), "(-(2 * z) + (x - y))") + self.assertEqual(str(expr), '(x + (-y) + (-(2 * z)))') def test_equality_overload(self) -> None: model = cp_model.CpModel() From 803c4269e2b20cd7e587b5c35446afed78fba012 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 11:52:00 +0200 Subject: [PATCH 17/81] improve python exprs --- .../python/model_builder_helper.cc | 39 +++++++++++++++++++ .../python/model_builder_test.py | 14 +++++++ .../wrappers/model_builder_helper.cc | 5 +-- ortools/sat/python/cp_model_helper_test.py | 6 +-- ortools/sat/python/cp_model_test.py | 2 +- 5 files changed, 59 insertions(+), 7 deletions(-) diff --git a/ortools/linear_solver/python/model_builder_helper.cc b/ortools/linear_solver/python/model_builder_helper.cc index f085b0fe6a..48f9df1dd0 100644 --- a/ortools/linear_solver/python/model_builder_helper.cc +++ b/ortools/linear_solver/python/model_builder_helper.cc @@ -460,6 +460,26 @@ PYBIND11_MODULE(model_builder_helper, m) { return expr->AddFloat(cst); }, py::arg("cst"), "Returns `self` + `cst`.") + .def( + "__iadd__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other); + return expr; + }, + py::arg("other").none(false), + "Returns the sum of `self` and `other`.") + .def( + "__iadd__", + [](py::object self, 
double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(cst); + return expr; + }, + py::arg("cst"), "Returns `self` + `cst`.") .def("__radd__", &LinearExpr::Add, py::arg("other").none(false), "Returns `self` + `other`.") .def( @@ -502,6 +522,25 @@ PYBIND11_MODULE(model_builder_helper, m) { return expr->SubFloat(cst); }, py::arg("cst"), "Returns `self` - `cst`.") + .def( + "__isub__", + [](py::object self, + std::shared_ptr other) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddInPlace(other->Neg()); + return expr; + }, + py::arg("other").none(false), "Returns `self` - `other`.") + .def( + "__isub__", + [](py::object self, double cst) -> std::shared_ptr { + std::shared_ptr expr = + self.cast>(); + expr->AddFloatInPlace(-cst); + return expr; + }, + py::arg("cst"), "Returns `self` - `cst`.") .def_property_readonly( "num_exprs", &SumArray::num_exprs, "Returns the number of linear expressions in the sum.") diff --git a/ortools/linear_solver/python/model_builder_test.py b/ortools/linear_solver/python/model_builder_test.py index 475289c4a2..78a3406755 100644 --- a/ortools/linear_solver/python/model_builder_test.py +++ b/ortools/linear_solver/python/model_builder_test.py @@ -364,6 +364,20 @@ ENDATA c5 = x - y == 3 self.assertEqual(str(c5), "(x - y) == 3") + def test_large_iadd(self): + model = mb.Model() + s = 0 + for _ in range(300000): + s += model.new_bool_var("") + model.add(s == 10) + + def test_large_isub(self): + model = mb.Model() + s = 0 + for _ in range(300000): + s -= model.new_bool_var("") + model.add(s == 10) + def test_variables(self): model = mb.Model() x = model.new_int_var(0.0, 4.0, "x") diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index def8d92952..611b07058c 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -816,9 +816,8 @@ 
std::shared_ptr LinearExpr::AddFloat(double cst) { std::shared_ptr LinearExpr::Sub(std::shared_ptr expr) { std::vector> exprs; exprs.push_back(shared_from_this()); - exprs.push_back(expr); - std::vector coeffs = {1.0, -1.0}; - return std::make_shared(exprs, coeffs, 0.0); + exprs.push_back(expr->MulFloat(-1.0)); + return std::make_shared(exprs, 0.0); } std::shared_ptr LinearExpr::SubFloat(double cst) { diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index 46cd288225..b0dd988597 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -303,7 +303,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e5), "(x - 1)") e6 = x - 2 * y self.assertTrue(e6.is_integer()) - self.assertEqual(str(e6), "(x - (2 * y))") + self.assertEqual(str(e6), "(x + (-(2 * y)))") z = TestIntVar(2, "z", True) e7 = -z self.assertTrue(e7.is_integer()) @@ -323,7 +323,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e11), "(x + 2 * y + 3 * z - 5)") e12 = x - y - 2 * z - self.assertEqual(str(e12), "(-(2 * z) + (x - y))") + self.assertEqual(str(e12), "(x + (-y) + (-(2 * z)))") def test_float_lin_expr(self): x = TestIntVar(0, "x") @@ -351,7 +351,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e6), "(x + (2.4 * y))") e7 = x - 2.4 * y self.assertFalse(e7.is_integer()) - self.assertEqual(str(e7), "(x - (2.4 * y))") + self.assertEqual(str(e7), "(x + (-(2.4 * y)))") z = TestIntVar(2, "z") e8 = cmh.LinearExpr.sum([x, y, z, -2]) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index 47f32d28dd..ce36281958 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -252,7 +252,7 @@ class CpModelTest(absltest.TestCase): y = model.NewIntVar(0, 2, "y") z = model.NewIntVar(0, 3, "z") expr = x - y - 2 * z - self.assertEqual(str(expr), '(x + (-y) + (-(2 * z)))') + 
self.assertEqual(str(expr), "(x + (-y) + (-(2 * z)))") def test_equality_overload(self) -> None: model = cp_model.CpModel() From aa12c651b629d69128dab06ec37d8d4a94743e80 Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Thu, 12 Jun 2025 14:04:51 +0200 Subject: [PATCH 18/81] improve python exprs --- .../python/model_builder_helper.cc | 19 ----------- .../wrappers/model_builder_helper.h | 20 +++++------ ortools/sat/python/cp_model_helper.cc | 34 ------------------- ortools/sat/python/cp_model_helper_test.py | 4 +-- ortools/sat/python/cp_model_test.py | 2 +- ortools/sat/python/linear_expr.h | 20 +++++------ 6 files changed, 23 insertions(+), 76 deletions(-) diff --git a/ortools/linear_solver/python/model_builder_helper.cc b/ortools/linear_solver/python/model_builder_helper.cc index 48f9df1dd0..9a84fbc736 100644 --- a/ortools/linear_solver/python/model_builder_helper.cc +++ b/ortools/linear_solver/python/model_builder_helper.cc @@ -550,25 +550,6 @@ PYBIND11_MODULE(model_builder_helper, m) { py::class_, LinearExpr>(m, "AffineExpr") .def(py::init, double, double>()) - .def("__add__", &AffineExpr::Add, py::arg("other").none(false), - "Returns `self` + `other`.") - .def("__add__", &AffineExpr::AddFloat, py::arg("cst"), - "Returns `self` + `cst`.") - .def("__radd__", &AffineExpr::Add, py::arg("other").none(false), - "Returns `self` + `other`.") - .def("__radd__", &AffineExpr::AddFloat, py::arg("cst"), - "Returns `self` + `cst`.") - .def("__sub__", &AffineExpr::Sub, py::arg("other").none(false), - "Returns `self` - `other`.") - .def("__sub__", &AffineExpr::SubFloat, py::arg("cst"), - "Returns `self` - `cst`.") - .def("__rsub__", &AffineExpr::RSubFloat, py::arg("cst"), - "Returns `cst` - `self`.") - .def("__mul__", &AffineExpr::MulFloat, py::arg("cst"), - "Returns `self` * `cst`.") - .def("__rmul__", &AffineExpr::MulFloat, py::arg("cst"), - "Returns `self` * `cst`.") - .def("__neg__", &AffineExpr::Neg, "Returns -`self`.") .def_property_readonly("expression", &AffineExpr 
::expression) .def_property_readonly("coefficient", &AffineExpr::coefficient) .def_property_readonly("offset", &AffineExpr::offset); diff --git a/ortools/linear_solver/wrappers/model_builder_helper.h b/ortools/linear_solver/wrappers/model_builder_helper.h index 7c6e4f026f..cfd3de5e0e 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.h +++ b/ortools/linear_solver/wrappers/model_builder_helper.h @@ -63,12 +63,12 @@ class LinearExpr : public std::enable_shared_from_this { static std::shared_ptr Constant(double value); std::shared_ptr Add(std::shared_ptr expr); - std::shared_ptr AddFloat(double cst); + virtual std::shared_ptr AddFloat(double cst); std::shared_ptr Sub(std::shared_ptr expr); - std::shared_ptr SubFloat(double cst); - std::shared_ptr RSubFloat(double cst); - std::shared_ptr MulFloat(double cst); - std::shared_ptr Neg(); + virtual std::shared_ptr SubFloat(double cst); + virtual std::shared_ptr RSubFloat(double cst); + virtual std::shared_ptr MulFloat(double cst); + virtual std::shared_ptr Neg(); std::shared_ptr Eq(std::shared_ptr rhs); std::shared_ptr EqCst(double rhs); @@ -243,11 +243,11 @@ class AffineExpr : public LinearExpr { double coefficient() const { return coeff_; } double offset() const { return offset_; } - std::shared_ptr AddFloat(double cst); - std::shared_ptr SubFloat(double cst); - std::shared_ptr RSubFloat(double cst); - std::shared_ptr MulFloat(double cst); - std::shared_ptr Neg(); + std::shared_ptr AddFloat(double cst) override; + std::shared_ptr SubFloat(double cst) override; + std::shared_ptr RSubFloat(double cst) override; + std::shared_ptr MulFloat(double cst) override; + std::shared_ptr Neg() override; private: std::shared_ptr expr_; diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index 371a87b7a2..87ef120b8f 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -1079,40 +1079,6 @@ PYBIND11_MODULE(cp_model_helper, m) { py::class_, 
LinearExpr>( m, "IntAffine", DOC(operations_research, sat, python, IntAffine)) .def(py::init, int64_t, int64_t>()) - .def("__add__", &LinearExpr::Add, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, Add)) - .def("__add__", &IntAffine::AddInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddInt)) - .def("__add__", &LinearExpr::AddFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddFloat)) - .def("__radd__", &LinearExpr::Add, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, Add)) - .def("__radd__", &IntAffine::AddInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddInt)) - .def("__radd__", &LinearExpr::AddFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, AddFloat)) - .def("__sub__", &LinearExpr::Sub, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, Sub)) - .def("__sub__", &IntAffine::SubInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, SubInt)) - .def("__sub__", &LinearExpr::SubFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, SubFloat)) - .def("__rsub__", &LinearExpr::RSub, py::arg("other").none(false), - DOC(operations_research, sat, python, LinearExpr, RSub)) - .def("__rsub__", &IntAffine::RSubInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, RSubInt)) - .def("__rsub__", &LinearExpr::SubFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, RSubFloat)) - .def("__mul__", &IntAffine::MulInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, MulInt)) - .def("__mul__", &LinearExpr::MulFloat, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, MulFloat)) - .def("__rmul__", &IntAffine::MulInt, py::arg("cst"), - DOC(operations_research, sat, python, LinearExpr, MulInt)) - .def("__rmul__", &LinearExpr::MulFloat, py::arg("cst"), - DOC(operations_research, 
sat, python, LinearExpr, MulFloat)) - .def("__neg__", &IntAffine::Neg, - DOC(operations_research, sat, python, LinearExpr, Neg)) .def_property_readonly("expression", &IntAffine::expression, "Returns the linear expression.") .def_property_readonly("coefficient", &IntAffine::coefficient, diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index b0dd988597..d5901787a7 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -303,7 +303,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e5), "(x - 1)") e6 = x - 2 * y self.assertTrue(e6.is_integer()) - self.assertEqual(str(e6), "(x + (-(2 * y)))") + self.assertEqual(str(e6), "(x + (-2 * y))") z = TestIntVar(2, "z", True) e7 = -z self.assertTrue(e7.is_integer()) @@ -323,7 +323,7 @@ class CpModelHelperTest(absltest.TestCase): self.assertEqual(str(e11), "(x + 2 * y + 3 * z - 5)") e12 = x - y - 2 * z - self.assertEqual(str(e12), "(x + (-y) + (-(2 * z)))") + self.assertEqual(str(e12), "(x + (-y) + (-2 * z))") def test_float_lin_expr(self): x = TestIntVar(0, "x") diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index ce36281958..aa06c59b2e 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -252,7 +252,7 @@ class CpModelTest(absltest.TestCase): y = model.NewIntVar(0, 2, "y") z = model.NewIntVar(0, 3, "z") expr = x - y - 2 * z - self.assertEqual(str(expr), "(x + (-y) + (-(2 * z)))") + self.assertEqual(str(expr), "(x + (-y) + (-2 * z))") def test_equality_overload(self) -> None: model = cp_model.CpModel() diff --git a/ortools/sat/python/linear_expr.h b/ortools/sat/python/linear_expr.h index 631f17f05f..ae92d1c676 100644 --- a/ortools/sat/python/linear_expr.h +++ b/ortools/sat/python/linear_expr.h @@ -103,27 +103,27 @@ class LinearExpr : public std::enable_shared_from_this { /// Returns (this) + (expr). 
std::shared_ptr Add(std::shared_ptr other); /// Returns (this) + (cst). - std::shared_ptr AddInt(int64_t cst); + virtual std::shared_ptr AddInt(int64_t cst); /// Returns (this) + (cst). std::shared_ptr AddFloat(double cst); /// Returns (this) - (expr). std::shared_ptr Sub(std::shared_ptr other); /// Returns (this) - (cst). - std::shared_ptr SubInt(int64_t cst); + virtual std::shared_ptr SubInt(int64_t cst); /// Returns (this) - (cst). std::shared_ptr SubFloat(double cst); /// Returns (expr) - (this). std::shared_ptr RSub(std::shared_ptr other); /// Returns (cst) - (this). - std::shared_ptr RSubInt(int64_t cst); + virtual std::shared_ptr RSubInt(int64_t cst); /// Returns (cst) - (this). std::shared_ptr RSubFloat(double cst); /// Returns (this) * (cst). - std::shared_ptr MulInt(int64_t cst); + virtual std::shared_ptr MulInt(int64_t cst); /// Returns (this) * (cst). std::shared_ptr MulFloat(double cst); /// Returns -(this). - std::shared_ptr Neg(); + virtual std::shared_ptr Neg(); /// Returns (this) == (rhs). std::shared_ptr Eq(std::shared_ptr rhs); @@ -381,11 +381,11 @@ class IntAffine : public LinearExpr { /// Returns the offset. 
int64_t offset() const { return offset_; } - std::shared_ptr AddInt(int64_t cst); - std::shared_ptr SubInt(int64_t cst); - std::shared_ptr RSubInt(int64_t cst); - std::shared_ptr MulInt(int64_t cst); - std::shared_ptr Neg(); + std::shared_ptr AddInt(int64_t cst) override; + std::shared_ptr SubInt(int64_t cst) override; + std::shared_ptr RSubInt(int64_t cst) override; + std::shared_ptr MulInt(int64_t cst) override; + std::shared_ptr Neg() override; private: std::shared_ptr expr_; From d8b555bcd420d0a77ee84eecfa290fc98cfcf4a3 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 12 Jun 2025 10:12:57 +0200 Subject: [PATCH 19/81] tools/release:python: Add typing-extensions>=4.12 install --- tools/release/build_delivery_linux.sh | 2 +- tools/release/build_delivery_macos.sh | 2 +- tools/release/build_delivery_win.cmd | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/release/build_delivery_linux.sh b/tools/release/build_delivery_linux.sh index 668cc04649..6c6558f880 100755 --- a/tools/release/build_delivery_linux.sh +++ b/tools/release/build_delivery_linux.sh @@ -209,7 +209,7 @@ function build_python() { command -v python3 | xargs echo "python3: " | tee -a build.log python3 -c "import platform as p; print(p.platform())" | tee -a build.log python3 -m pip install --upgrade --user --break-system-package pip - python3 -m pip install --upgrade --user --break-system-package wheel absl-py mypy mypy-protobuf virtualenv + python3 -m pip install --upgrade --user --break-system-package wheel absl-py mypy mypy-protobuf virtualenv "typing-extensions>=4.12" echo "check protoc-gen-mypy..." 
command -v protoc-gen-mypy | xargs echo "protoc-gen-mypy: " | tee -a build.log protoc-gen-mypy --version | xargs echo "protoc-gen-mypy version: " | tee -a build.log diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 79498d58cd..0babed86ad 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -242,7 +242,7 @@ function build_python() { command -v "python${PY_VERSION}" | xargs echo "python${PY_VERSION}: " | tee -a build.log "python${PY_VERSION}" -c "import platform as p; print(p.platform())" | tee -a build.log "python${PY_VERSION}" -m pip install --upgrade --user pip - "python${PY_VERSION}" -m pip install --upgrade --user wheel absl-py mypy mypy-protobuf protobuf virtualenv + "python${PY_VERSION}" -m pip install --upgrade --user wheel absl-py mypy mypy-protobuf protobuf virtualenv "typing-extensions>=4.12" echo "check protoc-gen-mypy..." command -v protoc-gen-mypy | xargs echo "protoc-gen-mypy: " | tee -a build.log protoc-gen-mypy --version | xargs echo "protoc-gen-mypy version: " | tee -a build.log diff --git a/tools/release/build_delivery_win.cmd b/tools/release/build_delivery_win.cmd index 68a0665244..940e0ff7bd 100644 --- a/tools/release/build_delivery_win.cmd +++ b/tools/release/build_delivery_win.cmd @@ -284,7 +284,7 @@ FOR %%v IN (9 10 11 12 13) DO ( echo Check python3.%%v... 
| tee.exe -a build.log which.exe "C:\python3%%v-64\python.exe" || exit 1 echo "C:\python3%%v-64\python.exe: FOUND" | tee.exe -a build.log - C:\python3%%v-64\python.exe -m pip install --upgrade --user absl-py mypy mypy-protobuf protobuf numpy pandas + C:\python3%%v-64\python.exe -m pip install --upgrade --user absl-py mypy mypy-protobuf protobuf numpy pandas "typing-extensions>=4.12" call :subroutine %%v From 0d00ef7cd39175f362670e5ac9d61639ba09723c Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 13 Jun 2025 13:24:14 +0200 Subject: [PATCH 20/81] cleanup expression for cp-sat and model_builder --- .../python/model_builder_helper.cc | 81 ++++++-------- .../wrappers/model_builder_helper.cc | 66 +++++++++++ .../wrappers/model_builder_helper.h | 62 ++--------- ortools/sat/python/cp_model_helper.cc | 103 ++++++------------ ortools/sat/python/cp_model_test.py | 8 ++ ortools/sat/python/linear_expr.cc | 14 ++- ortools/sat/python/linear_expr.h | 6 +- 7 files changed, 161 insertions(+), 179 deletions(-) diff --git a/ortools/linear_solver/python/model_builder_helper.cc b/ortools/linear_solver/python/model_builder_helper.cc index 9a84fbc736..8036828159 100644 --- a/ortools/linear_solver/python/model_builder_helper.cc +++ b/ortools/linear_solver/python/model_builder_helper.cc @@ -439,11 +439,7 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other); - return expr; - } - return expr->Add(other); + return (num_uses == 4) ? expr->AddInPlace(other) : expr->Add(other); }, py::arg("other").none(false), "Returns the sum of `self` and `other`.") @@ -453,46 +449,43 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? 
expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); }, py::arg("cst"), "Returns `self` + `cst`.") .def( - "__iadd__", + "__radd__", [](py::object self, std::shared_ptr other) -> std::shared_ptr { + const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - expr->AddInPlace(other); - return expr; - }, - py::arg("other").none(false), - "Returns the sum of `self` and `other`.") - .def( - "__iadd__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(cst); - return expr; + return (num_uses == 4) ? expr->AddInPlace(other) : expr->Add(other); }, py::arg("cst"), "Returns `self` + `cst`.") - .def("__radd__", &LinearExpr::Add, py::arg("other").none(false), - "Returns `self` + `other`.") .def( "__radd__", [](py::object self, double cst) -> std::shared_ptr { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); + }, + py::arg("cst"), "Returns `self` + `cst`.") + .def( + "__iadd__", + [](std::shared_ptr expr, + std::shared_ptr other) -> std::shared_ptr { + return expr->AddInPlace(other); + }, + py::arg("other").none(false), + "Returns the sum of `self` and `other`.") + .def( + "__iadd__", + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(cst); }, py::arg("cst"), "Returns `self` + `cst`.") .def( @@ -502,11 +495,8 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other->Neg()); - return expr; - } - return expr->Sub(other); + return (num_uses == 4) ? 
expr->AddInPlace(other->Neg()) + : expr->Sub(other); }, py::arg("other").none(false), "Returns `self` - `other`.") .def( @@ -515,30 +505,23 @@ PYBIND11_MODULE(model_builder_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(-cst); - return expr; - } - return expr->SubFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(-cst) + : expr->SubFloat(cst); }, py::arg("cst"), "Returns `self` - `cst`.") .def( "__isub__", - [](py::object self, + [](std::shared_ptr expr, std::shared_ptr other) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); expr->AddInPlace(other->Neg()); - return expr; + return expr->AddInPlace(other->Neg()); }, py::arg("other").none(false), "Returns `self` - `other`.") .def( "__isub__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(-cst); - return expr; + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(-cst); }, py::arg("cst"), "Returns `self` - `cst`.") .def_property_readonly( diff --git a/ortools/linear_solver/wrappers/model_builder_helper.cc b/ortools/linear_solver/wrappers/model_builder_helper.cc index 611b07058c..bc5bcdf5eb 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.cc +++ b/ortools/linear_solver/wrappers/model_builder_helper.cc @@ -988,6 +988,72 @@ std::string FlatExpr::DebugString() const { return s; } +SumArray::SumArray(std::vector> exprs, + double offset) + : exprs_(std::move(exprs)), offset_(offset) {} + +void SumArray::Visit(ExprVisitor& lin, double c) { + for (int i = 0; i < exprs_.size(); ++i) { + lin.AddToProcess(exprs_[i], c); + } + if (offset_ != 0.0) { + lin.AddConstant(offset_ * c); + } +} + +std::string SumArray::ToString() const { + if (exprs_.empty()) { + if (offset_ != 0.0) { + return absl::StrCat(offset_); + } + } + std::string s = "("; + for (int i = 0; i < exprs_.size(); ++i) { + if (i 
> 0) { + absl::StrAppend(&s, " + "); + } + absl::StrAppend(&s, exprs_[i]->ToString()); + } + if (offset_ != 0.0) { + if (offset_ > 0.0) { + absl::StrAppend(&s, " + ", offset_); + } else { + absl::StrAppend(&s, " - ", -offset_); + } + } + absl::StrAppend(&s, ")"); + return s; +} + +std::string SumArray::DebugString() const { + std::string s = absl::StrCat( + "SumArray(", + absl::StrJoin(exprs_, ", ", + [](std::string* out, std::shared_ptr expr) { + absl::StrAppend(out, expr->DebugString()); + })); + if (offset_ != 0.0) { + absl::StrAppend(&s, ", offset=", offset_); + } + absl::StrAppend(&s, ")"); + return s; +} + +std::shared_ptr SumArray::AddInPlace( + std::shared_ptr expr) { + exprs_.push_back(std::move(expr)); + return shared_from_this(); +} + +std::shared_ptr SumArray::AddFloatInPlace(double cst) { + offset_ += cst; + return shared_from_this(); +} + +int SumArray::num_exprs() const { return exprs_.size(); } + +double SumArray::offset() const { return offset_; } + void FixedValue::Visit(ExprVisitor& lin, double c) { lin.AddConstant(value_ * c); } diff --git a/ortools/linear_solver/wrappers/model_builder_helper.h b/ortools/linear_solver/wrappers/model_builder_helper.h index cfd3de5e0e..4f2371da2c 100644 --- a/ortools/linear_solver/wrappers/model_builder_helper.h +++ b/ortools/linear_solver/wrappers/model_builder_helper.h @@ -26,8 +26,6 @@ #include "absl/container/btree_map.h" #include "absl/container/fixed_array.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_join.h" #include "ortools/linear_solver/linear_solver.pb.h" #include "ortools/linear_solver/model_exporter.h" #include "ortools/util/solve_interrupter.h" @@ -150,61 +148,17 @@ class FlatExpr : public LinearExpr { class SumArray : public LinearExpr { public: explicit SumArray(std::vector> exprs, - double offset) - : exprs_(std::move(exprs)), offset_(offset) {} + double offset); ~SumArray() override = default; - void Visit(ExprVisitor& lin, double c) override { - for (int i = 0; i < 
exprs_.size(); ++i) { - lin.AddToProcess(exprs_[i], c); - } - if (offset_ != 0.0) { - lin.AddConstant(offset_ * c); - } - } + void Visit(ExprVisitor& lin, double c) override; - std::string ToString() const override { - if (exprs_.empty()) { - if (offset_ != 0.0) { - return absl::StrCat(offset_); - } - } - std::string s = "("; - for (int i = 0; i < exprs_.size(); ++i) { - if (i > 0) { - absl::StrAppend(&s, " + "); - } - absl::StrAppend(&s, exprs_[i]->ToString()); - } - if (offset_ != 0.0) { - if (offset_ > 0.0) { - absl::StrAppend(&s, " + ", offset_); - } else { - absl::StrAppend(&s, " - ", -offset_); - } - } - absl::StrAppend(&s, ")"); - return s; - } - - std::string DebugString() const override { - std::string s = absl::StrCat( - "SumArray(", - absl::StrJoin(exprs_, ", ", - [](std::string* out, std::shared_ptr expr) { - absl::StrAppend(out, expr->DebugString()); - })); - if (offset_ != 0.0) { - absl::StrAppend(&s, ", offset=", offset_); - } - absl::StrAppend(&s, ")"); - return s; - } - - void AddInPlace(std::shared_ptr expr) { exprs_.push_back(expr); } - void AddFloatInPlace(double cst) { offset_ += cst; } - int num_exprs() const { return exprs_.size(); } - double offset() const { return offset_; } + std::string ToString() const override; + std::string DebugString() const override; + std::shared_ptr AddInPlace(std::shared_ptr expr); + std::shared_ptr AddFloatInPlace(double cst); + int num_exprs() const; + double offset() const; private: std::vector> exprs_; diff --git a/ortools/sat/python/cp_model_helper.cc b/ortools/sat/python/cp_model_helper.cc index 87ef120b8f..10e57c7657 100644 --- a/ortools/sat/python/cp_model_helper.cc +++ b/ortools/sat/python/cp_model_helper.cc @@ -897,11 +897,7 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other); - return expr; - } - return expr->Add(other); + return (num_uses == 4) ? 
expr->AddInPlace(other) : expr->Add(other); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Add)) @@ -911,11 +907,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddIntInPlace(cst); - return expr; - } - return expr->AddInt(cst); + return (num_uses == 4) ? expr->AddIntInPlace(cst) + : expr->AddInt(cst); }, DOC(operations_research, sat, python, LinearExpr, AddInt)) .def( @@ -924,11 +917,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, AddFloat)) @@ -938,11 +928,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddIntInPlace(cst); - return expr; - } - return expr->AddInt(cst); + return (num_uses == 4) ? expr->AddIntInPlace(cst) + : expr->AddInt(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddInt)) @@ -952,41 +939,31 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(cst); - return expr; - } - return expr->AddFloat(cst); + return (num_uses == 4) ? 
expr->AddFloatInPlace(cst) + : expr->AddFloat(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, AddFloat)) .def( "__iadd__", - [](py::object self, + [](std::shared_ptr expr, std::shared_ptr other) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddInPlace(other); - return expr; + return expr->AddInPlace(other); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Add)) .def( "__iadd__", - [](py::object self, int64_t cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddIntInPlace(cst); - return expr; + [](std::shared_ptr expr, + int64_t cst) -> std::shared_ptr { + return expr->AddIntInPlace(cst); }, DOC(operations_research, sat, python, LinearExpr, AddInt)) .def( "__iadd__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(cst); - return expr; + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(cst); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, AddFloat)) @@ -997,11 +974,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddInPlace(other->Neg()); - return expr; - } - return expr->Sub(other); + return (num_uses == 4) ? expr->AddInPlace(other->Neg()) + : expr->Sub(other); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Sub)) @@ -1011,11 +985,8 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddIntInPlace(-cst); - return expr; - } - return expr->SubInt(cst); + return (num_uses == 4) ? 
expr->AddIntInPlace(-cst) + : expr->SubInt(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubInt)) @@ -1025,41 +996,31 @@ PYBIND11_MODULE(cp_model_helper, m) { const int num_uses = Py_REFCNT(self.ptr()); std::shared_ptr expr = self.cast>(); - if (num_uses == 4) { - expr->AddFloatInPlace(-cst); - return expr; - } - return expr->SubFloat(cst); + return (num_uses == 4) ? expr->AddFloatInPlace(-cst) + : expr->SubFloat(cst); }, py::arg("cst"), DOC(operations_research, sat, python, LinearExpr, SubFloat)) .def( "__isub__", - [](py::object self, + [](std::shared_ptr expr, std::shared_ptr other) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddInPlace(other->MulInt(-1)); - return expr; + return expr->AddInPlace(other->Neg()); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, Sub)) .def( "__isub__", - [](py::object self, int64_t cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddIntInPlace(-cst); - return expr; + [](std::shared_ptr expr, + int64_t cst) -> std::shared_ptr { + return expr->AddIntInPlace(-cst); }, DOC(operations_research, sat, python, LinearExpr, SubInt)) .def( "__isub__", - [](py::object self, double cst) -> std::shared_ptr { - std::shared_ptr expr = - self.cast>(); - expr->AddFloatInPlace(-cst); - return expr; + [](std::shared_ptr expr, + double cst) -> std::shared_ptr { + return expr->AddFloatInPlace(-cst); }, py::arg("other").none(false), DOC(operations_research, sat, python, LinearExpr, SubFloat)) @@ -1074,8 +1035,6 @@ PYBIND11_MODULE(cp_model_helper, m) { .def_property_readonly("coefficient", &FloatAffine::coefficient) .def_property_readonly("offset", &FloatAffine::offset); - // We adding an operator like __add__(int), we need to add all overloads, - // otherwise they are not found. 
py::class_, LinearExpr>( m, "IntAffine", DOC(operations_research, sat, python, IntAffine)) .def(py::init, int64_t, int64_t>()) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index aa06c59b2e..9bbaee5513 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -2461,6 +2461,14 @@ TRFM""" s -= model.new_bool_var("") model.add(s == 10) + def test_radd(self): + model = cp_model.CpModel() + x = [model.new_int_var(0, 10, f"x{i}") for i in range(10)] + expr = 1 + sum(x) + self.assertEqual( + str(expr), "(x0 + x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + 1)" + ) + def test_simplification1(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") diff --git a/ortools/sat/python/linear_expr.cc b/ortools/sat/python/linear_expr.cc index 42077ef46f..f8c2954f62 100644 --- a/ortools/sat/python/linear_expr.cc +++ b/ortools/sat/python/linear_expr.cc @@ -340,8 +340,20 @@ SumArray::SumArray(std::vector> exprs, DCHECK_GE(exprs_.size(), 2); } -void SumArray::AddInPlace(std::shared_ptr expr) { +std::shared_ptr SumArray::AddInPlace( + std::shared_ptr expr) { exprs_.push_back(std::move(expr)); + return shared_from_this(); +} + +std::shared_ptr SumArray::AddIntInPlace(int64_t cst) { + int_offset_ += cst; + return shared_from_this(); +} + +std::shared_ptr SumArray::AddFloatInPlace(double cst) { + double_offset_ += cst; + return shared_from_this(); } bool SumArray::VisitAsInt(IntExprVisitor& lin, int64_t c) { diff --git a/ortools/sat/python/linear_expr.h b/ortools/sat/python/linear_expr.h index ae92d1c676..06d973f9ea 100644 --- a/ortools/sat/python/linear_expr.h +++ b/ortools/sat/python/linear_expr.h @@ -286,9 +286,9 @@ class SumArray : public LinearExpr { std::string ToString() const override; std::string DebugString() const override; - void AddInPlace(std::shared_ptr expr); - void AddIntInPlace(int64_t cst) { int_offset_ += cst; } - void AddFloatInPlace(double cst) { double_offset_ += cst; } + 
std::shared_ptr AddInPlace(std::shared_ptr expr); + std::shared_ptr AddIntInPlace(int64_t cst); + std::shared_ptr AddFloatInPlace(double cst); int num_exprs() const { return exprs_.size(); } int64_t int_offset() const { return int_offset_; } double double_offset() const { return double_offset_; } From 30e2067a971ca1e5bccdc0a6ef4917ad67d47363 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 08:30:06 +0200 Subject: [PATCH 21/81] tools/release: Workaround for macos x86_64 python build failure --- tools/release/build_delivery_macos.sh | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 0babed86ad..5760772703 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -278,7 +278,17 @@ function build_python() { echo "DONE" | tee -a build.log echo -n "Build Python ${PY_VERSION}..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" - cmake --build "temp_python${PY_VERSION}" -j8 -v + cmake --build "temp_python${PY_VERSION}" --target ortools -j8 -v + + if [[ ${PLATFORM} == "x86_64" ]]; then + # on macos X86_64 stubgen will timeout -> need to build 2 times + cmake --build "temp_python${PY_VERSION}" -j8 -v || true + sleep 5 + cmake --build "temp_python${PY_VERSION}" -j8 -v + else + cmake --build "temp_python${PY_VERSION}" -j8 -v + fi + echo " Check libortools.dylib..." 
| tee -a build.log otool -L "temp_python${PY_VERSION}/lib/libortools.dylib" | grep -vqz "/Users" echo " DONE" | tee -a build.log From 88931faad618b7e7f63ddbddef34a64669b77af9 Mon Sep 17 00:00:00 2001 From: Florian OMNES Date: Mon, 16 Jun 2025 10:24:21 +0200 Subject: [PATCH 22/81] Fix bz2.dll install path for windows-cpp archive --- patches/bzip2.patch | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/patches/bzip2.patch b/patches/bzip2.patch index ace4852290..ee1caf8d53 100644 --- a/patches/bzip2.patch +++ b/patches/bzip2.patch @@ -1,5 +1,5 @@ diff --git a/CMakeLists.txt b/CMakeLists.txt -index c4b0b6e..30f7652 100644 +index c4b0b6e..ee39341 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,10 @@ @@ -24,7 +24,7 @@ index c4b0b6e..30f7652 100644 # Windows resource file set(BZ2_RES "") -@@ -299,21 +304,30 @@ endif() +@@ -299,21 +304,32 @@ endif() if(ENABLE_SHARED_LIB) # The libbz2 shared library. @@ -59,13 +59,15 @@ index c4b0b6e..30f7652 100644 + ) + install(TARGETS BZip2 + EXPORT ${PROJECT_NAME}Targets -+ DESTINATION ${CMAKE_INSTALL_LIBDIR}) ++ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} # For Windows DLLs and executables ++ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} # For shared libraries on UNIX ++ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) # For static libs or import libs install(FILES bzlib.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) + add_library(BZip2::BZip2 ALIAS BZip2) if(USE_OLD_SONAME) # Hack to support the old libbz2.so.1.0 version by including an extra copy. -@@ -323,16 +337,22 @@ if(ENABLE_SHARED_LIB) +@@ -323,16 +339,22 @@ if(ENABLE_SHARED_LIB) add_library(bz2_old_soname SHARED ${BZ2_RES}) target_sources(bz2_old_soname PRIVATE ${BZ2_SOURCES} @@ -92,7 +94,7 @@ index c4b0b6e..30f7652 100644 endif() endif() endif() -@@ -341,9 +361,13 @@ if(ENABLE_STATIC_LIB) +@@ -341,9 +363,13 @@ if(ENABLE_STATIC_LIB) # The libbz2 static library. 
add_library(bz2_static STATIC) target_sources(bz2_static @@ -109,7 +111,7 @@ index c4b0b6e..30f7652 100644 # Use '-fPIC'/'-fPIE' option for static libraries by default. # You may build with ENABLE_STATIC_LIB_IS_PIC=OFF to disable PIC for the static library. -@@ -357,8 +381,13 @@ if(ENABLE_STATIC_LIB) +@@ -357,8 +383,13 @@ if(ENABLE_STATIC_LIB) SOVERSION ${LT_SOVERSION} ARCHIVE_OUTPUT_NAME bz2_static) target_compile_definitions(bz2_static PUBLIC BZ2_STATICLIB) @@ -124,7 +126,7 @@ index c4b0b6e..30f7652 100644 endif() if(ENABLE_APP) -@@ -373,7 +402,9 @@ if(ENABLE_APP) +@@ -373,7 +404,9 @@ if(ENABLE_APP) else() target_compile_definitions(bzip2 PUBLIC BZ_LCCWIN32=0 BZ_UNIX) endif() @@ -135,7 +137,7 @@ index c4b0b6e..30f7652 100644 # Create bzip2 copies bzcat and bunzip. # The default behavior is altered in bzip2.c code by checking the program name. -@@ -391,7 +422,9 @@ if(ENABLE_APP) +@@ -391,7 +424,9 @@ if(ENABLE_APP) else() target_compile_definitions(bzip2recover PUBLIC BZ_LCCWIN32=0 BZ_UNIX) endif() @@ -146,7 +148,7 @@ index c4b0b6e..30f7652 100644 if(ENABLE_EXAMPLES) if(ENABLE_SHARED_LIB) -@@ -399,8 +432,10 @@ if(ENABLE_APP) +@@ -399,8 +434,10 @@ if(ENABLE_APP) add_executable(dlltest) target_sources(dlltest PRIVATE dlltest.c) @@ -159,7 +161,7 @@ index c4b0b6e..30f7652 100644 endif() endif() -@@ -419,6 +454,10 @@ if(ENABLE_APP) +@@ -419,6 +456,10 @@ if(ENABLE_APP) endif() From 808c7d5e0b7df363cb5f3bf0efe7844b312803dd Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 11:40:09 +0200 Subject: [PATCH 23/81] tools/release: add python build log --- tools/release/build_delivery_macos.sh | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 5760772703..c6e62e3aa7 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -276,23 +276,35 @@ function build_python() { echo -n "Cleaning Python 
${PY_VERSION}..." | tee -a build.log rm -rf "temp_python${PY_VERSION}" echo "DONE" | tee -a build.log - echo -n "Build Python ${PY_VERSION}..." | tee -a build.log + + echo "Build Python ${PY_VERSION}..." | tee -a build.log + echo -n " CMake configure..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" + echo "DONE" | tee -a build.log + + echo -n " Build libortools..." | tee -a build.log cmake --build "temp_python${PY_VERSION}" --target ortools -j8 -v + echo "DONE" | tee -a build.log if [[ ${PLATFORM} == "x86_64" ]]; then + echo -n " Build all..." | tee -a build.log # on macos X86_64 stubgen will timeout -> need to build 2 times cmake --build "temp_python${PY_VERSION}" -j8 -v || true + echo "DONE" | tee -a build.log sleep 5 + echo -n " ReBuild all..." | tee -a build.log cmake --build "temp_python${PY_VERSION}" -j8 -v + echo "DONE" | tee -a build.log else + echo -n " Build all..." | tee -a build.log cmake --build "temp_python${PY_VERSION}" -j8 -v + echo "DONE" | tee -a build.log fi - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." 
| tee -a build.log otool -L "temp_python${PY_VERSION}/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log echo "DONE" | tee -a build.log + echo "Build Python ${PY_VERSION}...DONE" | tee -a build.log #cmake --build temp_python${PY_VERSION} --target test #echo "cmake test_python${PY_VERSION}: DONE" | tee -a build.log From 863ba8a91d02e541326af9735aace86f17424c56 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 11:44:01 +0200 Subject: [PATCH 24/81] ortools: bump version from v9.13 to v9.14 --- MODULE.bazel | 2 +- Version.txt | 2 +- ortools/base/BUILD.bazel | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/MODULE.bazel b/MODULE.bazel index df19d71a17..8bac0ad7d2 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -5,7 +5,7 @@ # For more details, please check https://github.com/bazelbuild/bazel/issues/18958 ############################################################################### -OR_TOOLS_VERSION = "9.13" +OR_TOOLS_VERSION = "9.14" module( name = "or-tools", diff --git a/Version.txt b/Version.txt index 78863f2d95..1d4910fae0 100644 --- a/Version.txt +++ b/Version.txt @@ -1,3 +1,3 @@ OR_TOOLS_MAJOR=9 -OR_TOOLS_MINOR=13 +OR_TOOLS_MINOR=14 #PRE_RELEASE=YES diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index f5750c3e04..a6ac333dd8 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -75,7 +75,7 @@ cc_library( ], copts = [ "-DOR_TOOLS_MAJOR=9", - "-DOR_TOOLS_MINOR=13", + "-DOR_TOOLS_MINOR=14", "-DOR_TOOLS_PATCH=9999", ], linkopts = select({ From 91eae80e1c0c3d8f3db97df4b4fce22cd9ac89f3 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 11:47:36 +0200 Subject: [PATCH 25/81] cmake: Format samples --- cmake/samples/dotnet/CPSample.cs | 28 +++++++----- cmake/samples/dotnet/LPSample.cs | 22 +++++----- cmake/samples/dotnet/RoutingSample.cs | 63 ++++++++++++++------------- cmake/samples/dotnet/SATSample.cs | 34 ++++++++------- 
cmake/samples/python/sample.py | 49 +++++++++++---------- 5 files changed, 106 insertions(+), 90 deletions(-) diff --git a/cmake/samples/dotnet/CPSample.cs b/cmake/samples/dotnet/CPSample.cs index 35cb078ddc..f0a64857f6 100644 --- a/cmake/samples/dotnet/CPSample.cs +++ b/cmake/samples/dotnet/CPSample.cs @@ -16,22 +16,26 @@ using Xunit; using Google.OrTools.ConstraintSolver; -namespace Google.OrTools.Tests { - public class ConstraintSolverTest { +namespace Google.OrTools.Tests +{ +public class ConstraintSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - Solver solver = new Solver("Solver"); - IntVar x = solver.MakeIntVar(3, 7, "x"); + public void SolverTest(bool callGC) + { + Solver solver = new Solver("Solver"); + IntVar x = solver.MakeIntVar(3, 7, "x"); - if (callGC) { - GC.Collect(); - } + if (callGC) + { + GC.Collect(); + } - Assert.Equal(3, x.Min()); - Assert.Equal(7, x.Max()); - Assert.Equal("x(3..7)", x.ToString()); + Assert.Equal(3, x.Min()); + Assert.Equal(7, x.Max()); + Assert.Equal("x(3..7)", x.ToString()); } - } +} } // namespace Google.Sample.Tests diff --git a/cmake/samples/dotnet/LPSample.cs b/cmake/samples/dotnet/LPSample.cs index 523a3db9f2..fecda50312 100644 --- a/cmake/samples/dotnet/LPSample.cs +++ b/cmake/samples/dotnet/LPSample.cs @@ -16,19 +16,21 @@ using Xunit; using Google.OrTools.LinearSolver; -namespace Google.OrTools.Tests { - public class LinearSolverTest { +namespace Google.OrTools.Tests +{ +public class LinearSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - Solver solver = new Solver( - "Solver", - Solver.OptimizationProblemType.CLP_LINEAR_PROGRAMMING); + public void SolverTest(bool callGC) + { + Solver solver = new Solver("Solver", Solver.OptimizationProblemType.CLP_LINEAR_PROGRAMMING); - if (callGC) { - GC.Collect(); - } + if (callGC) + { + GC.Collect(); + } } - } +} } // namespace Google.Sample.Tests diff --git 
a/cmake/samples/dotnet/RoutingSample.cs b/cmake/samples/dotnet/RoutingSample.cs index 7a1a44f419..05db8db60e 100644 --- a/cmake/samples/dotnet/RoutingSample.cs +++ b/cmake/samples/dotnet/RoutingSample.cs @@ -16,38 +16,41 @@ using Xunit; using Google.OrTools.ConstraintSolver; -namespace Google.OrTools.Tests { - public class RoutingSolverTest { +namespace Google.OrTools.Tests +{ +public class RoutingSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - // Create Routing Index Manager - RoutingIndexManager manager = new RoutingIndexManager( - 5/*locations*/, 1/*vehicle*/, 0/*depot*/); - // Create Routing Model. - RoutingModel routing = new RoutingModel(manager); - // Create a distance callback. - int transitCallbackIndex = routing.RegisterTransitCallback( - (long fromIndex, long toIndex) => { - // Convert from routing variable Index to distance matrix NodeIndex. - var fromNode = manager.IndexToNode(fromIndex); - var toNode = manager.IndexToNode(toIndex); - return Math.Abs(toNode - fromNode); - }); - // Define cost of each arc. - routing.SetArcCostEvaluatorOfAllVehicles(transitCallbackIndex); - if (callGC) { - GC.Collect(); - } - // Setting first solution heuristic. - RoutingSearchParameters searchParameters = - operations_research_constraint_solver.DefaultRoutingSearchParameters(); - searchParameters.FirstSolutionStrategy = - FirstSolutionStrategy.Types.Value.PathCheapestArc; - Assignment solution = routing.SolveWithParameters(searchParameters); - // 0 --(+1)-> 1 --(+1)-> 2 --(+1)-> 3 --(+1)-> 4 --(+4)-> 0 := +8 - Assert.Equal(8, solution.ObjectiveValue()); + public void SolverTest(bool callGC) + { + // Create Routing Index Manager + RoutingIndexManager manager = new RoutingIndexManager(5 /*locations*/, 1 /*vehicle*/, 0 /*depot*/); + // Create Routing Model. + RoutingModel routing = new RoutingModel(manager); + // Create a distance callback. 
+ int transitCallbackIndex = routing.RegisterTransitCallback((long fromIndex, long toIndex) => + { + // Convert from routing variable Index to + // distance matrix NodeIndex. + var fromNode = manager.IndexToNode(fromIndex); + var toNode = manager.IndexToNode(toIndex); + return Math.Abs(toNode - fromNode); + }); + // Define cost of each arc. + routing.SetArcCostEvaluatorOfAllVehicles(transitCallbackIndex); + if (callGC) + { + GC.Collect(); + } + // Setting first solution heuristic. + RoutingSearchParameters searchParameters = + operations_research_constraint_solver.DefaultRoutingSearchParameters(); + searchParameters.FirstSolutionStrategy = FirstSolutionStrategy.Types.Value.PathCheapestArc; + Assignment solution = routing.SolveWithParameters(searchParameters); + // 0 --(+1)-> 1 --(+1)-> 2 --(+1)-> 3 --(+1)-> 4 --(+4)-> 0 := +8 + Assert.Equal(8, solution.ObjectiveValue()); } - } +} } // namespace Google.Sample.Tests diff --git a/cmake/samples/dotnet/SATSample.cs b/cmake/samples/dotnet/SATSample.cs index 51e287e804..7e74e24860 100644 --- a/cmake/samples/dotnet/SATSample.cs +++ b/cmake/samples/dotnet/SATSample.cs @@ -16,26 +16,30 @@ using Xunit; using Google.OrTools.Sat; -namespace Google.OrTools.Tests { - public class SatSolverTest { +namespace Google.OrTools.Tests +{ +public class SatSolverTest +{ [Theory] [InlineData(false)] [InlineData(true)] - public void SolverTest(bool callGC) { - CpModel model = new CpModel(); + public void SolverTest(bool callGC) + { + CpModel model = new CpModel(); - int num_vals = 3; - IntVar x = model.NewIntVar(0, num_vals - 1, "x"); - IntVar y = model.NewIntVar(0, num_vals - 1, "y"); - IntVar z = model.NewIntVar(0, num_vals - 1, "z"); + int num_vals = 3; + IntVar x = model.NewIntVar(0, num_vals - 1, "x"); + IntVar y = model.NewIntVar(0, num_vals - 1, "y"); + IntVar z = model.NewIntVar(0, num_vals - 1, "z"); - model.Add(x != y); + model.Add(x != y); - CpSolver solver = new CpSolver(); - if (callGC) { - GC.Collect(); - } - CpSolverStatus 
status = solver.Solve(model); + CpSolver solver = new CpSolver(); + if (callGC) + { + GC.Collect(); + } + CpSolverStatus status = solver.Solve(model); } - } +} } // namespace Google.Sample.Tests diff --git a/cmake/samples/python/sample.py b/cmake/samples/python/sample.py index 758346bcf1..ebeb7be394 100644 --- a/cmake/samples/python/sample.py +++ b/cmake/samples/python/sample.py @@ -14,12 +14,15 @@ """Sample to test or-tools installation.""" import ortools + # from ortools.algorithms import knapsack_solver from ortools.constraint_solver import pywrapcp + # from ortools.graph.python import linear_sum_assignment # from ortools.graph.python import max_flow # from ortools.graph.python import min_cost_flow from ortools.linear_solver import pywraplp + # from ortools.linear_solver import linear_solver_pb2 # from ortools.sat.python import cp_model_helper # from ortools.sat.python import cp_model @@ -28,34 +31,34 @@ from ortools.linear_solver import pywraplp def lpsolver_test(): - """Test pywraplp.""" - print('Test lpsolver...') - lpsolver = pywraplp.Solver('LinearTest', - pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) - lpsolver.Solve() - print('Test lpsolver...DONE') + """Test pywraplp.""" + print("Test lpsolver...") + lpsolver = pywraplp.Solver("LinearTest", pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) + lpsolver.Solve() + print("Test lpsolver...DONE") def cpsolver_test(): - """Test pywrapcp.""" - print('Test cpsolver...') - cpsolver = pywrapcp.Solver('ConstraintTest') - num_vals = 3 - x = cpsolver.IntVar(0, num_vals - 1, 'x') - y = cpsolver.IntVar(0, num_vals - 1, 'y') - z = cpsolver.IntVar(0, num_vals - 1, 'z') - cpsolver.Add(x != y) - db = cpsolver.Phase([x, y, z], cpsolver.CHOOSE_FIRST_UNBOUND, - cpsolver.ASSIGN_MIN_VALUE) - cpsolver.Solve(db) - print('Test cpsolver...DONE') + """Test pywrapcp.""" + print("Test cpsolver...") + cpsolver = pywrapcp.Solver("ConstraintTest") + num_vals = 3 + x = cpsolver.IntVar(0, num_vals - 1, "x") + y = cpsolver.IntVar(0, num_vals - 1, "y") + z 
= cpsolver.IntVar(0, num_vals - 1, "z") + cpsolver.Add(x != y) + db = cpsolver.Phase( + [x, y, z], cpsolver.CHOOSE_FIRST_UNBOUND, cpsolver.ASSIGN_MIN_VALUE + ) + cpsolver.Solve(db) + print("Test cpsolver...DONE") def main(): - print(ortools.__version__) - lpsolver_test() - cpsolver_test() + print(ortools.__version__) + lpsolver_test() + cpsolver_test() -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() From 56e565a2e733f9037ddda77d7303b6ac15e43508 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 11:54:43 +0200 Subject: [PATCH 26/81] cmake: Fix internal README.md --- cmake/README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/cmake/README.md b/cmake/README.md index a233fb8142..816f67d067 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -193,10 +193,10 @@ CMake Option | Default Value | Note `BUILD_DOTNET` | OFF | Build .Net wrapper and packages `BUILD_JAVA` | OFF | Build Java wrapper and packages `BUILD_PYTHON` | OFF | Build Python wrapper and package - | | +| | `BUILD_FLATZINC` | ON\* | Build the flatzinc library
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_GLOP` | OFF\* | Build the standalone Glop library
**Forced** to OFF if `BUILD_CXX=ON`, otherwise default to ON - | | +| **Dependencies** | `BUILD_DEPS` | OFF* | Default to ON if `BUILD_JAVA=ON` or `BUILD_PYTHON=ON` or `BUILD_DOTNET=ON` `BUILD_ZLIB` | OFF* | Build the zlib dynamic library
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_BZip2` | OFF* | Build the bzip2 dynamic library
**Forced** to ON if `BUILD_DEPS=ON` @@ -204,44 +204,44 @@ CMake Option | Default Value | Note `BUILD_Protobuf` | OFF* | Build the protobuf dynamic libraries
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_re2` | OFF* | Build the re2 dynamic libraries
**Forced** to ON if `BUILD_DEPS=ON` `BUILD_Eigen3` | OFF* | Build the Eigen3 libraries
**Forced** to ON if `BUILD_DEPS=ON` - | | +| Coin-OR | `USE_COINOR` | ON\* | Enable Coin-OR support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_CoinUtils` | OFF\* | Build the CoinUtils dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Osi` | OFF\* | Build the Osi dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Clp` | OFF\* | Build the Clp dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Cgl` | OFF\* | Build the Cgl dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` `BUILD_Cbc` | OFF\* | Build the Cbc dynamic library
**Forced** to ON if `USE_COINOR=ON` **and** `BUILD_DEPS=ON` - | | +| GLPK | `USE_GLPK` | OFF\* | Enable GLPK support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_GLPK` | OFF\* | Build the GLPK dynamic libraries
**Forced** to ON if `USE_GLPK=ON` **and** `BUILD_DEPS=ON` - | | +| HiGHS | `USE_HIGHS` | ON\* | Enable HIGHS support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_HIGHS` | OFF\* | Build the HiGHS dynamic libraries
**Forced** to ON if `USE_HIGHS=ON` **and** `BUILD_DEPS=ON` - | | +| SCIP | `USE_SCIP` | ON\* | Enable SCIP support
**Forced** to OFF if `BUILD_CXX=OFF` `BUILD_SCIP` | OFF\* | Build the SCIP dynamic libraries
**Forced** to ON if `USE_SCIP=ON` **and** `BUILD_DEPS=ON` - | | +| CPLEX `USE_CPLEX` | OFF | Enable CPLEX support - | | +| **Documentation** | `BUILD_DOC` | OFF\* | Build all documentations `BUILD_CXX_DOC` | OFF\* | Build C++ documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_DOTNET_DOC` | OFF\* | Build .Net documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_JAVA_DOC` | OFF\* | Build Java documentation
**Forced** to ON if `BUILD_DOC=ON` `BUILD_PYTHON_DOC` | OFF\* | Build Python documentation
**Forced** to ON if `BUILD_DOC=ON` `INSTALL_DOC` | OFF\* | Install all documentations
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_DOC=OFF` - | | +| **Samples** | `BUILD_SAMPLES` | ON\* | Build all samples
Default to ON if `BUILD_DEPS=ON` `BUILD_CXX_SAMPLES` | ON\* | Build all C++ samples
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_SAMPLE=OFF` `BUILD_DOTNET_SAMPLES` | ON\* | Build all .Net samples
**Forced** to OFF if `BUILD_DOTNET=OFF` or `BUILD_SAMPLE=OFF` `BUILD_JAVA_SAMPLES` | ON\* | Build all Java samples
**Forced** to OFF if `BUILD_JAVA=OFF` or `BUILD_SAMPLE=OFF` `BUILD_PYTHON_SAMPLES` | ON\* | Build all Python samples
**Forced** to OFF if `BUILD_PYTHON=OFF` or `BUILD_SAMPLE=OFF` - | | +| **Examples** | `BUILD_EXAMPLES` | ON\* | Build all examples
Default to ON if `BUILD_DEPS=ON` `BUILD_CXX_EXAMPLES` | ON\* | Build all C++ examples
**Forced** to OFF if `BUILD_CXX=OFF` or `BUILD_SAMPLE=OFF` `BUILD_DOTNET_EXAMPLES` | ON\* | Build all .Net examples
**Forced** to OFF if `BUILD_DOTNET=OFF` or `BUILD_SAMPLE=OFF` `BUILD_JAVA_EXAMPLES` | ON\* | Build all Java examples
**Forced** to OFF if `BUILD_JAVA=OFF` or `BUILD_SAMPLE=OFF` `BUILD_PYTHON_EXAMPLES` | ON\* | Build all Python examples
**Forced** to OFF if `BUILD_PYTHON=OFF` or `BUILD_SAMPLE=OFF` - | | +| **.Net** | `USE_DOTNET_46` | OFF | Enable .Net Framework 4.6 support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_461` | OFF | Enable .Net Framework 4.6.1 support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_462` | OFF | Enable .Net Framework 4.6.2 support
Only available if `BUILD_DOTNET=ON` @@ -253,11 +253,11 @@ CMake Option | Default Value | Note `USE_DOTNET_8` | ON | Enable .Net 8 LTS support
Only available if `BUILD_DOTNET=ON` `USE_DOTNET_9` | OFF | Enable .Net 9 support
Only available if `BUILD_DOTNET=ON` `UNIVERSAL_DOTNET_PACKAGE` | OFF | Build a multi platform package (i.e. `Google.OrTools` will depends on all runtime packages)
Only available if `BUILD_DOTNET=ON` - | | +| **Java** | `SKIP_GPG` | ON | Disable GPG sign
Only available if `BUILD_JAVA=ON` `UNIVERSAL_JAVA_PACKAGE` | OFF | Build a multi platform package (i.e. `ortools-java` will depends on all native packages)
Only available if `BUILD_JAVA=ON` `BUILD_FAT_JAR` | OFF | Build a `ortools-java` .jar that includes all of its own Maven dependencies, including the native package
Only available if `BUILD_JAVA=ON` - | | +| **Python** | `BUILD_pybind11` | `BUILD_DEPS` | Static build the pybind11 libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` `BUILD_pybind11_abseil` | `BUILD_DEPS` | Static build the pybind11_abseil libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` `BUILD_pybind11_protobuf` | `BUILD_DEPS` | Static build the pybind11_protobuf libraries
**Forced** to ON if `BUILD_DEPS=ON`
Only available if `BUILD_PYTHON=ON` @@ -265,7 +265,7 @@ CMake Option | Default Value | Note `BUILD_VENV` | `BUILD_TESTING` | Create python venv in `BINARY_DIR/python/venv`
**Forced** to ON if `BUILD_TESTING=ON`
Only available if `BUILD_PYTHON=ON` `VENV_USE_SYSTEM_SITE_PACKAGES` | OFF | Python venv can use system site package (e.g. `py3-numpy` on Alpine)
Only available if `BUILD_PYTHON=ON` and `BUILD_VENV=ON` `FETCH_PYTHON_DEPS` | `BUILD_DEPS` | Fetch python modules needed to build ortools package
Only available if `BUILD_PYTHON=ON` - | | +| | ## Integrating OR-Tools in your CMake Project From 7096031050178bd6749718424d1c59db613c8579 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 14:54:04 +0200 Subject: [PATCH 27/81] graph: export from google3 dump_vars: Add support for StrongInt and StrongVector --- ortools/base/BUILD.bazel | 4 + ortools/base/dump_vars.h | 11 + ortools/base/dump_vars_test.cc | 18 + ortools/graph/BUILD.bazel | 7 +- ortools/graph/bounded_dijkstra.h | 252 ++++---- ortools/graph/bounded_dijkstra_test.cc | 542 ++++++++++-------- ortools/graph/graph.h | 50 +- ortools/graph/graph_io.h | 4 +- ortools/graph/graph_test.cc | 240 +++++--- .../assignment_linear_sum_assignment.py | 1 + ortools/graph/samples/assignment_min_flow.py | 1 + ortools/graph/samples/balance_min_flow.py | 1 + ortools/graph/samples/dijkstra_directed.cc | 4 +- ortools/graph/samples/dijkstra_undirected.cc | 4 +- .../graph/samples/simple_max_flow_program.py | 1 + .../samples/simple_min_cost_flow_program.py | 1 + 16 files changed, 674 insertions(+), 467 deletions(-) diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel index a6ac333dd8..c118a14e85 100644 --- a/ortools/base/BUILD.bazel +++ b/ortools/base/BUILD.bazel @@ -183,6 +183,8 @@ cc_library( "//conditions:default": [], }), deps = [ + ":strong_int", + ":strong_vector", "@abseil-cpp//absl/container:inlined_vector", ], ) @@ -199,6 +201,8 @@ cc_test( }), deps = [ ":dump_vars", + ":strong_int", + ":strong_vector", "@abseil-cpp//absl/strings", "@googletest//:gtest_main", ], diff --git a/ortools/base/dump_vars.h b/ortools/base/dump_vars.h index 8413948cd3..61e6073084 100644 --- a/ortools/base/dump_vars.h +++ b/ortools/base/dump_vars.h @@ -48,6 +48,8 @@ #include #include "absl/container/inlined_vector.h" +#include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" /* need extra level to force extra eval */ #define DUMP_FOR_EACH_N0(F) @@ -138,6 +140,15 @@ std::ostream& 
operator<<(std::ostream& os, const ::std::optional& opt) { return os; } +// needed by graph tests +template +std::ostream& operator<<(std::ostream& os, const ::util_intops::StrongVector& vec) { + for (U it : vec) { + os << ::std::to_string(it) << ','; + } + return os; +} + using DumpNames = ::std::vector<::std::string>; struct print_fields { diff --git a/ortools/base/dump_vars_test.cc b/ortools/base/dump_vars_test.cc index 1a295f386a..2dccc6381d 100644 --- a/ortools/base/dump_vars_test.cc +++ b/ortools/base/dump_vars_test.cc @@ -21,6 +21,12 @@ #include #include "gtest/gtest.h" +#include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" + +namespace util_intops { +DEFINE_STRONG_INT_TYPE(CustomStrongInt, uint32_t); +} // namespace util_intops namespace operations_research::base { namespace { @@ -124,6 +130,18 @@ TEST(DumpVars, Vector) { EXPECT_EQ("vec = 49.299999,3.140000,", DUMP_VARS(vec).str()); } +TEST(DumpVars, StrongInt) { + ::util_intops::CustomStrongInt val(42); + EXPECT_EQ(R"(val = 42)", ToString(DUMP_VARS(val))); + EXPECT_EQ(R"(val = 42)", DUMP_VARS(val).str()); +} + +TEST(DumpVars, StrongVector) { + ::util_intops::StrongVector<::util_intops::CustomStrongInt, float> vec = {49.3, 3.14}; + EXPECT_EQ(R"(vec = 49.299999,3.140000,)", ToString(DUMP_VARS(vec))); + EXPECT_EQ(R"(vec = 49.299999,3.140000,)", DUMP_VARS(vec).str()); +} + TEST(DumpVars, Optional) { std::optional of = {}; EXPECT_EQ("of = (none)", ToString(DUMP_VARS(of))); diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel index d8d0a5c07d..fac523588a 100644 --- a/ortools/graph/BUILD.bazel +++ b/ortools/graph/BUILD.bazel @@ -52,6 +52,8 @@ cc_test( ":graph", "//ortools/base:gmock_main", "//ortools/base:intops", + "//ortools/base:strong_vector", + "@abseil-cpp//absl/algorithm:container", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random", "@abseil-cpp//absl/strings", @@ -86,7 +88,9 @@ cc_library( hdrs = ["bounded_dijkstra.h"], deps = [ ":graph", + 
"//ortools/base:intops", "//ortools/base:iterator_adaptors", + "//ortools/base:strong_vector", "//ortools/base:threadpool", "//ortools/base:top_n", "@abseil-cpp//absl/algorithm:container", @@ -107,6 +111,7 @@ cc_test( ":test_util", "//ortools/base:dump_vars", "//ortools/base:gmock_main", + "//ortools/base:intops", "//ortools/util:flat_matrix", "@abseil-cpp//absl/log:check", "@abseil-cpp//absl/random", @@ -858,7 +863,7 @@ cc_test( deps = [ ":iterators", "//ortools/base:gmock_main", - "//ortools/base:strong_int", + "//ortools/base:intops", ], ) diff --git a/ortools/graph/bounded_dijkstra.h b/ortools/graph/bounded_dijkstra.h index e4522e5d90..98a7fc7e5f 100644 --- a/ortools/graph/bounded_dijkstra.h +++ b/ortools/graph/bounded_dijkstra.h @@ -15,8 +15,10 @@ #define OR_TOOLS_GRAPH_BOUNDED_DIJKSTRA_H_ #include +#include #include #include +#include #include #include @@ -25,6 +27,8 @@ #include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/iterator_adaptors.h" +#include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" #include "ortools/base/top_n.h" #include "ortools/graph/graph.h" @@ -54,22 +58,40 @@ namespace operations_research { // is >= limit we will return {limit, {}}. As a consequence any arc length >= // limit is the same as no arc. The code is also overflow-safe and will behave // correctly if the limit is int64max or infinity. -template -std::pair> SimpleOneToOneShortestPath( - int source, int destination, absl::Span tails, - absl::Span heads, absl::Span lengths, +template +std::pair> SimpleOneToOneShortestPath( + NodeIndex source, NodeIndex destination, absl::Span tails, + absl::Span heads, absl::Span lengths, DistanceType limit = std::numeric_limits::max()); -template +namespace internal { + +// TODO(user): We should move `is_strong_int` to util/intops/strong_int.h. 
+template +struct is_strong_int : std::false_type {}; + +template +struct is_strong_int<::util_intops::StrongInt> + : std::true_type {}; + +template +using IndexedVector = + std::conditional_t::value, + ::util_intops::StrongVector, + std::vector>; + +template class ElementGetter { public: - explicit ElementGetter(const std::vector& c) : c_(c) {} - const T& operator()(int index) const { return c_[index]; } + explicit ElementGetter(const IndexedVector& c) : c_(c) {} + const T& operator()(ArcIndex index) const { return c_[index]; } private: - const std::vector& c_; + const IndexedVector& c_; }; +} // namespace internal + // A wrapper that holds the memory needed to run many bounded shortest path // computations on the given graph. The graph must implement the // interface described in graph.h (without the need for reverse arcs). @@ -92,12 +114,20 @@ class ElementGetter { // negative source_offset, arc with a length greater than the distance_limit can // still be considered! template > + class ArcLengthFunctor = internal::ElementGetter< + DistanceType, typename GraphType::ArcIndex>> class BoundedDijkstraWrapper { public: - typedef typename GraphType::NodeIndex node_type; + typedef typename GraphType::NodeIndex NodeIndex; + typedef typename GraphType::ArcIndex ArcIndex; typedef DistanceType distance_type; + // A vector of T, indexed by NodeIndex/ArcIndex. + template + using ByNode = internal::IndexedVector; + template + using ByArc = internal::IndexedVector; + // IMPORTANT: Both arguments must outlive the class. The arc lengths cannot be // negative and the vector must be of the correct size (both preconditions are // CHECKed). @@ -106,7 +136,7 @@ class BoundedDijkstraWrapper { // RunBoundedDijkstra(). That's fine. Doing so will obviously invalidate the // reader API of the last Dijkstra run, which could return junk, or crash. 
BoundedDijkstraWrapper(const GraphType* graph, - const std::vector* arc_lengths); + const ByArc* arc_lengths); // Variant that takes a custom arc length functor and copies it locally. BoundedDijkstraWrapper(const GraphType* graph, @@ -116,8 +146,8 @@ class BoundedDijkstraWrapper { // of the graph within the distance limit (exclusive). The first element of // the returned vector will always be the source_node with a distance of zero. // See RunBoundedDijkstraFromMultipleSources() for more information. - const std::vector& RunBoundedDijkstra(int source_node, - DistanceType distance_limit) { + const std::vector& RunBoundedDijkstra( + NodeIndex source_node, DistanceType distance_limit) { return RunBoundedDijkstraFromMultipleSources({{source_node, 0}}, distance_limit); } @@ -127,7 +157,8 @@ class BoundedDijkstraWrapper { // // If this returns true, you can get the path distance with distances()[to] // and the path with ArcPathTo(to) or NodePathTo(to). - bool OneToOneShortestPath(int from, int to, DistanceType distance_limit); + bool OneToOneShortestPath(NodeIndex from, NodeIndex to, + DistanceType distance_limit); // Returns the list of all the nodes which are under the given distance limit // (exclusive) from at least one of the given source nodes (which also have @@ -136,8 +167,8 @@ class BoundedDijkstraWrapper { // By "distance", we mean the length of the shortest path from any source // plus the source's distance offset, where the length of a path is the // sum of the length of its arcs - const std::vector& RunBoundedDijkstraFromMultipleSources( - const std::vector>& + const std::vector& RunBoundedDijkstraFromMultipleSources( + const std::vector>& sources_with_distance_offsets, DistanceType distance_limit); @@ -162,10 +193,11 @@ class BoundedDijkstraWrapper { // // Note that the distances() will take the source offsets into account, // but not the destination offsets. 
- std::vector RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( - const std::vector>& + std::vector + RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( + const std::vector>& sources_with_distance_offsets, - const std::vector>& + const std::vector>& destinations_with_distance_offsets, int num_destinations_to_reach, DistanceType distance_limit); @@ -174,19 +206,19 @@ class BoundedDijkstraWrapper { // happens at most once per node, when popping it from the Dijkstra queue, // meaning that the node has been fully 'processed'). This callback may modify // the distance limit dynamically, thus affecting the stopping criterion. - const std::vector& RunBoundedDijkstraWithSettledNodeCallback( - const std::vector>& + const std::vector& RunBoundedDijkstraWithSettledNodeCallback( + const std::vector>& sources_with_distance_offsets, - std::function settled_node_callback, DistanceType distance_limit); // Returns true if `node` was reached by the last Run*() call. - bool IsReachable(int node) const { return is_reached_[node]; } + bool IsReachable(NodeIndex node) const { return is_reached_[node]; } // Returns all the reached nodes form the previous Run*() call. - const std::vector& reached_nodes() const { return reached_nodes_; } + const ByNode& reached_nodes() const { return reached_nodes_; } // The following vectors are all indexed by graph node indices. // @@ -194,7 +226,7 @@ class BoundedDijkstraWrapper { // reached nodes are updated, the others will contain junk. // The distance of the nodes from their source. - const std::vector& distances() const { return distances_; } + const ByNode& distances() const { return distances_; } // The parent of the nodes in the shortest path from their source. // When a node doesn't have any parent (it has to be a source), its parent @@ -203,27 +235,29 @@ class BoundedDijkstraWrapper { // arcs have a length of zero. // Note also that some sources may have parents, because of the initial // distances. 
- const std::vector& parents() const { return parents_; } + const ByNode& parents() const { return parents_; } // The arc reaching a given node in the path from their source. // arc_from_source()[x] is undefined (i.e. junk) when parents()[x] == x. - const std::vector& arc_from_source() const { return arc_from_source_; } + const ByNode& arc_from_source() const { return arc_from_source_; } // Returns the list of all the arcs in the shortest path from the node's // source to the node. - std::vector ArcPathTo(int node) const; + std::vector ArcPathTo(NodeIndex node) const; ABSL_DEPRECATED("Use ArcPathTo() instead.") - std::vector ArcPathToNode(int node) const { return ArcPathTo(node); } + std::vector ArcPathToNode(NodeIndex node) const { + return ArcPathTo(node); + } // Returns the list of all the nodes in the shortest path from the node's // source to the node. This always start by the node's source, and end by // the given node. In the case that source == node, returns {node}. - std::vector NodePathTo(int node) const; + std::vector NodePathTo(NodeIndex node) const; // Returns the node's source. This is especially useful when running // Dijkstras from multiple sources. - int SourceOfShortestPathToNode(int node) const; + NodeIndex SourceOfShortestPathToNode(NodeIndex node) const; // Original Source/Destination index extraction, after a call to the // multi-source and/or multi-destination variants: @@ -239,16 +273,16 @@ class BoundedDijkstraWrapper { // rely on the value. // // These methods are invalidated by the next RunBoundedDijkstra*() call. - int GetSourceIndex(int node) const; - int GetDestinationIndex(int node) const; + int GetSourceIndex(NodeIndex node) const; + int GetDestinationIndex(NodeIndex node) const; // Trivial accessors to the underlying graph and arc lengths. 
const GraphType& graph() const { return *graph_; } - const std::vector& arc_lengths() const { + const ByArc& arc_lengths() const { CHECK(arc_lengths_); return *arc_lengths_; } - DistanceType GetArcLength(int arc) const { + DistanceType GetArcLength(ArcIndex arc) const { const DistanceType length = arc_length_functor_(arc); DCHECK_GE(length, 0); return length; @@ -262,18 +296,18 @@ class BoundedDijkstraWrapper { // The Graph and length of each arc. const GraphType* const graph_; ArcLengthFunctor arc_length_functor_; - const std::vector* const arc_lengths_; + const ByArc* const arc_lengths_; // Data about the last Dijkstra run. - std::vector distances_; - std::vector parents_; - std::vector arc_from_source_; - std::vector is_reached_; - std::vector reached_nodes_; + ByNode distances_; + ByNode parents_; + ByNode arc_from_source_; + ByNode is_reached_; + std::vector reached_nodes_; // Priority queue of nodes, ordered by their distance to the source. struct NodeDistance { - node_type node; // The target node. + NodeIndex node; // The target node. DistanceType distance; // Its distance from the source. bool operator<(const NodeDistance& other) const { @@ -287,7 +321,7 @@ class BoundedDijkstraWrapper { // or ieee754 floating-point, when the machine is little endian, and // when the total size of NodeDistance equals 16 bytes). // And here are the speeds of the BM_GridGraph benchmark (in which - // DistanceType=int64_t and node_type=int32_t), done with benchy + // DistanceType=int64_t and NodeIndex=int32_t), done with benchy // --runs=20: 0) BM_GridGraph 9.22ms ± 5% BM_GridGraph 3.19ms // ± 6% 1) BM_GridGraph 8.89ms ± 4% BM_GridGraph 3.07ms ± // 3% 2) BM_GridGraph 8.61ms ± 3% BM_GridGraph 3.13ms ± 6% @@ -303,8 +337,8 @@ class BoundedDijkstraWrapper { // The vectors are only allocated after they are first used. // Between calls, is_destination_ is all false, and the rest is junk. 
std::vector is_destination_; - std::vector node_to_source_index_; - std::vector node_to_destination_index_; + ByNode node_to_source_index_; + ByNode node_to_destination_index_; }; // ----------------------------------------------------------------------------- @@ -314,12 +348,12 @@ class BoundedDijkstraWrapper { template BoundedDijkstraWrapper:: BoundedDijkstraWrapper(const GraphType* graph, - const std::vector* arc_lengths) + const ByArc* arc_lengths) : graph_(graph), arc_length_functor_(*arc_lengths), arc_lengths_(arc_lengths) { CHECK(arc_lengths_ != nullptr); - CHECK_EQ(arc_lengths_->size(), graph->num_arcs()); + CHECK_EQ(ArcIndex(arc_lengths_->size()), graph->num_arcs()); for (const DistanceType length : *arc_lengths) { CHECK_GE(length, 0); } @@ -341,10 +375,10 @@ BoundedDijkstraWrapper:: arc_lengths_(other.arc_lengths_) {} template -const std::vector& +const std::vector& BoundedDijkstraWrapper:: RunBoundedDijkstraFromMultipleSources( - const std::vector>& + const std::vector>& sources_with_distance_offsets, DistanceType distance_limit) { return RunBoundedDijkstraWithSettledNodeCallback( @@ -352,12 +386,12 @@ BoundedDijkstraWrapper:: } template -std::vector +std::vector BoundedDijkstraWrapper:: RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( - const std::vector>& + const std::vector>& sources_with_distance_offsets, - const std::vector>& + const std::vector>& destinations_with_distance_offsets, int num_destinations_to_reach, DistanceType distance_limit) { if (destinations_with_distance_offsets.empty()) return {}; @@ -368,22 +402,22 @@ BoundedDijkstraWrapper:: // to reduce the search space. 
DCHECK_GE(num_destinations_to_reach, 0); int num_destinations = 0; - is_destination_.resize(graph_->num_nodes(), false); + is_destination_.resize(static_cast(graph_->num_nodes()), false); node_to_destination_index_.resize(graph_->num_nodes(), -1); DistanceType min_destination_distance_offset = destinations_with_distance_offsets[0].second; for (int i = 0; i < destinations_with_distance_offsets.size(); ++i) { - const int node = destinations_with_distance_offsets[i].first; + const NodeIndex node = destinations_with_distance_offsets[i].first; const DistanceType distance = destinations_with_distance_offsets[i].second; - if (!is_destination_[node]) ++num_destinations; + if (!is_destination_[static_cast(node)]) ++num_destinations; // Skip useless repetitions. - if (is_destination_[node] && + if (is_destination_[static_cast(node)] && distance >= destinations_with_distance_offsets[node_to_destination_index_[node]] .second) { continue; } - is_destination_[node] = true; + is_destination_[static_cast(node)] = true; node_to_destination_index_[node] = i; min_destination_distance_offset = std::min(min_destination_distance_offset, distance); @@ -395,13 +429,13 @@ BoundedDijkstraWrapper:: gtl::TopN> closest_destinations( /*limit=*/num_destinations_to_reach); - std::function + std::function settled_node_callback = [this, num_destinations_to_reach, min_destination_distance_offset, &destinations_with_distance_offsets, &closest_destinations]( - node_type settled_node, DistanceType settled_distance, + NodeIndex settled_node, DistanceType settled_distance, DistanceType* distance_limit) { - if (!is_destination_[settled_node]) return; + if (!is_destination_[static_cast(settled_node)]) return; const DistanceType distance = settled_distance + destinations_with_distance_offsets @@ -423,12 +457,12 @@ BoundedDijkstraWrapper:: // Clean up, sparsely, for the next call. 
for (const auto& [node, _] : destinations_with_distance_offsets) { - is_destination_[node] = false; + is_destination_[static_cast(node)] = false; } // Return the closest "num_destinations_to_reach" reached destinations, // sorted by distance. - std::vector sorted_destinations; + std::vector sorted_destinations; sorted_destinations.reserve(closest_destinations.size()); for (const NodeDistance& d : closest_destinations.Take()) { sorted_destinations.push_back(d.node); @@ -438,10 +472,11 @@ BoundedDijkstraWrapper:: template bool BoundedDijkstraWrapper:: - OneToOneShortestPath(int from, int to, DistanceType distance_limit) { + OneToOneShortestPath(NodeIndex from, NodeIndex to, + DistanceType distance_limit) { bool reached = false; - std::function - settled_node_callback = [to, &reached](node_type node, + std::function + settled_node_callback = [to, &reached](NodeIndex node, DistanceType distance, DistanceType* distance_limit) { if (node != to) return; @@ -456,18 +491,18 @@ bool BoundedDijkstraWrapper:: } template -const std::vector& +const std::vector& BoundedDijkstraWrapper:: RunBoundedDijkstraWithSettledNodeCallback( - const std::vector>& + const std::vector>& sources_with_distance_offsets, - std::function settled_node_callback, DistanceType distance_limit) { // Sparse clear is_reached_ from the last call. - for (const int node : reached_nodes_) { + for (const NodeIndex node : reached_nodes_) { is_reached_[node] = false; } reached_nodes_.clear(); @@ -475,15 +510,15 @@ BoundedDijkstraWrapper:: is_reached_.resize(graph_->num_nodes(), false); distances_.resize(graph_->num_nodes(), distance_limit); - parents_.resize(graph_->num_nodes(), std::numeric_limits::min()); - arc_from_source_.resize(graph_->num_nodes(), -1); + parents_.resize(graph_->num_nodes(), std::numeric_limits::min()); + arc_from_source_.resize(graph_->num_nodes(), GraphType::kNilArc); // Initialize sources. 
CHECK(queue_.empty()); node_to_source_index_.resize(graph_->num_nodes(), -1); for (int i = 0; i < sources_with_distance_offsets.size(); ++i) { - const int node = sources_with_distance_offsets[i].first; - DCHECK_GE(node, 0); + const NodeIndex node = sources_with_distance_offsets[i].first; + DCHECK_GE(node, NodeIndex(0)); DCHECK_LT(node, graph_->num_nodes()); const DistanceType distance = sources_with_distance_offsets[i].second; // Sources with an initial distance ≥ limit are *not* reached. @@ -498,7 +533,7 @@ BoundedDijkstraWrapper:: node_to_source_index_[node] = i; distances_[node] = distance; } - for (const int source : reached_nodes_) { + for (const NodeIndex source : reached_nodes_) { queue_.push_back({source, distances_[source]}); } std::make_heap(queue_.begin(), queue_.end(), std::greater()); @@ -533,7 +568,8 @@ BoundedDijkstraWrapper:: // Visit the neighbors. const DistanceType limit = distance_limit - top.distance; - for (const int arc : graph_->OutgoingArcs(top.node)) { + for (const typename GraphType::ArcIndex arc : + graph_->OutgoingArcs(top.node)) { // Overflow-safe check of top.distance + arc_length >= distance_limit. 
// This works since we know top.distance < distance_limit, as long as we // don't have negative top.distance (which might happen with negative @@ -543,7 +579,7 @@ BoundedDijkstraWrapper:: if (arc_length >= limit) continue; const DistanceType candidate_distance = top.distance + arc_length; - const int head = graph_->Head(arc); + const NodeIndex head = graph_->Head(arc); if (is_reached_[head]) { if (candidate_distance >= distances_[head]) continue; } else { @@ -563,14 +599,14 @@ BoundedDijkstraWrapper:: } template -std::vector +std::vector BoundedDijkstraWrapper::ArcPathTo( - int node) const { - std::vector output; + NodeIndex node) const { + std::vector output; int loop_detector = 0; while (true) { - DCHECK_GE(node, 0); - DCHECK_LT(node, parents_.size()); + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(parents_.size())); CHECK_LT(loop_detector++, parents_.size()); if (parents_[node] == node) break; output.push_back(arc_from_source_[node]); @@ -581,14 +617,14 @@ BoundedDijkstraWrapper::ArcPathTo( } template -std::vector +std::vector BoundedDijkstraWrapper::NodePathTo( - int node) const { - std::vector output; + NodeIndex node) const { + std::vector output; int loop_detector = 0; while (true) { - DCHECK_GE(node, 0); - DCHECK_LT(node, parents_.size()); + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(parents_.size())); CHECK_LT(loop_detector++, parents_.size()); output.push_back(node); if (parents_[node] == node) break; @@ -599,27 +635,28 @@ BoundedDijkstraWrapper::NodePathTo( } template -int BoundedDijkstraWrapper:: - SourceOfShortestPathToNode(int node) const { - int parent = node; +typename GraphType::NodeIndex BoundedDijkstraWrapper< + GraphType, DistanceType, + ArcLengthFunctor>::SourceOfShortestPathToNode(NodeIndex node) const { + NodeIndex parent = node; while (parents_[parent] != parent) parent = parents_[parent]; return parent; } template int BoundedDijkstraWrapper::GetSourceIndex(int node) const { - DCHECK_GE(node, 0); - 
DCHECK_LT(node, node_to_source_index_.size()); + ArcLengthFunctor>::GetSourceIndex(NodeIndex node) + const { + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(node_to_source_index_.size())); return node_to_source_index_[node]; } template -int BoundedDijkstraWrapper::GetDestinationIndex(int node) - const { - DCHECK_GE(node, 0); - DCHECK_LT(node, node_to_destination_index_.size()); +int BoundedDijkstraWrapper:: + GetDestinationIndex(NodeIndex node) const { + DCHECK_GE(node, NodeIndex(0)); + DCHECK_LT(node, NodeIndex(node_to_destination_index_.size())); return node_to_destination_index_[node]; } @@ -627,37 +664,38 @@ int BoundedDijkstraWrapper -std::pair> SimpleOneToOneShortestPath( - int source, int destination, absl::Span tails, - absl::Span heads, absl::Span lengths, +template +std::pair> SimpleOneToOneShortestPath( + NodeIndex source, NodeIndex destination, absl::Span tails, + absl::Span heads, absl::Span lengths, DistanceType limit) { + using ArcIndex = NodeIndex; // Compute the number of nodes. // // This is not necessary, but is a good practice to allocate the graph size in // one go. We also do some basic validation. CHECK_GE(source, 0); CHECK_GE(destination, 0); - int num_nodes = std::max(source + 1, destination + 1); - for (const int tail : tails) { + NodeIndex num_nodes = std::max(source + 1, destination + 1); + for (const NodeIndex tail : tails) { CHECK_GE(tail, 0); num_nodes = std::max(tail + 1, num_nodes); } - for (const int head : heads) { + for (const NodeIndex head : heads) { CHECK_GE(head, 0); num_nodes = std::max(head + 1, num_nodes); } // The number of arcs. - const int num_arcs = tails.size(); + const ArcIndex num_arcs = tails.size(); CHECK_EQ(num_arcs, heads.size()); CHECK_EQ(num_arcs, lengths.size()); // Build the graph. Note that this permutes arc indices for speed, but we // don't care here since we will return a node path. 
- util::StaticGraph<> graph(num_nodes, num_arcs); + util::StaticGraph graph(num_nodes, num_arcs); std::vector arc_lengths(lengths.begin(), lengths.end()); - for (int a = 0; a < num_arcs; ++a) { + for (ArcIndex a = 0; a < num_arcs; ++a) { // Negative length can cause the algo to loop forever and/or use a lot of // memory. So it should be validated. CHECK_GE(lengths[a], 0); diff --git a/ortools/graph/bounded_dijkstra_test.cc b/ortools/graph/bounded_dijkstra_test.cc index 07a21f5a8d..a5f256cce1 100644 --- a/ortools/graph/bounded_dijkstra_test.cc +++ b/ortools/graph/bounded_dijkstra_test.cc @@ -30,6 +30,7 @@ #include "gtest/gtest.h" #include "ortools/base/dump_vars.h" #include "ortools/base/gmock.h" +#include "ortools/base/strong_int.h" #include "ortools/graph/graph.h" #include "ortools/graph/graph_io.h" #include "ortools/graph/test_util.h" @@ -45,122 +46,140 @@ using ::testing::Pair; using ::testing::UnorderedElementsAreArray; using ::util::ListGraph; +DEFINE_STRONG_INT_TYPE(NodeIndex, int32_t); +DEFINE_STRONG_INT_TYPE(ArcIndex, int64_t); + +using TestGraph = ListGraph; +template +using DijkstraWrapper = BoundedDijkstraWrapper; + TEST(BoundedDijkstraWrapperDeathTest, Accessors) { - ListGraph<> graph; - graph.AddArc(1, 3); - std::vector arc_lengths = {2.5}; - BoundedDijkstraWrapper, float> dijkstra(&graph, &arc_lengths); + TestGraph graph; + graph.AddArc(NodeIndex(1), NodeIndex(3)); + DijkstraWrapper::ByArc arc_lengths = {2.5}; + DijkstraWrapper dijkstra(&graph, &arc_lengths); const std::is_same same_type; ASSERT_TRUE(same_type.value); ASSERT_EQ(&dijkstra.graph(), &graph); - ASSERT_EQ(dijkstra.GetArcLength(0), 2.5); + ASSERT_EQ(dijkstra.GetArcLength(ArcIndex(0)), 2.5); } TEST(BoundedDijkstraWrapperDeathTest, WithArcLengthFunctor) { - ListGraph<> graph; - graph.AddArc(1, 3); - BoundedDijkstraWrapper, float, std::function> - dijkstra(&graph, [](int) { return 2.34; }); - ASSERT_FLOAT_EQ(dijkstra.GetArcLength(0), 2.34f); + TestGraph graph; + graph.AddArc(NodeIndex(1), 
NodeIndex(3)); + BoundedDijkstraWrapper> + dijkstra(&graph, [](ArcIndex) { return 2.34; }); + ASSERT_FLOAT_EQ(dijkstra.GetArcLength(ArcIndex(0)), 2.34f); } TEST(BoundedDijkstraWrapperDeathTest, ConstructorPreconditions) { - ListGraph<> graph; - for (int i = 0; i < 50; ++i) graph.AddArc(i, i + 1); + TestGraph graph; + for (int i = 0; i < 50; ++i) graph.AddArc(NodeIndex(i), NodeIndex(i + 1)); - std::vector arc_lengths(13, 0); - typedef BoundedDijkstraWrapper, int> TestedClass; + typedef DijkstraWrapper TestedClass; + TestedClass::ByArc arc_lengths(13, 0); EXPECT_DEATH(new TestedClass(&graph, &arc_lengths), "13"); arc_lengths.resize(50, 0); - arc_lengths[20] = -132; + arc_lengths[ArcIndex(20)] = -132; EXPECT_DEATH(new TestedClass(&graph, &arc_lengths), "-132"); } TEST(BoundedDijkstraWrapper, ArcPathToAndSourceOfShortestPathToNode) { - ListGraph<> graph; - std::vector arc_lengths = {1, 2, 3, 4, 6, 5}; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - graph.AddArc(1, 2); - graph.AddArc(1, 2); - graph.AddArc(2, 3); - graph.AddArc(2, 3); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths = {1, 2, 3, 4, 6, 5}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, 10); - EXPECT_THAT(reached, ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(9, dijkstra.distances()[3]); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(0, 2, 5)); - EXPECT_THAT(dijkstra.NodePathTo(3), ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3)); + DijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), 10); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), + NodeIndex(3))); 
+ EXPECT_EQ(9, dijkstra.distances()[NodeIndex(3)]); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), + ElementsAre(ArcIndex(0), ArcIndex(2), ArcIndex(5))); + EXPECT_THAT( + dijkstra.NodePathTo(NodeIndex(3)), + ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), NodeIndex(3))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); } TEST(BoundedDijkstraWrapper, EmptyPath) { - ListGraph<> graph; - std::vector arc_lengths = {1, 2}; - graph.AddArc(0, 1); - graph.AddArc(2, 3); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths = {1, 2}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, 10); - EXPECT_THAT(reached, ElementsAre(0, 1)); + DijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), 10); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1))); - EXPECT_EQ(0, dijkstra.distances()[0]); - EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre()); - EXPECT_THAT(dijkstra.NodePathTo(0), ElementsAre(0)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(0)); + EXPECT_EQ(0, dijkstra.distances()[NodeIndex(0)]); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), ElementsAre()); + EXPECT_THAT(dijkstra.NodePathTo(NodeIndex(0)), ElementsAre(NodeIndex(0))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(0))); } TEST(BoundedDijkstraWrapper, OverflowSafe) { - ListGraph<> graph; + TestGraph graph; const int64_t int_max = std::numeric_limits::max(); - std::vector arc_lengths = {int_max, int_max / 2, int_max / 2, 1}; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - graph.AddArc(1, 2); - graph.AddArc(2, 3); + DijkstraWrapper::ByArc arc_lengths = {int_max, int_max / 2, + int_max / 2, 1}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + 
graph.AddArc(NodeIndex(2), NodeIndex(3)); - BoundedDijkstraWrapper, int64_t> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, int_max); + BoundedDijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), int_max); // This works because int_max is odd, i.e. 2 * (int_max / 2) = int_max - 1 - EXPECT_THAT(reached, ElementsAre(0, 1, 2)); - EXPECT_EQ(0, dijkstra.distances()[0]); - EXPECT_EQ(int_max / 2, dijkstra.distances()[1]); - EXPECT_EQ(int_max - 1, dijkstra.distances()[2]); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2))); + EXPECT_EQ(0, dijkstra.distances()[NodeIndex(0)]); + EXPECT_EQ(int_max / 2, dijkstra.distances()[NodeIndex(1)]); + EXPECT_EQ(int_max - 1, dijkstra.distances()[NodeIndex(2)]); } TEST(BoundedDijkstraWrapper, ArcPathToAndSourceOfShortestPathToNode_WithArcLengthFunction) { - ListGraph<> graph; - std::vector arc_lengths = {1, 2, 3, 4, 6, 5}; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - graph.AddArc(1, 2); - graph.AddArc(1, 2); - graph.AddArc(2, 3); - graph.AddArc(2, 3); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths = {1, 2, 3, 4, 6, 5}; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(1), NodeIndex(2)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); + graph.AddArc(NodeIndex(2), NodeIndex(3)); class MyArcLengthFunctor { public: - explicit MyArcLengthFunctor(const std::vector& arc_lengths) + explicit MyArcLengthFunctor( + const DijkstraWrapper::ByArc& arc_lengths) : arc_lengths_(arc_lengths) {} - int operator()(int arc) const { - return arc % 2 == 1 ? arc_lengths_[arc] : 100; + + int operator()(ArcIndex arc) const { + return arc.value() % 2 == 1 ? 
arc_lengths_[arc] : 100; } private: - const std::vector& arc_lengths_; + const DijkstraWrapper::ByArc& arc_lengths_; }; - BoundedDijkstraWrapper, int, MyArcLengthFunctor> dijkstra( + BoundedDijkstraWrapper dijkstra( &graph, MyArcLengthFunctor(arc_lengths)); - const std::vector reached = dijkstra.RunBoundedDijkstra(0, 20); - EXPECT_THAT(reached, ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(11, dijkstra.distances()[3]); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(1, 3, 5)); - EXPECT_THAT(dijkstra.NodePathTo(3), ElementsAre(0, 1, 2, 3)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3)); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(0), 20); + EXPECT_THAT(reached, ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), + NodeIndex(3))); + EXPECT_EQ(11, dijkstra.distances()[NodeIndex(3)]); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), + ElementsAre(ArcIndex(1), ArcIndex(3), ArcIndex(5))); + EXPECT_THAT( + dijkstra.NodePathTo(NodeIndex(3)), + ElementsAre(NodeIndex(0), NodeIndex(1), NodeIndex(2), NodeIndex(3))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); } TEST(BoundedDijkstraWrapperTest, RandomDenseGraph) { @@ -168,12 +187,12 @@ TEST(BoundedDijkstraWrapperTest, RandomDenseGraph) { const int num_nodes = 50; std::vector> lengths(num_nodes, std::vector(num_nodes)); - ListGraph<> graph; - std::vector arc_lengths; + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; for (int i = 0; i < num_nodes; ++i) { for (int j = 0; j < num_nodes; ++j) { lengths[i][j] = (i == j) ? 
0 : absl::Uniform(random, 0, 1000); - graph.AddArc(i, j); + graph.AddArc(NodeIndex(i), NodeIndex(j)); arc_lengths.push_back(lengths[i][j]); } } @@ -191,15 +210,15 @@ TEST(BoundedDijkstraWrapperTest, RandomDenseGraph) { std::vector reached_sizes; for (int source = 0; source < num_nodes; ++source) { const int limit = 100; - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - const std::vector reached = dijkstra.RunBoundedDijkstra(source, limit); - for (const int node : reached) { + DijkstraWrapper dijkstra(&graph, &arc_lengths); + const auto reached = dijkstra.RunBoundedDijkstra(NodeIndex(source), limit); + for (const NodeIndex node : reached) { EXPECT_LT(dijkstra.distances()[node], limit); - EXPECT_EQ(dijkstra.distances()[node], lengths[source][node]); + EXPECT_EQ(dijkstra.distances()[node], lengths[source][node.value()]); // Check that we never have the same node twice in the paths. - std::vector path = {node}; - int parent = node; + std::vector path = {node}; + NodeIndex parent = node; while (dijkstra.parents()[parent] != parent) { parent = dijkstra.parents()[parent]; path.push_back(parent); @@ -230,7 +249,7 @@ TEST(SimpleOneToOneShortestPathTest, PathTooLong) { { const auto [distance, path] = - SimpleOneToOneShortestPath(0, 3, tails, heads, lengths); + SimpleOneToOneShortestPath(0, 3, tails, heads, lengths); EXPECT_EQ(distance, std::numeric_limits::max()); EXPECT_TRUE(path.empty()); } @@ -238,7 +257,7 @@ TEST(SimpleOneToOneShortestPathTest, PathTooLong) { { // from 0 to 2 work because 2 * big_length < int_max. const auto [distance, path] = - SimpleOneToOneShortestPath(0, 2, tails, heads, lengths); + SimpleOneToOneShortestPath(0, 2, tails, heads, lengths); EXPECT_EQ(distance, std::numeric_limits::max() - 1); EXPECT_THAT(path, ElementsAre(0, 1, 2)); } @@ -256,7 +275,7 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // This will be the "sparse" representation. 
std::vector tails; std::vector heads; - std::vector arc_lengths; + DijkstraWrapper::ByArc arc_lengths; // We permutes the arc order to properly test that it do not matter. std::vector nodes(num_nodes); @@ -292,8 +311,8 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // No limit. There should always be a path with our generated data. { - const auto [distance, path] = - SimpleOneToOneShortestPath(from, to, tails, heads, arc_lengths); + const auto [distance, path] = SimpleOneToOneShortestPath( + from, to, tails, heads, arc_lengths); EXPECT_EQ(distance, shortest_distance[from][to]); EXPECT_FALSE(path.empty()); EXPECT_EQ(path.front(), from); @@ -302,7 +321,7 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // A limit of shortest_distance[from][to] + 1 works too. { - const auto [distance, path] = SimpleOneToOneShortestPath( + const auto [distance, path] = SimpleOneToOneShortestPath( from, to, tails, heads, arc_lengths, shortest_distance[from][to] + 1); EXPECT_EQ(distance, shortest_distance[from][to]); EXPECT_FALSE(path.empty()); @@ -312,7 +331,7 @@ TEST(SimpleOneToOneShortestPathTest, Random) { // But a limit of shortest_distance[from][to] should fail. 
{ - const auto [distance, path] = SimpleOneToOneShortestPath( + const auto [distance, path] = SimpleOneToOneShortestPath( from, to, tails, heads, arc_lengths, shortest_distance[from][to]); EXPECT_EQ(distance, shortest_distance[from][to]); EXPECT_TRUE(path.empty()); @@ -321,101 +340,116 @@ TEST(SimpleOneToOneShortestPathTest, Random) { } TEST(BoundedDijkstraWrapperTest, MultiRunsOverDynamicGraphAndLengths) { - ListGraph<> graph; - graph.AddArc(0, 1); - graph.AddArc(0, 1); - std::vector arc_lengths = {4, 3}; - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + TestGraph graph; + graph.AddArc(NodeIndex(0), NodeIndex(1)); + graph.AddArc(NodeIndex(0), NodeIndex(1)); + DijkstraWrapper::ByArc arc_lengths = {4, 3}; + DijkstraWrapper dijkstra(&graph, &arc_lengths); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 5), ElementsAre(0, 1)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(1)); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(0), 5), + ElementsAre(NodeIndex(0), NodeIndex(1))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), ElementsAre(ArcIndex(1))); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 2), ElementsAre(0)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(0)); - EXPECT_THAT(dijkstra.ArcPathTo(0), IsEmpty()); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(0), 2), + ElementsAre(NodeIndex(0))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(0))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), IsEmpty()); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(1, 99), ElementsAre(1)); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), IsEmpty()); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(1), 99), + ElementsAre(NodeIndex(1))); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + 
EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), IsEmpty()); // Add some arcs and nodes... - graph.AddArc(0, 2); + graph.AddArc(NodeIndex(0), NodeIndex(2)); arc_lengths.push_back(1); - graph.AddArc(1, 2); + graph.AddArc(NodeIndex(1), NodeIndex(2)); arc_lengths.push_back(0); - graph.AddArc(2, 1); + graph.AddArc(NodeIndex(2), NodeIndex(1)); arc_lengths.push_back(1); - graph.AddArc(1, 3); + graph.AddArc(NodeIndex(1), NodeIndex(3)); arc_lengths.push_back(5); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 10), ElementsAre(0, 2, 1, 3)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(3)); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(2, 4, 5)); + EXPECT_THAT( + dijkstra.RunBoundedDijkstra(NodeIndex(0), 10), + ElementsAre(NodeIndex(0), NodeIndex(2), NodeIndex(1), NodeIndex(3))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), + ElementsAre(ArcIndex(2), ArcIndex(4), ArcIndex(5))); - EXPECT_THAT(dijkstra.RunBoundedDijkstra(0, 6), ElementsAre(0, 2, 1)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(2, 4)); + EXPECT_THAT(dijkstra.RunBoundedDijkstra(NodeIndex(0), 6), + ElementsAre(NodeIndex(0), NodeIndex(2), NodeIndex(1))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), + ElementsAre(ArcIndex(2), ArcIndex(4))); } TEST(BoundedDijkstraWrapperTest, MultipleSources) { // Use this graph. Source nodes have their initial distance in [ ]. 
// // N1[0] --(2)--> N0[4] --(1)--> N2 --(5)--> N3 <--(4)-- N4[3] --(5)--> N5 - ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(1, 0); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(1), NodeIndex(0)); arc_lengths.push_back(2); - graph.AddArc(0, 2); + graph.AddArc(NodeIndex(0), NodeIndex(2)); arc_lengths.push_back(1); - graph.AddArc(2, 3); + graph.AddArc(NodeIndex(2), NodeIndex(3)); arc_lengths.push_back(5); - graph.AddArc(4, 3); + graph.AddArc(NodeIndex(4), NodeIndex(3)); arc_lengths.push_back(4); - graph.AddArc(4, 5); + graph.AddArc(NodeIndex(4), NodeIndex(5)); arc_lengths.push_back(5); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + DijkstraWrapper dijkstra(&graph, &arc_lengths); // The distance limit is exclusive, so we can't reach Node 5. ASSERT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources( - {{1, 0}, {0, 4}, {4, 3}}, 8), + {{NodeIndex(1), 0}, {NodeIndex(0), 4}, {NodeIndex(4), 3}}, 8), // The order is deterministic: node 4 comes before node 2, despite // having equal distance and higher index, because it's a source. 
- ElementsAre(1, 0, 4, 2, 3)); - EXPECT_EQ(2, dijkstra.distances()[0]); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(0)); - EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre(0)); - EXPECT_EQ(0, dijkstra.distances()[1]); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), IsEmpty()); - EXPECT_EQ(3, dijkstra.distances()[2]); - EXPECT_EQ(1, dijkstra.SourceOfShortestPathToNode(2)); - EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(0, 1)); - EXPECT_EQ(7, dijkstra.distances()[3]); - EXPECT_EQ(4, dijkstra.SourceOfShortestPathToNode(3)); - EXPECT_THAT(dijkstra.ArcPathTo(3), ElementsAre(3)); - EXPECT_EQ(3, dijkstra.distances()[4]); - EXPECT_EQ(4, dijkstra.SourceOfShortestPathToNode(4)); - EXPECT_THAT(dijkstra.ArcPathTo(4), IsEmpty()); + ElementsAre(NodeIndex(1), NodeIndex(0), NodeIndex(4), + NodeIndex(2), NodeIndex(3))); + EXPECT_EQ(2, dijkstra.distances()[NodeIndex(0)]); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(0))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), ElementsAre(ArcIndex(0))); + EXPECT_EQ(0, dijkstra.distances()[NodeIndex(1)]); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), IsEmpty()); + EXPECT_EQ(3, dijkstra.distances()[NodeIndex(2)]); + EXPECT_EQ(NodeIndex(1), dijkstra.SourceOfShortestPathToNode(NodeIndex(2))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(2)), + ElementsAre(ArcIndex(0), ArcIndex(1))); + EXPECT_EQ(7, dijkstra.distances()[NodeIndex(3)]); + EXPECT_EQ(NodeIndex(4), dijkstra.SourceOfShortestPathToNode(NodeIndex(3))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(3)), ElementsAre(ArcIndex(3))); + EXPECT_EQ(3, dijkstra.distances()[NodeIndex(4)]); + EXPECT_EQ(NodeIndex(4), dijkstra.SourceOfShortestPathToNode(NodeIndex(4))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(4)), IsEmpty()); } TEST(BoundedDijkstraWrapperTest, SourcesAtOrBeyondDistanceLimitAreNotReached) { - ListGraph<> graph(/*num_nodes=*/5, 
/*arc_capacity=*/0); - std::vector arc_lengths; // No arcs. - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - EXPECT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources( - {{0, 10}, {1, 11}, {2, 12}, {3, 13}}, 12), - ElementsAre(0, 1)); + TestGraph graph(/*num_nodes=*/NodeIndex(5), /*arc_capacity=*/ArcIndex(0)); + DijkstraWrapper::ByArc arc_lengths; // No arcs. + DijkstraWrapper dijkstra(&graph, &arc_lengths); + EXPECT_THAT( + dijkstra.RunBoundedDijkstraFromMultipleSources({{NodeIndex(0), 10}, + {NodeIndex(1), 11}, + {NodeIndex(2), 12}, + {NodeIndex(3), 13}}, + 12), + ElementsAre(NodeIndex(0), NodeIndex(1))); } TEST(BoundedDijkstraWrapperTest, SourcesListedMultipleTimesKeepsMinDistance) { - ListGraph<> graph(/*num_nodes=*/5, /*arc_capacity=*/1); - graph.AddArc(1, 3); - std::vector arc_lengths = {20}; - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); - EXPECT_THAT(dijkstra.RunBoundedDijkstraFromMultipleSources( - {{1, 12}, {1, 10}, {1, 14}}, 31), - ElementsAre(1, 3)); - EXPECT_EQ(dijkstra.distances()[3], 30); + TestGraph graph(/*num_nodes=*/NodeIndex(5), /*arc_capacity=*/ArcIndex(1)); + graph.AddArc(NodeIndex(1), NodeIndex(3)); + DijkstraWrapper::ByArc arc_lengths = {20}; + DijkstraWrapper dijkstra(&graph, &arc_lengths); + EXPECT_THAT( + dijkstra.RunBoundedDijkstraFromMultipleSources( + {{NodeIndex(1), 12}, {NodeIndex(1), 10}, {NodeIndex(1), 14}}, 31), + ElementsAre(NodeIndex(1), NodeIndex(3))); + EXPECT_EQ(dijkstra.distances()[NodeIndex(3)], 30); } TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { @@ -430,38 +464,45 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { // `------(0)-----' // // The shortest path is S0->D1->N5->D4, of distance 2 + 3 + 1 + 1 + 1 = 8. 
- ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(0, 1); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(0), NodeIndex(1)); arc_lengths.push_back(3); - graph.AddArc(2, 3); + graph.AddArc(NodeIndex(2), NodeIndex(3)); arc_lengths.push_back(3); - graph.AddArc(1, 5); + graph.AddArc(NodeIndex(1), NodeIndex(5)); arc_lengths.push_back(1); - graph.AddArc(3, 5); + graph.AddArc(NodeIndex(3), NodeIndex(5)); arc_lengths.push_back(0); - graph.AddArc(5, 3); + graph.AddArc(NodeIndex(5), NodeIndex(3)); arc_lengths.push_back(0); - graph.AddArc(5, 4); + graph.AddArc(NodeIndex(5), NodeIndex(4)); arc_lengths.push_back(1); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + DijkstraWrapper dijkstra(&graph, &arc_lengths); // Repeat the same source and destination multiple times, to verify that // it's supported. - std::vector> sources = {{0, 5}, {2, 4}, {0, 2}, {0, 9}}; - std::vector> destinations = { - {1, 7}, {4, 5}, {3, 3}, {4, 1}, {4, 3}}; + std::vector> sources = {{NodeIndex(0), 5}, + {NodeIndex(2), 4}, + {NodeIndex(0), 2}, + {NodeIndex(0), 9}}; + std::vector> destinations = {{NodeIndex(1), 7}, + {NodeIndex(4), 5}, + {NodeIndex(3), 3}, + {NodeIndex(4), 1}, + {NodeIndex(4), 3}}; EXPECT_THAT( dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1, /*distance_limit=*/1000), - Contains(4)); - EXPECT_EQ(2 + 3 + 1 + 1, dijkstra.distances()[4]); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(4)); - EXPECT_THAT(dijkstra.ArcPathTo(4), - ElementsAre(/*0->1*/ 0, /*1->5*/ 2, /*5->4*/ 5)); - EXPECT_EQ(2, dijkstra.GetSourceIndex(0)); - EXPECT_EQ(3, dijkstra.GetDestinationIndex(4)); + Contains(NodeIndex(4))); + EXPECT_EQ(2 + 3 + 1 + 1, dijkstra.distances()[NodeIndex(4)]); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(4))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(4)), + ElementsAre(/*0->1*/ ArcIndex(0), /*1->5*/ ArcIndex(2), + /*5->4*/ 
ArcIndex(5))); + EXPECT_EQ(2, dijkstra.GetSourceIndex(NodeIndex(0))); + EXPECT_EQ(3, dijkstra.GetDestinationIndex(NodeIndex(4))); // Run it with a limit too small: it'll fail to discover any destination. EXPECT_THAT( @@ -475,18 +516,20 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/2, /*distance_limit=*/9), // Limit is exclusive. - ElementsAre(4)); + ElementsAre(NodeIndex(4))); // Slightly modify the graph and try again. We want a case where the best // destination isn't the one with the smallest distance offset. - destinations.push_back({1, 2}); // D1 will be the closest destination now. + destinations.push_back( + {NodeIndex(1), 2}); // D1 will be the closest destination now. EXPECT_THAT( dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1, /*distance_limit=*/8), // Limit is exclusive. - ElementsAre(1)); - EXPECT_EQ(0, dijkstra.SourceOfShortestPathToNode(1)); - EXPECT_THAT(dijkstra.ArcPathTo(1), ElementsAre(/*0->1*/ 0)); + ElementsAre(NodeIndex(1))); + EXPECT_EQ(NodeIndex(0), dijkstra.SourceOfShortestPathToNode(NodeIndex(1))); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(1)), + ElementsAre(/*0->1*/ ArcIndex(0))); // Corner case: run with no destinations. EXPECT_THAT( @@ -505,8 +548,8 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { // Call Get{Source,Destination}Index() on nodes that aren't sources or // destinations. This returns junk; so we don't check the returned values, // but we do check that it doesn't crash. - dijkstra.GetDestinationIndex(4); - dijkstra.GetSourceIndex(1); + dijkstra.GetDestinationIndex(NodeIndex(4)); + dijkstra.GetSourceIndex(NodeIndex(1)); // Setting num_reached_destinations=1 now should make '1' the only reachable // destination, even if the limit is infinite. 
@@ -514,85 +557,88 @@ TEST(BoundedDijkstraWrapperTest, MultipleSourcesMultipleDestinations) { dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1, /*distance_limit=*/1000), - ElementsAre(1)); + ElementsAre(NodeIndex(1))); // Verify that if we set the number of destinations to infinity, they're all // explored, and the search still stops before exploring the whole graph. To // do that, we add one extra arc that's beyond the farthest destination's // distance (including its destination offset), i.e. 1 (distance 2+3+7 = 12). - graph.AddArc(5, 6); + graph.AddArc(NodeIndex(5), NodeIndex(6)); arc_lengths.push_back(2); - graph.AddArc(6, 7); + graph.AddArc(NodeIndex(6), NodeIndex(7)); arc_lengths.push_back(0); EXPECT_THAT( dijkstra.RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations( sources, destinations, /*num_destinations_to_reach=*/1000, /*distance_limit=*/1000), - ElementsAre(1, 4, 3)); - EXPECT_GE(dijkstra.distances()[1], 5); - EXPECT_GE(dijkstra.distances()[4], 7); - EXPECT_GE(dijkstra.distances()[3], 6); + ElementsAre(NodeIndex(1), NodeIndex(4), NodeIndex(3))); + EXPECT_GE(dijkstra.distances()[NodeIndex(1)], 5); + EXPECT_GE(dijkstra.distances()[NodeIndex(4)], 7); + EXPECT_GE(dijkstra.distances()[NodeIndex(3)], 6); // To verify that node #7 isn't reached, we can check its distance, which will // still be set to the initialized "distance_limit - min_destination_offset". - EXPECT_GE(dijkstra.distances()[7], 1000 - 1); + EXPECT_GE(dijkstra.distances()[NodeIndex(7)], 1000 - 1); } TEST(BoundedDijkstraWrapperTest, OneToOneShortestPath) { // Since we already tested the multiple sources - multiple destinations // variant, we only need to test the "plumbing" here. 
- ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(0, 1); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(0), NodeIndex(1)); arc_lengths.push_back(3); - graph.AddArc(1, 2); + graph.AddArc(NodeIndex(1), NodeIndex(2)); arc_lengths.push_back(2); - BoundedDijkstraWrapper, int> dijkstra(&graph, &arc_lengths); + DijkstraWrapper dijkstra(&graph, &arc_lengths); - EXPECT_TRUE(dijkstra.OneToOneShortestPath(0, 2, 6)); - EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(0, 1)); + EXPECT_TRUE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(2), 6)); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(2)), + ElementsAre(ArcIndex(0), ArcIndex(1))); - EXPECT_TRUE(dijkstra.OneToOneShortestPath(0, 0, 1)); - EXPECT_THAT(dijkstra.ArcPathTo(0), ElementsAre()); + EXPECT_TRUE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(0), 1)); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(0)), ElementsAre()); - EXPECT_TRUE(dijkstra.OneToOneShortestPath(1, 2, 3)); - EXPECT_THAT(dijkstra.ArcPathTo(2), ElementsAre(1)); + EXPECT_TRUE(dijkstra.OneToOneShortestPath(NodeIndex(1), NodeIndex(2), 3)); + EXPECT_THAT(dijkstra.ArcPathTo(NodeIndex(2)), ElementsAre(ArcIndex(1))); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(0, 2, 5)); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(0, 0, 0)); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(1, 2, 2)); - EXPECT_FALSE(dijkstra.OneToOneShortestPath(2, 1, 1000)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(2), 5)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(0), NodeIndex(0), 0)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(1), NodeIndex(2), 2)); + EXPECT_FALSE(dijkstra.OneToOneShortestPath(NodeIndex(2), NodeIndex(0), 1000)); } TEST(BoundedDijkstraWrapperTest, CustomSettledNodeCallback) { // A small chain: 8 --[3]--> 1 --[2]--> 42 --[3]--> 3 --[2]--> 4. 
- ListGraph<> graph; - std::vector arc_lengths; - graph.AddArc(8, 1); + TestGraph graph; + DijkstraWrapper::ByArc arc_lengths; + graph.AddArc(NodeIndex(8), NodeIndex(1)); arc_lengths.push_back(3); - graph.AddArc(1, 42); + graph.AddArc(NodeIndex(1), NodeIndex(42)); arc_lengths.push_back(2); - graph.AddArc(42, 3); + graph.AddArc(NodeIndex(42), NodeIndex(3)); arc_lengths.push_back(3); - graph.AddArc(3, 4); + graph.AddArc(NodeIndex(3), NodeIndex(4)); arc_lengths.push_back(2); - typedef BoundedDijkstraWrapper, int> DijkstraType; + typedef DijkstraWrapper DijkstraType; DijkstraType dijkstra(&graph, &arc_lengths); // Tracks each NodeDistance it's called on, and sets the distance limit // to 10 if it gets called on node 42. - std::vector> settled_node_dists; - auto callback = [&settled_node_dists](int node, int distance, + std::vector> settled_node_dists; + auto callback = [&settled_node_dists](NodeIndex node, int distance, int* distance_limit) { settled_node_dists.push_back({node, distance}); - if (node == 42) *distance_limit = 10; + if (node == NodeIndex(42)) *distance_limit = 10; }; - EXPECT_THAT(dijkstra.RunBoundedDijkstraWithSettledNodeCallback({{8, 0}}, - callback, 999), - ElementsAre(8, 1, 42, 3)); + EXPECT_THAT( + dijkstra.RunBoundedDijkstraWithSettledNodeCallback({{NodeIndex(8), 0}}, + callback, 999), + ElementsAre(NodeIndex(8), NodeIndex(1), NodeIndex(42), NodeIndex(3))); EXPECT_THAT(settled_node_dists, - ElementsAre(Pair(8, 0), Pair(1, 3), Pair(42, 5), Pair(3, 8))); + ElementsAre(Pair(NodeIndex(8), 0), Pair(NodeIndex(1), 3), + Pair(NodeIndex(42), 5), Pair(NodeIndex(3), 8))); } TEST(BoundedDisjktraTest, RandomizedStressTest) { @@ -601,49 +647,51 @@ TEST(BoundedDisjktraTest, RandomizedStressTest) { constexpr int kint32max = std::numeric_limits::max(); for (int test = 0; test < kNumTests; ++test) { // Generate a random graph with random weights. 
- const int num_nodes = absl::Uniform(random, 1, 12); - const int num_arcs = - absl::Uniform(absl::IntervalClosed, random, 0, - std::min(num_nodes * (num_nodes - 1), 15)); - ListGraph<> graph(num_nodes, num_arcs); - for (int a = 0; a < num_arcs; ++a) { - graph.AddArc(absl::Uniform(random, 0, num_nodes), - absl::Uniform(random, 0, num_nodes)); + const NodeIndex num_nodes(absl::Uniform(random, 1, 12)); + const ArcIndex num_arcs(absl::Uniform( + absl::IntervalClosed, random, 0, + std::min(num_nodes.value() * (num_nodes.value() - 1), 15))); + TestGraph graph(num_nodes, num_arcs); + for (ArcIndex a(0); a < num_arcs; ++a) { + graph.AddArc(NodeIndex(absl::Uniform(random, 0, num_nodes.value())), + NodeIndex(absl::Uniform(random, 0, num_nodes.value()))); } - std::vector lengths(num_arcs); + DijkstraWrapper::ByArc lengths(num_arcs); for (int& w : lengths) w = absl::Uniform(random, 0, 5); // Run Floyd-Warshall as a 'reference' shortest path algorithm. - FlatMatrix ref_dist(num_nodes, num_nodes, kint32max); - for (int a = 0; a < num_arcs; ++a) { - int& d = ref_dist[graph.Tail(a)][graph.Head(a)]; + FlatMatrix ref_dist(num_nodes.value(), num_nodes.value(), kint32max); + for (ArcIndex a(0); a < num_arcs; ++a) { + int& d = ref_dist[graph.Tail(a).value()][graph.Head(a).value()]; if (lengths[a] < d) d = lengths[a]; } - for (int node = 0; node < num_nodes; ++node) { - ref_dist[node][node] = 0; + for (NodeIndex node(0); node < num_nodes; ++node) { + ref_dist[node.value()][node.value()] = 0; } - for (int k = 0; k < num_nodes; ++k) { - for (int i = 0; i < num_nodes; ++i) { - for (int j = 0; j < num_nodes; ++j) { + for (NodeIndex k(0); k < num_nodes; ++k) { + for (NodeIndex i(0); i < num_nodes; ++i) { + for (NodeIndex j(0); j < num_nodes; ++j) { const int64_t dist_through_k = - static_cast(ref_dist[i][k]) + ref_dist[k][j]; - if (dist_through_k < ref_dist[i][j]) ref_dist[i][j] = dist_through_k; + static_cast(ref_dist[i.value()][k.value()]) + + ref_dist[k.value()][j.value()]; + if 
(dist_through_k < ref_dist[i.value()][j.value()]) + ref_dist[i.value()][j.value()] = dist_through_k; } } } // Compute the graph's largest distance below kint32max. int max_distance = 0; - for (int i = 0; i < num_nodes; ++i) { - for (int j = 0; j < num_nodes; ++j) { - const int d = ref_dist[i][j]; + for (NodeIndex i(0); i < num_nodes; ++i) { + for (NodeIndex j(0); j < num_nodes; ++j) { + const int d = ref_dist[i.value()][j.value()]; if (d != kint32max && d > max_distance) max_distance = d; } } // Now, run some Dijkstras and verify that they match. To balance out the // FW (Floyd-Warshall) which is O(N³), we run more than one Dijkstra per FW. - BoundedDijkstraWrapper, int> dijkstra(&graph, &lengths); + DijkstraWrapper dijkstra(&graph, &lengths); for (int num_dijkstra = 0; num_dijkstra < 20; ++num_dijkstra) { // Draw the distance limit. const int limit = @@ -652,33 +700,34 @@ TEST(BoundedDisjktraTest, RandomizedStressTest) { : absl::Uniform(absl::IntervalClosed, random, 0, max_distance); // Draw sources (*with* repetition) with initial distances. const int num_sources = absl::Uniform(random, 1, 5); - std::vector> sources(num_sources); + std::vector> sources(num_sources); for (auto& [s, dist] : sources) { - s = absl::Uniform(random, 0, num_nodes); + s = NodeIndex(absl::Uniform(random, 0, num_nodes.value())); dist = absl::Uniform(absl::IntervalClosed, random, 0, max_distance + 1); } // Precompute the reference minimum distance to each node (using any of // the sources), and the expected reached nodes: any node whose distance // is < limit. That includes the sources: if a source's initial distance // is ≥ limit, it won't be reached.That includes the source themselves. 
- std::vector node_min_dist(num_nodes, kint32max); - std::vector expected_reached_nodes; - for (int node = 0; node < num_nodes; ++node) { + DijkstraWrapper::ByNode node_min_dist(num_nodes, kint32max); + DijkstraWrapper::ByNode expected_reached_nodes; + for (NodeIndex node(0); node < num_nodes; ++node) { int min_dist = kint32max; for (const auto& [src, dist] : sources) { // Cast to int64_t to avoid overflows. min_dist = std::min( - min_dist, static_cast(ref_dist[src][node]) + dist); + min_dist, + static_cast(ref_dist[src.value()][node.value()]) + dist); } node_min_dist[node] = min_dist; if (min_dist < limit) expected_reached_nodes.push_back(node); } - const std::vector reached_nodes = + const auto reached_nodes = dijkstra.RunBoundedDijkstraFromMultipleSources(sources, limit); EXPECT_THAT(reached_nodes, UnorderedElementsAreArray(expected_reached_nodes)); - for (const int node : reached_nodes) { + for (const NodeIndex node : reached_nodes) { EXPECT_EQ(dijkstra.distances()[node], node_min_dist[node]) << node; } ASSERT_FALSE(HasFailure()) @@ -697,7 +746,8 @@ void BM_GridGraph(benchmark::State& state) { const int kSourceNode = static_cast(kWidth * kHeight / 2); std::unique_ptr graph = util::Create2DGridGraph(/*width=*/kWidth, /*height=*/kHeight); - std::vector arc_lengths(graph->num_arcs(), 0); + BoundedDijkstraWrapper::ByArc arc_lengths( + graph->num_arcs(), 0); const int64_t min_length = arc_lengths_are_discrete ? 0 : 1; const int64_t max_length = arc_lengths_are_discrete ? 
2 : 1000000000000000L; std::mt19937 random(12345); diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index 73af4c944a..c8b7ef0b83 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -420,6 +420,8 @@ class Vector : public std::vector { template class SVector { public: + using value_type = T; + SVector() : base_(nullptr), size_(0), capacity_(0) {} ~SVector() { clear_and_dealloc(); } @@ -434,7 +436,7 @@ class SVector { capacity_ = other.size_; base_ = Allocate(capacity_); CHECK(base_ != nullptr); - base_ += capacity_; + base_ += static_cast(capacity_); } else { // capacity_ >= other.size clear(); } @@ -488,6 +490,9 @@ class SVector { T* data() const { return base_; } + const T* begin() const { return base_; } + const T* end() const { return base_ + static_cast(size_); } + void swap(SVector& x) noexcept { std::swap(base_, x.base_); std::swap(size_, x.size_); @@ -564,8 +569,9 @@ class SVector { // Copies other.base_ to base_ in this SVector. Safe for all types as it uses // constructor for each entry. void CopyInternal(const SVector& other, std::false_type) { - for (int i = -size_; i < size_; ++i) { - new (base_ + i) T(other.base_[i]); + for (IndexT i = -size_; i < size_; ++i) { + new (base_ + static_cast(i)) + T(other.base_[static_cast(i)]); } } @@ -1091,41 +1097,21 @@ class ReverseArcStaticGraph // TODO(user): consider slower but more memory efficient implementations that // follow the cycles of the permutation and use a bitmap to indicate what has // been permuted or to mark the beginning of each cycle. - -// Some compiler do not know typeof(), so we have to use this extra function -// internally. 
-template -void PermuteWithExplicitElementType(const IntVector& permutation, - Array& array_to_permute, - ElementType unused) { - std::vector temp(permutation.size()); - for (size_t i = 0; i < permutation.size(); ++i) { - temp[i] = array_to_permute[i]; - } - for (size_t i = 0; i < permutation.size(); ++i) { - array_to_permute[static_cast(permutation[i])] = temp[i]; - } -} - template void Permute(const IntVector& permutation, Array* array_to_permute) { if (permutation.empty()) { return; } - PermuteWithExplicitElementType(permutation, *array_to_permute, - (*array_to_permute)[0]); -} - -// We need a specialization for vector, because the default code uses -// (*array_to_permute)[0] as ElementType, which isn't 'bool' in that case. -template -void Permute(const IntVector& permutation, - std::vector* array_to_permute) { - if (permutation.empty()) { - return; + const auto size = permutation.size(); + auto& array = *array_to_permute; + using ElementType = + typename std::iterator_traits::value_type; + std::vector temp(size); + auto array_begin = std::begin(array); + std::copy_n(array_begin, size, temp.begin()); + for (size_t i = 0; i < permutation.size(); ++i) { + *(array_begin + static_cast(permutation[i])) = temp[i]; } - bool unused = false; - PermuteWithExplicitElementType(permutation, *array_to_permute, unused); } // BaseGraph implementation ---------------------------------------------------- diff --git a/ortools/graph/graph_io.h b/ortools/graph/graph_io.h index ffe455028e..82a2002a5e 100644 --- a/ortools/graph/graph_io.h +++ b/ortools/graph/graph_io.h @@ -97,12 +97,12 @@ std::string GraphToString(const Graph& graph, GraphToStringFormat format) { } else { // PRINT_GRAPH_ADJACENCY_LISTS[_SORTED] adj.clear(); for (const typename Graph::ArcIndex arc : graph.OutgoingArcs(node)) { - adj.push_back(graph.Head(arc)); + adj.push_back(static_cast(graph.Head(arc))); } if (format == PRINT_GRAPH_ADJACENCY_LISTS_SORTED) { std::sort(adj.begin(), adj.end()); } - if (node != 0) out += 
'\n'; + if (node != typename Graph::NodeIndex(0)) out += '\n'; absl::StrAppend(&out, static_cast(node), ": ", absl::StrJoin(adj, " ")); } diff --git a/ortools/graph/graph_test.cc b/ortools/graph/graph_test.cc index 29e85240e2..e690197385 100644 --- a/ortools/graph/graph_test.cc +++ b/ortools/graph/graph_test.cc @@ -24,6 +24,7 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/log/check.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" @@ -32,9 +33,11 @@ #include "gtest/gtest.h" #include "ortools/base/gmock.h" #include "ortools/base/strong_int.h" +#include "ortools/base/strong_vector.h" namespace util { +using testing::ElementsAre; using testing::Pair; using testing::UnorderedElementsAre; @@ -289,98 +292,144 @@ void ConstructAndCheckGraph( // Return the size of the memory block allocated by malloc when asking for x // bytes. -inline int UpperBoundOfMallocBlockSizeOf(int x) { +template +inline IndexType UpperBoundOfMallocBlockSizeOf(IndexType x) { // Note(user): as of 2012-09, the rule seems to be: round x up to the // next multiple of 16. // WARNING: This may change, and may already be wrong for small values. 
- return 16 * ((x + 15) / 16); + return IndexType((16 * (static_cast(x) + 15)) / 16); } -TEST(SVectorTest, DynamicGrowth) { - internal::SVector v; - EXPECT_EQ(0, v.size()); - EXPECT_EQ(0, v.capacity()); - for (int i = 0; i < 100; i++) { +template +class SVectorTest : public ::testing::Test {}; + +typedef ::testing::Types, std::pair, + std::pair, + std::pair> + TestSVectorIndexTypes; + +TYPED_TEST_SUITE(SVectorTest, TestSVectorIndexTypes); + +TYPED_TEST(SVectorTest, CopyMoveIterate) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + using VectorT = internal::SVector; + VectorT v; + v.resize(IndexT(2)); + v[IndexT(0)] = ValueT(1); + v[IndexT(1)] = ValueT(2); + + { + EXPECT_THAT(VectorT(v), ElementsAre(ValueT(1), ValueT(2))); + VectorT v2 = v; + EXPECT_THAT(v2, ElementsAre(ValueT(1), ValueT(2))); + EXPECT_THAT(v, ElementsAre(ValueT(1), ValueT(2))); + } + + { + VectorT v2 = std::move(v); + EXPECT_THAT(v2, ElementsAre(ValueT(1), ValueT(2))); + EXPECT_THAT(VectorT(std::move(v2)), ElementsAre(ValueT(1), ValueT(2))); + } +} + +TYPED_TEST(SVectorTest, DynamicGrowth) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + EXPECT_EQ(IndexT(0), v.size()); + EXPECT_EQ(IndexT(0), v.capacity()); + for (ValueT i(0); i < ValueT(100); i++) { v.grow(-i, i); } - EXPECT_EQ(100, v.size()); - EXPECT_GE(v.capacity(), 100); - EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 100; i++) { - EXPECT_EQ(-i, v[~i]); - EXPECT_EQ(i, v[i]); + EXPECT_EQ(IndexT(100), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (IndexT i(0); i < IndexT(100); ++i) { + EXPECT_EQ(ValueT(static_cast(-i)), v[~i]); + EXPECT_EQ(ValueT(static_cast(i)), v[i]); } } -TEST(SVectorTest, Reserve) { - internal::SVector v; - v.reserve(100); - EXPECT_EQ(0, v.size()); - EXPECT_GE(v.capacity(), 100); - 
EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 100; i++) { +TYPED_TEST(SVectorTest, Reserve) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + v.reserve(IndexT(100)); + EXPECT_EQ(IndexT(0), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (ValueT i(0); i < ValueT(100); i++) { v.grow(-i, i); } - EXPECT_EQ(100, v.size()); - EXPECT_GE(v.capacity(), 100); - EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 10; i++) { - EXPECT_EQ(-i, v[~i]); - EXPECT_EQ(i, v[i]); + EXPECT_EQ(IndexT(100), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (IndexT i(0); i < IndexT(10); i++) { + EXPECT_EQ(ValueT(static_cast(-i)), v[~i]); + EXPECT_EQ(ValueT(static_cast(i)), v[i]); } } -TEST(SVectorTest, Resize) { - internal::SVector v; - v.resize(100); - EXPECT_EQ(100, v.size()); - EXPECT_GE(v.capacity(), 100); - EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(100)); - for (int i = 0; i < 100; i++) { - EXPECT_EQ(0, v[-i - 1]); - EXPECT_EQ(0, v[i]); +TYPED_TEST(SVectorTest, Resize) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + v.resize(IndexT(100)); + EXPECT_EQ(IndexT(100), v.size()); + EXPECT_GE(v.capacity(), IndexT(100)); + EXPECT_LE(v.capacity(), UpperBoundOfMallocBlockSizeOf(IndexT(100))); + for (IndexT i(0); i < IndexT(100); ++i) { + EXPECT_EQ(ValueT(0), v[-i - IndexT(1)]); + EXPECT_EQ(ValueT(0), v[i]); } } -TEST(SVectorTest, ResizeToZero) { - internal::SVector s; - s.resize(1); - s.resize(0); - EXPECT_EQ(0, s.size()); +TYPED_TEST(SVectorTest, ResizeToZero) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector v; + v.resize(IndexT(1)); + 
v.resize(IndexT(0)); + EXPECT_EQ(IndexT(0), v.size()); } -TEST(SVectorTest, Swap) { - internal::SVector s; - internal::SVector t; - s.resize(1); - s[0] = 's'; - s[-1] = 's'; - t.resize(2); - for (int i = -2; i <= 1; ++i) { - t[i] = 't'; +TYPED_TEST(SVectorTest, Swap) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector s; + internal::SVector t; + s.resize(IndexT(1)); + s[IndexT(0)] = ValueT('s'); + s[IndexT(-1)] = ValueT('s'); + t.resize(IndexT(2)); + for (IndexT i(-2); i <= IndexT(1); ++i) { + t[i] = ValueT('t'); } s.swap(t); - EXPECT_EQ(1, t.size()); - EXPECT_EQ('s', t[-1]); - EXPECT_EQ('s', t[0]); - EXPECT_EQ(2, s.size()); - EXPECT_EQ('t', s[-2]); - EXPECT_EQ('t', s[-1]); - EXPECT_EQ('t', s[0]); - EXPECT_EQ('t', s[1]); + EXPECT_EQ(IndexT(1), t.size()); + EXPECT_EQ(ValueT('s'), t[IndexT(-1)]); + EXPECT_EQ(ValueT('s'), t[IndexT(0)]); + EXPECT_EQ(IndexT(2), s.size()); + EXPECT_EQ(ValueT('t'), s[IndexT(-2)]); + EXPECT_EQ(ValueT('t'), s[IndexT(-1)]); + EXPECT_EQ(ValueT('t'), s[IndexT(0)]); + EXPECT_EQ(ValueT('t'), s[IndexT(1)]); } -TEST(SVectorTest, SwapAndDestroy) { - internal::SVector s; +TYPED_TEST(SVectorTest, SwapAndDestroy) { + using IndexT = typename TypeParam::first_type; + using ValueT = typename TypeParam::second_type; + internal::SVector s; { - internal::SVector t; - t.resize(2); - t[-2] = 42; + internal::SVector t; + t.resize(IndexT(2)); + t[IndexT(-2)] = ValueT(42); t.swap(s); } - EXPECT_EQ(2, s.size()); - EXPECT_EQ(42, s[-2]); - EXPECT_EQ(0, s[1]); + EXPECT_EQ(IndexT(2), s.size()); + EXPECT_EQ(ValueT(42), s[IndexT(-2)]); + EXPECT_EQ(ValueT(0), s[IndexT(1)]); } // Use a more complex type to better check the invocations of @@ -458,7 +507,7 @@ class MoveOnlyObject { int MoveOnlyObject::sequence_ = 1; int MoveOnlyObject::object_count_ = 0; -TEST(SVectorTest, MoveWithMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, MoveWithMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); 
internal::SVector a; a.resize(10); @@ -472,7 +521,7 @@ TEST(SVectorTest, MoveWithMoveOnlyObject) { EXPECT_EQ(0, a.size()); // NOLINT } -TEST(SVectorTest, ShrinkWithMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, ShrinkWithMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); { internal::SVector a; @@ -484,7 +533,7 @@ TEST(SVectorTest, ShrinkWithMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); } -TEST(SVectorTest, GrowMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, GrowMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); { internal::SVector a; @@ -501,7 +550,7 @@ TEST(SVectorTest, GrowMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); } -TEST(SVectorTest, ReserveMoveOnlyObject) { +TEST(SVectorMoveOnlyTest, ReserveMoveOnlyObject) { EXPECT_EQ(0, MoveOnlyObject::GetObjectCount()); { internal::SVector a; @@ -554,7 +603,7 @@ int TrackedObject::num_destructions = 0; int TrackedObject::num_moves = 0; int TrackedObject::num_copies = 0; -TEST(SVectorTest, CopyConstructor) { +TEST(SVectorTrackingTest, CopyConstructor) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -573,7 +622,7 @@ TEST(SVectorTest, CopyConstructor) { ASSERT_EQ(v_copy.size(), 5); } -TEST(SVectorTest, AssignmentOperator) { +TEST(SVectorTrackingTest, AssignmentOperator) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -595,7 +644,7 @@ TEST(SVectorTest, AssignmentOperator) { ASSERT_EQ(other.size(), 5); } -TEST(SVectorTest, CopyConstructorIntegralType) { +TEST(SVectorTrackingTest, CopyConstructorIntegralType) { auto v = internal::SVector(); v.resize(3); v[-3] = 1; @@ -613,7 +662,7 @@ TEST(SVectorTest, CopyConstructorIntegralType) { } } -TEST(SVectorTest, AssignmentOperatorIntegralType) { +TEST(SVectorTrackingTest, AssignmentOperatorIntegralType) { internal::SVector other; auto v = 
internal::SVector(); v.resize(3); @@ -632,7 +681,7 @@ TEST(SVectorTest, AssignmentOperatorIntegralType) { } } -TEST(SVectorTest, MoveConstructor) { +TEST(SVectorTrackingTest, MoveConstructor) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -650,7 +699,7 @@ TEST(SVectorTest, MoveConstructor) { ASSERT_EQ(b.size(), 5); } -TEST(SVectorTest, MoveAssignmentOperator) { +TEST(SVectorTrackingTest, MoveAssignmentOperator) { TrackedObject::ResetCounters(); ASSERT_EQ(TrackedObject::Counters(), "constructions: 0, destructions: 0, moves: 0, copies: 0"); @@ -1011,6 +1060,28 @@ TEST(SVector, NoHeapCheckerFalsePositive) { EXPECT_EQ(kVector->size(), 5000); } +TEST(Permute, IntArray) { + int array[] = {4, 5, 6}; + std::vector permutation = {0, 2, 1}; + util::Permute(permutation, &array); + EXPECT_THAT(array, ElementsAre(4, 6, 5)); +} + +TEST(Permute, BoolVector) { + std::vector array = {true, false, true}; + std::vector permutation = {0, 2, 1}; + util::Permute(permutation, &array); + EXPECT_THAT(array, ElementsAre(true, true, false)); +} + +TEST(Permute, StrongVector) { + util_intops::StrongVector array = {4, 5, 6}; + std::vector permutation = {StrongArcId(0), StrongArcId(2), + StrongArcId(1)}; + util::Permute(permutation, &array); + EXPECT_THAT(array, ElementsAre(4, 6, 5)); +} + template static void BM_RandomArcs(benchmark::State& state) { const int kRandomSeed = 0; @@ -1304,4 +1375,23 @@ static void BM_CompleteBipartiteGraphTailHead(benchmark::State& state) { BENCHMARK_TEMPLATE(BM_CompleteBipartiteGraphTailHead, int32_t); BENCHMARK_TEMPLATE(BM_CompleteBipartiteGraphTailHead, int16_t); +template +void BM_Permute(benchmark::State& state) { + const int size = state.range(0); + ArrayT array(size); + + std::vector permutation(size); + absl::c_iota(permutation, IndexT(0)); + + for (const auto s : state) { + util::Permute(permutation, &array); + benchmark::DoNotOptimize(array); + 
benchmark::DoNotOptimize(permutation); + } +} +BENCHMARK(BM_Permute, StrongArcId>) + ->Arg(128); +BENCHMARK(BM_Permute, int>)->Arg(128); +BENCHMARK(BM_Permute, int>)->Arg(128); + } // namespace util diff --git a/ortools/graph/samples/assignment_linear_sum_assignment.py b/ortools/graph/samples/assignment_linear_sum_assignment.py index 82af30d560..c662741f64 100755 --- a/ortools/graph/samples/assignment_linear_sum_assignment.py +++ b/ortools/graph/samples/assignment_linear_sum_assignment.py @@ -18,6 +18,7 @@ import numpy as np from ortools.graph.python import linear_sum_assignment + # [END import] diff --git a/ortools/graph/samples/assignment_min_flow.py b/ortools/graph/samples/assignment_min_flow.py index 0d55ed20c9..1e4f56387a 100755 --- a/ortools/graph/samples/assignment_min_flow.py +++ b/ortools/graph/samples/assignment_min_flow.py @@ -16,6 +16,7 @@ """Linear assignment example.""" # [START import] from ortools.graph.python import min_cost_flow + # [END import] diff --git a/ortools/graph/samples/balance_min_flow.py b/ortools/graph/samples/balance_min_flow.py index 923ff22a85..688c9c79ad 100755 --- a/ortools/graph/samples/balance_min_flow.py +++ b/ortools/graph/samples/balance_min_flow.py @@ -16,6 +16,7 @@ """Assignment with teams of workers.""" # [START import] from ortools.graph.python import min_cost_flow + # [END import] diff --git a/ortools/graph/samples/dijkstra_directed.cc b/ortools/graph/samples/dijkstra_directed.cc index 046f15d1d9..20c8a508c8 100644 --- a/ortools/graph/samples/dijkstra_directed.cc +++ b/ortools/graph/samples/dijkstra_directed.cc @@ -50,8 +50,8 @@ int main(int argc, char** argv) { // Solve the shortest path problem from 0 to 5. std::pair> result = - operations_research::SimpleOneToOneShortestPath(0, 5, tails, heads, - lengths); + operations_research::SimpleOneToOneShortestPath(0, 5, tails, + heads, lengths); // Print to length of the path and then the nodes in the path. 
std::cout << "Shortest path length: " << result.first << std::endl; diff --git a/ortools/graph/samples/dijkstra_undirected.cc b/ortools/graph/samples/dijkstra_undirected.cc index f51645691b..84bef36fee 100644 --- a/ortools/graph/samples/dijkstra_undirected.cc +++ b/ortools/graph/samples/dijkstra_undirected.cc @@ -59,8 +59,8 @@ int main(int argc, char** argv) { // Solve the shortest path problem from 0 to 4. std::pair> result = - operations_research::SimpleOneToOneShortestPath(0, 4, tails, heads, - lengths); + operations_research::SimpleOneToOneShortestPath(0, 4, tails, + heads, lengths); // Print to length of the path and then the nodes in the path. std::cout << "Shortest path length: " << result.first << std::endl; diff --git a/ortools/graph/samples/simple_max_flow_program.py b/ortools/graph/samples/simple_max_flow_program.py index 38bd192247..43820f3db8 100755 --- a/ortools/graph/samples/simple_max_flow_program.py +++ b/ortools/graph/samples/simple_max_flow_program.py @@ -18,6 +18,7 @@ import numpy as np from ortools.graph.python import max_flow + # [END import] diff --git a/ortools/graph/samples/simple_min_cost_flow_program.py b/ortools/graph/samples/simple_min_cost_flow_program.py index 4e0e0afd56..390e7bdbae 100755 --- a/ortools/graph/samples/simple_min_cost_flow_program.py +++ b/ortools/graph/samples/simple_min_cost_flow_program.py @@ -18,6 +18,7 @@ import numpy as np from ortools.graph.python import min_cost_flow + # [END import] From 478c8b53094ba08b9de0fdd4a6c607d1372f0cfa Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 16 Jun 2025 17:55:44 +0200 Subject: [PATCH 28/81] cleanup and formatting --- ortools/base/dump_vars.h | 3 ++- ortools/base/dump_vars_test.cc | 3 ++- ortools/linear_solver/model_exporter.cc | 2 +- ortools/linear_solver/xpress_interface.cc | 5 +++-- ortools/linear_solver/xpress_interface_test.cc | 2 +- ortools/math_opt/io/proto_converter.cc | 2 +- 6 files changed, 10 insertions(+), 7 deletions(-) diff --git 
a/ortools/base/dump_vars.h b/ortools/base/dump_vars.h index 61e6073084..b2814c2e53 100644 --- a/ortools/base/dump_vars.h +++ b/ortools/base/dump_vars.h @@ -142,7 +142,8 @@ std::ostream& operator<<(std::ostream& os, const ::std::optional& opt) { // needed by graph tests template -std::ostream& operator<<(std::ostream& os, const ::util_intops::StrongVector& vec) { +std::ostream& operator<<(std::ostream& os, + const ::util_intops::StrongVector& vec) { for (U it : vec) { os << ::std::to_string(it) << ','; } diff --git a/ortools/base/dump_vars_test.cc b/ortools/base/dump_vars_test.cc index 2dccc6381d..81b4e5ae8d 100644 --- a/ortools/base/dump_vars_test.cc +++ b/ortools/base/dump_vars_test.cc @@ -137,7 +137,8 @@ TEST(DumpVars, StrongInt) { } TEST(DumpVars, StrongVector) { - ::util_intops::StrongVector<::util_intops::CustomStrongInt, float> vec = {49.3, 3.14}; + ::util_intops::StrongVector<::util_intops::CustomStrongInt, float> vec = { + 49.3, 3.14}; EXPECT_EQ(R"(vec = 49.299999,3.140000,)", ToString(DUMP_VARS(vec))); EXPECT_EQ(R"(vec = 49.299999,3.140000,)", DUMP_VARS(vec).str()); } diff --git a/ortools/linear_solver/model_exporter.cc b/ortools/linear_solver/model_exporter.cc index 51a3727677..3554e1961d 100644 --- a/ortools/linear_solver/model_exporter.cc +++ b/ortools/linear_solver/model_exporter.cc @@ -57,7 +57,7 @@ class LineBreaker { // Returns true if string s will fit on the current line without adding a // carriage return. 
- bool WillFit(const std::string& s) { + bool WillFit(absl::string_view s) { return line_size_ + static_cast(s.size()) < max_line_size_; } diff --git a/ortools/linear_solver/xpress_interface.cc b/ortools/linear_solver/xpress_interface.cc index 0f88bc861c..89f6d4653f 100644 --- a/ortools/linear_solver/xpress_interface.cc +++ b/ortools/linear_solver/xpress_interface.cc @@ -20,8 +20,8 @@ #include #include #include -#include #include +#include #include "absl/strings/numbers.h" #include "absl/strings/str_format.h" @@ -1094,7 +1094,8 @@ void XpressInterface::SetCoefficient(MPConstraint* const constraint, double new_value, double) { InvalidateSolutionSynchronization(); - fixedOrderCoefficientsPerConstraint[constraint->index()][variable->index()] = new_value; + fixedOrderCoefficientsPerConstraint[constraint->index()][variable->index()] = + new_value; // Changing a single coefficient in the matrix is potentially pretty // slow since that coefficient has to be found in the sparse matrix diff --git a/ortools/linear_solver/xpress_interface_test.cc b/ortools/linear_solver/xpress_interface_test.cc index 16925a1e36..35c0cfcaf0 100644 --- a/ortools/linear_solver/xpress_interface_test.cc +++ b/ortools/linear_solver/xpress_interface_test.cc @@ -159,7 +159,7 @@ class XPRSGetter { std::string value(280, '\0'); int valueSize; EXPECT_STATUS(XPRSgetstringattrib(prob(), attrib, &value[0], value.size(), - &valueSize)); + &valueSize)); value.resize(valueSize - 1); return value; } diff --git a/ortools/math_opt/io/proto_converter.cc b/ortools/math_opt/io/proto_converter.cc index 96ffaf24d0..8dd21764c3 100644 --- a/ortools/math_opt/io/proto_converter.cc +++ b/ortools/math_opt/io/proto_converter.cc @@ -317,7 +317,7 @@ MPModelProtoToMathOptModel(const ::operations_research::MPModelProto& model) { for (const MPGeneralConstraintProto& general_constraint : model.general_constraint()) { - const std::string& in_name = general_constraint.name(); + absl::string_view in_name = general_constraint.name(); 
switch (general_constraint.general_constraint_case()) { case MPGeneralConstraintProto::kQuadraticConstraint: { (*output.mutable_quadratic_constraints()) From 8e1e6b5e97422bbf0a063246fb0b7c2348a467e1 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Tue, 17 Jun 2025 11:08:04 +0200 Subject: [PATCH 29/81] dependencies: bump Protobuf from v31.0 to v31.1 --- Dependencies.txt | 2 +- MODULE.bazel | 2 +- bazel/notebook_requirements.in | 2 +- bazel/notebook_requirements.txt | 2 +- bazel/ortools_requirements.in | 2 +- bazel/ortools_requirements.txt | 2 +- cmake/dependencies/CMakeLists.txt | 4 ++-- cmake/host.CMakeLists.txt | 4 ++-- ortools/dotnet/Google.OrTools-full.csproj.in | 2 +- ortools/dotnet/Google.OrTools-local.csproj.in | 2 +- ortools/java/pom-full.xml.in | 2 +- ortools/java/pom-local.xml.in | 2 +- ortools/python/setup.py.in | 2 +- patches/BUILD.bazel | 2 +- patches/fuzztest-2025-02-14.patch | 2 +- patches/{protobuf-v31.0.patch => protobuf-v31.1.patch} | 0 16 files changed, 17 insertions(+), 17 deletions(-) rename patches/{protobuf-v31.0.patch => protobuf-v31.1.patch} (100%) diff --git a/Dependencies.txt b/Dependencies.txt index f0813f2c1d..5d072a8acb 100644 --- a/Dependencies.txt +++ b/Dependencies.txt @@ -1,6 +1,6 @@ ZLIB=1.3.1 abseil-cpp=20250512.0 -Protobuf=v31.0 +Protobuf=v31.1 Eigen=3.4.0 Re2=2024-07-02 CoinUtils=2.11.12 diff --git a/MODULE.bazel b/MODULE.bazel index 8bac0ad7d2..4c1ba603ff 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -26,7 +26,7 @@ bazel_dep(name = "google_benchmark", version = "1.9.2") bazel_dep(name = "googletest", version = "1.17.0") bazel_dep(name = "highs", version = "1.11.0") bazel_dep(name = "platforms", version = "0.0.11") -bazel_dep(name = "protobuf", version = "31.0") +bazel_dep(name = "protobuf", version = "31.1") bazel_dep(name = "pybind11_abseil", version = "202402.0") bazel_dep(name = "pybind11_bazel", version = "2.13.6") bazel_dep(name = "pybind11_protobuf", version = "0.0.0-20240524-1d7a729") diff --git 
a/bazel/notebook_requirements.in b/bazel/notebook_requirements.in index c557b3cb6c..c2d02e6fcb 100644 --- a/bazel/notebook_requirements.in +++ b/bazel/notebook_requirements.in @@ -2,7 +2,7 @@ absl-py==2.2.2 immutabledict==4.2.1 numpy==2.2.0 -protobuf==6.31.0 +protobuf==6.31.1 requests==2.32.4 scipy==1.14.1 typing-extensions==4.13.1 diff --git a/bazel/notebook_requirements.txt b/bazel/notebook_requirements.txt index b76f09dba1..b7f2e80591 100644 --- a/bazel/notebook_requirements.txt +++ b/bazel/notebook_requirements.txt @@ -215,7 +215,7 @@ prometheus-client==0.22.1 # via jupyter-server prompt-toolkit==3.0.51 # via ipython -protobuf==6.31.0 +protobuf==6.31.1 # via # -r bazel/notebook_requirements.in # mypy-protobuf diff --git a/bazel/ortools_requirements.in b/bazel/ortools_requirements.in index 3d3a8acfe2..e893a8b629 100644 --- a/bazel/ortools_requirements.in +++ b/bazel/ortools_requirements.in @@ -2,7 +2,7 @@ absl-py==2.2.2 immutabledict==4.2.1 numpy==2.2.0 -protobuf==6.31.0 +protobuf==6.31.1 requests==2.32.4 scipy==1.14.1 typing-extensions==4.13.1 diff --git a/bazel/ortools_requirements.txt b/bazel/ortools_requirements.txt index 2c99af6946..f9781ecf8d 100644 --- a/bazel/ortools_requirements.txt +++ b/bazel/ortools_requirements.txt @@ -45,7 +45,7 @@ platformdirs==3.10.0 # via # black # virtualenv -protobuf==6.31.0 +protobuf==6.31.1 # via # -r bazel/ortools_requirements.in # mypy-protobuf diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 926e51c047..80fdbb1b37 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -136,11 +136,11 @@ if(BUILD_Protobuf) FetchContent_Declare( Protobuf GIT_REPOSITORY "https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v31.0" + GIT_TAG "v31.1" GIT_SHALLOW TRUE GIT_SUBMODULES "" PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v31.0.patch" + "${CMAKE_CURRENT_LIST_DIR}/../../patches/protobuf-v31.1.patch" ) 
FetchContent_MakeAvailable(Protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/cmake/host.CMakeLists.txt b/cmake/host.CMakeLists.txt index 970c4e0e40..6b63f17257 100644 --- a/cmake/host.CMakeLists.txt +++ b/cmake/host.CMakeLists.txt @@ -125,11 +125,11 @@ set(protobuf_WITH_ZLIB OFF) FetchContent_Declare( protobuf GIT_REPOSITORY "https://github.com/protocolbuffers/protobuf.git" - GIT_TAG "v31.0" + GIT_TAG "v31.1" GIT_SHALLOW TRUE GIT_SUBMODULES "" PATCH_COMMAND git apply --ignore-whitespace - "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v31.0.patch" + "${CMAKE_CURRENT_LIST_DIR}/@PATCHES_PATH@/protobuf-v31.1.patch" ) FetchContent_MakeAvailable(protobuf) list(POP_BACK CMAKE_MESSAGE_INDENT) diff --git a/ortools/dotnet/Google.OrTools-full.csproj.in b/ortools/dotnet/Google.OrTools-full.csproj.in index 727092df98..9945e956c0 100644 --- a/ortools/dotnet/Google.OrTools-full.csproj.in +++ b/ortools/dotnet/Google.OrTools-full.csproj.in @@ -184,7 +184,7 @@ - + diff --git a/ortools/dotnet/Google.OrTools-local.csproj.in b/ortools/dotnet/Google.OrTools-local.csproj.in index eb5a3eff5b..e07af1825a 100644 --- a/ortools/dotnet/Google.OrTools-local.csproj.in +++ b/ortools/dotnet/Google.OrTools-local.csproj.in @@ -172,7 +172,7 @@ - + diff --git a/ortools/java/pom-full.xml.in b/ortools/java/pom-full.xml.in index ffde245eac..791f7b6f3f 100644 --- a/ortools/java/pom-full.xml.in +++ b/ortools/java/pom-full.xml.in @@ -109,7 +109,7 @@ com.google.protobuf protobuf-java - 4.31.0 + 4.31.1 diff --git a/ortools/java/pom-local.xml.in b/ortools/java/pom-local.xml.in index d03b19413b..64b2c51221 100644 --- a/ortools/java/pom-local.xml.in +++ b/ortools/java/pom-local.xml.in @@ -81,7 +81,7 @@ com.google.protobuf protobuf-java - 4.31.0 + 4.31.1 diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index befa15ce04..0aeaa900ef 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -46,7 +46,7 @@ setup( 'absl-py >= 2.0.0', 'numpy >= 1.13.3', 'pandas >= 
2.0.0', - 'protobuf >= 6.31.0,<6.32', + 'protobuf >= 6.31.1,<6.32', 'typing-extensions >= 4.12', 'immutabledict >= 3.0.0', ], diff --git a/patches/BUILD.bazel b/patches/BUILD.bazel index 28b25b4abe..f73a6d5b4b 100644 --- a/patches/BUILD.bazel +++ b/patches/BUILD.bazel @@ -13,7 +13,7 @@ exports_files([ "abseil-cpp-20250512.0.patch", - "protobuf-v31.0.patch", + "protobuf-v31.1.patch", "pybind11_bazel.patch", "pybind11_abseil.patch", "pybind11_protobuf.patch", diff --git a/patches/fuzztest-2025-02-14.patch b/patches/fuzztest-2025-02-14.patch index 053736fbb7..d288eb5418 100644 --- a/patches/fuzztest-2025-02-14.patch +++ b/patches/fuzztest-2025-02-14.patch @@ -36,7 +36,7 @@ index 1f4f08d..cc4d0ba 100644 set(proto_URL https://github.com/protocolbuffers/protobuf.git) -set(proto_TAG v28.2) -+set(proto_TAG v31.0) ++set(proto_TAG v31.1) set(nlohmann_json_URL https://github.com/nlohmann/json.git) set(nlohmann_json_TAG v3.11.2) diff --git a/patches/protobuf-v31.0.patch b/patches/protobuf-v31.1.patch similarity index 100% rename from patches/protobuf-v31.0.patch rename to patches/protobuf-v31.1.patch From 0d60e8afe450ec817f510aae965ab8898310cb41 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Tue, 17 Jun 2025 16:25:08 +0200 Subject: [PATCH 30/81] tools/release: more log on macos --- tools/release/build_delivery_macos.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index c6e62e3aa7..7070cd050b 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -287,13 +287,14 @@ function build_python() { echo "DONE" | tee -a build.log if [[ ${PLATFORM} == "x86_64" ]]; then - echo -n " Build all..." | tee -a build.log - # on macos X86_64 stubgen will timeout -> need to build 2 times - cmake --build "temp_python${PY_VERSION}" -j8 -v || true + echo -n " Build all few times..." 
| tee -a build.log + # on macos X86_64 stubgen will timeout -> need to build few times + cmake --build "temp_python${PY_VERSION}" -j4 -v || true + sleep 10 + cmake --build "temp_python${PY_VERSION}" -v || true echo "DONE" | tee -a build.log - sleep 5 echo -n " ReBuild all..." | tee -a build.log - cmake --build "temp_python${PY_VERSION}" -j8 -v + cmake --build "temp_python${PY_VERSION}" -j4 -v echo "DONE" | tee -a build.log else echo -n " Build all..." | tee -a build.log From 330a0efa28ae7aff2de0de9593786a5dee15883d Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 20 Jun 2025 15:11:01 +0200 Subject: [PATCH 31/81] print a solution after a SIGTERM --- ortools/sat/sat_runner.cc | 180 ++++++++++++++++++++++++++++---------- ortools/util/sigint.cc | 36 ++++++-- ortools/util/sigint.h | 20 ++++- 3 files changed, 176 insertions(+), 60 deletions(-) diff --git a/ortools/sat/sat_runner.cc b/ortools/sat/sat_runner.cc index c31a0e2b27..c1dceb038b 100644 --- a/ortools/sat/sat_runner.cc +++ b/ortools/sat/sat_runner.cc @@ -16,9 +16,11 @@ #include #include #include +#include #include #include +#include "absl/base/thread_annotations.h" #include "absl/flags/flag.h" #include "absl/flags/parse.h" #include "absl/flags/usage.h" @@ -30,6 +32,8 @@ #include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" +#include "absl/synchronization/mutex.h" +#include "absl/types/span.h" #include "google/protobuf/arena.h" #include "google/protobuf/text_format.h" #include "ortools/base/helpers.h" @@ -45,6 +49,7 @@ #include "ortools/sat/synchronization.h" #include "ortools/util/file_util.h" #include "ortools/util/logging.h" +#include "ortools/util/sigint.h" #include "ortools/util/sorted_interval_list.h" ABSL_FLAG( @@ -102,8 +107,69 @@ std::string ExtractName(absl::string_view full_filename) { return filename; } -void LogInPbCompetitionFormat(int num_variables, bool has_objective, - Model* model, SatParameters* parameters) { +class 
LastSolutionPrinter { + public: + // Note that it prints the solution in the PB competition format. + void MaybePrintLastSolution() { + absl::MutexLock lock(&mutex_); + if (last_solution_printed_) return; + last_solution_printed_ = true; + + if (last_solution_.empty()) { + std::cout << "s UNKNOWN" << std::endl; + } else { + std::cout << "s SATISFIABLE" << std::endl; + std::string line; + for (int i = 0; i < num_variables_; ++i) { + if (last_solution_[i]) { + absl::StrAppend(&line, "x", i + 1, " "); + } else { + absl::StrAppend(&line, "-x", i + 1, " "); + } + if (line.size() >= 75) { + std::cout << "v " << line << std::endl; + line.clear(); + } + } + if (!line.empty()) { + std::cout << "v " << line << std::endl; + } + } + } + + void set_num_variables(int num_variables) { num_variables_ = num_variables; } + + void set_last_solution(absl::Span solution) { + absl::MutexLock lock(&mutex_); + if (last_solution_printed_) return; + last_solution_.assign(solution.begin(), solution.end()); + } + + // Returns false if the solution has already been printed, else mark it as + // printed by caller code. 
+ bool mark_last_solution_printed() { + const absl::MutexLock lock(&mutex_); + if (last_solution_printed_) { + return false; + } + last_solution_printed_ = true; + return true; + } + + private: + int num_variables_ = 0; + std::vector last_solution_ ABSL_GUARDED_BY(mutex_); + bool last_solution_printed_ ABSL_GUARDED_BY(mutex_) = false; + absl::Mutex mutex_; +}; + +void LogInPbCompetitionFormat( + int num_variables, bool has_objective, Model* model, + SatParameters* parameters, + std::shared_ptr last_solution_printer) { + CHECK(last_solution_printer != nullptr); + last_solution_printer->set_num_variables(num_variables); + const auto log_callback = [](const std::string& multi_line_input) { if (multi_line_input.empty()) { std::cout << "c" << std::endl; @@ -118,55 +184,60 @@ void LogInPbCompetitionFormat(int num_variables, bool has_objective, model->GetOrCreate()->AddInfoLoggingCallback(log_callback); parameters->set_log_to_stdout(false); - const auto response_callback = [](const CpSolverResponse& r) { + const auto response_callback = [last_solution_printer]( + const CpSolverResponse& r) { std::cout << "o " << static_cast(r.objective_value()) << std::endl; + last_solution_printer->set_last_solution(r.solution()); }; model->Add(NewFeasibleSolutionObserver(response_callback)); - const auto final_response_callback = [num_variables, - has_objective](CpSolverResponse* r) { - switch (r->status()) { - case CpSolverStatus::OPTIMAL: - if (has_objective) { - std::cout << "s OPTIMUM FOUND " << std::endl; - } else { - std::cout << "s SATISFIABLE" << std::endl; + const auto final_response_callback = + [num_variables, has_objective, + last_solution_printer](CpSolverResponse* r) { + if (!last_solution_printer->mark_last_solution_printed()) return; + + switch (r->status()) { + case CpSolverStatus::OPTIMAL: + if (has_objective) { + std::cout << "s OPTIMUM FOUND " << std::endl; + } else { + std::cout << "s SATISFIABLE" << std::endl; + } + break; + case CpSolverStatus::FEASIBLE: + 
std::cout << "s SATISFIABLE" << std::endl; + break; + case CpSolverStatus::INFEASIBLE: + std::cout << "s UNSATISFIABLE" << std::endl; + break; + case CpSolverStatus::MODEL_INVALID: + std::cout << "s UNSUPPORTED" << std::endl; + break; + case CpSolverStatus::UNKNOWN: + std::cout << "s UNKNOWN" << std::endl; + break; + default: + break; } - break; - case CpSolverStatus::FEASIBLE: - std::cout << "s SATISFIABLE" << std::endl; - break; - case CpSolverStatus::INFEASIBLE: - std::cout << "s UNSATISFIABLE" << std::endl; - break; - case CpSolverStatus::MODEL_INVALID: - std::cout << "s UNSUPPORTED" << std::endl; - break; - case CpSolverStatus::UNKNOWN: - std::cout << "s UNKNOWN" << std::endl; - break; - default: - break; - } - if (r->status() == CpSolverStatus::OPTIMAL || - r->status() == CpSolverStatus::FEASIBLE) { - std::string line; - for (int i = 0; i < num_variables; ++i) { - if (r->solution(i)) { - absl::StrAppend(&line, "x", i + 1, " "); - } else { - absl::StrAppend(&line, "-x", i + 1, " "); + if (r->status() == CpSolverStatus::OPTIMAL || + r->status() == CpSolverStatus::FEASIBLE) { + std::string line; + for (int i = 0; i < num_variables; ++i) { + if (r->solution(i)) { + absl::StrAppend(&line, "x", i + 1, " "); + } else { + absl::StrAppend(&line, "-x", i + 1, " "); + } + if (line.size() >= 75) { + std::cout << "v " << line << std::endl; + line.clear(); + } + } + if (!line.empty()) { + std::cout << "v " << line << std::endl; + } } - if (line.size() >= 75) { - std::cout << "v " << line << std::endl; - line.clear(); - } - } - if (!line.empty()) { - std::cout << "v " << line << std::endl; - } - } - }; + }; model->GetOrCreate()->AddFinalResponsePostprocessor( final_response_callback); } @@ -186,7 +257,8 @@ void SetInterleavedWorkers(SatParameters* parameters) { bool LoadProblem(const std::string& filename, absl::string_view hint_file, absl::string_view domain_file, CpModelProto* cp_model, - Model* model, SatParameters* parameters) { + Model* model, SatParameters* 
parameters, + std::shared_ptr last_solution_printer) { if (absl::EndsWith(filename, ".opb") || absl::EndsWith(filename, ".opb.bz2") || absl::EndsWith(filename, ".opb.gz") || absl::EndsWith(filename, ".wbo") || @@ -217,7 +289,7 @@ bool LoadProblem(const std::string& filename, absl::string_view hint_file, const int num_variables = reader.model_is_supported() ? reader.num_variables() : 1; LogInPbCompetitionFormat(num_variables, cp_model->has_objective(), model, - parameters); + parameters, last_solution_printer); } if (absl::GetFlag(FLAGS_force_interleave_search)) { SetInterleavedWorkers(parameters); @@ -310,9 +382,13 @@ int Run() { google::protobuf::Arena arena; CpModelProto* cp_model = google::protobuf::Arena::Create(&arena); + std::shared_ptr last_solution_printer; + if (absl::GetFlag(FLAGS_competition_mode)) { + last_solution_printer = std::make_shared(); + } if (!LoadProblem(absl::GetFlag(FLAGS_input), absl::GetFlag(FLAGS_hint_file), absl::GetFlag(FLAGS_domain_file), cp_model, &model, - ¶meters)) { + ¶meters, last_solution_printer)) { if (!absl::GetFlag(FLAGS_competition_mode)) { LOG(FATAL) << "Cannot load file '" << absl::GetFlag(FLAGS_input) << "'."; } @@ -329,6 +405,14 @@ int Run() { FingerprintRepeatedField(r.solution(), kDefaultFingerprintSeed)); })); } + + if (absl::GetFlag(FLAGS_competition_mode)) { + model.GetOrCreate()->Register([last_solution_printer]() { + last_solution_printer->MaybePrintLastSolution(); + exit(EXIT_SUCCESS); + }); + } + const CpSolverResponse response = SolveCpModel(*cp_model, &model); if (!absl::GetFlag(FLAGS_output).empty()) { diff --git a/ortools/util/sigint.cc b/ortools/util/sigint.cc index 601f4983cc..bd4f40cfac 100644 --- a/ortools/util/sigint.cc +++ b/ortools/util/sigint.cc @@ -23,29 +23,47 @@ namespace operations_research { void SigintHandler::Register(const std::function& f) { handler_ = [this, f]() -> void { - const int num_sigint_calls = ++num_sigint_calls_; - if (num_sigint_calls < 3) { + const int num_calls = 
++num_calls_; + if (num_calls < 3) { LOG(INFO) - << "^C pressed " << num_sigint_calls << " times. " + << "^C pressed " << num_calls << " times. " << "Interrupting the solver. Press 3 times to force termination."; - if (num_sigint_calls == 1) f(); - } else if (num_sigint_calls == 3) { + if (num_calls == 1) f(); + } else if (num_calls == 3) { LOG(INFO) << "^C pressed 3 times. Forcing termination."; exit(EXIT_FAILURE); } else { // Another thread is already running exit(), do nothing. } }; - signal(SIGINT, &ControlCHandler); + signal(SIGINT, &SigHandler); } // This method will be called by the system after the SIGINT signal. // The parameter is the signal received. -void SigintHandler::ControlCHandler(int sig) { handler_(); } +void SigintHandler::SigHandler(int) { handler_(); } -// Unregister the SIGINT handler. -SigintHandler::~SigintHandler() { signal(SIGINT, SIG_DFL); } +// Unregister the signal handlers. +SigintHandler::~SigintHandler() { + if (handler_ != nullptr) signal(SIGINT, SIG_DFL); +} thread_local std::function SigintHandler::handler_; +void SigtermHandler::Register(const std::function& f) { + handler_ = [f]() -> void { f(); }; + signal(SIGTERM, &SigHandler); +} + +// This method will be called by the system after the SIGTERM signal. +// The parameter is the signal received. +void SigtermHandler::SigHandler(int) { handler_(); } + +// Unregister the signal handlers. +SigtermHandler::~SigtermHandler() { + if (handler_ != nullptr) signal(SIGTERM, SIG_DFL); +} + +thread_local std::function SigtermHandler::handler_; + } // namespace operations_research diff --git a/ortools/util/sigint.h b/ortools/util/sigint.h index 7b3098033e..1d9fcd1b81 100644 --- a/ortools/util/sigint.h +++ b/ortools/util/sigint.h @@ -21,7 +21,7 @@ namespace operations_research { class SigintHandler { public: - SigintHandler() {} + SigintHandler() = default; ~SigintHandler(); // Catches ^C and call f() the first time this happen. 
If ^C is pressed 3 @@ -29,9 +29,23 @@ class SigintHandler { void Register(const std::function& f); private: - static void ControlCHandler(int s); + std::atomic num_calls_ = 0; - std::atomic num_sigint_calls_ = 0; + static void SigHandler(int s); + thread_local static std::function handler_; +}; + +class SigtermHandler { + public: + SigtermHandler() = default; + ~SigtermHandler(); + + // Catches SIGTERM and call f(). It is recommended that f() calls exit() to + // terminate the program. + void Register(const std::function& f); + + private: + static void SigHandler(int s); thread_local static std::function handler_; }; From 9541e49c22cbd7f724142f02ac8b9b202f13126a Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 19 Jun 2025 15:07:38 +0200 Subject: [PATCH 32/81] tools/release: fix scripts --- tools/release/amd64.Dockerfile | 6 +-- tools/release/arm64.Dockerfile | 6 +-- tools/release/build_delivery_macos.sh | 66 +++++++++++++++++---------- 3 files changed, 47 insertions(+), 31 deletions(-) diff --git a/tools/release/amd64.Dockerfile b/tools/release/amd64.Dockerfile index b95ad2405e..1622d7368f 100644 --- a/tools/release/amd64.Dockerfile +++ b/tools/release/amd64.Dockerfile @@ -36,10 +36,10 @@ RUN dnf -y update \ ENV JAVA_HOME=/usr/lib/jvm/java # Update maven -ADD https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz /usr/local +ADD https://dlcdn.apache.org/maven/maven-3/3.9.10/binaries/apache-maven-3.9.10-bin.tar.gz /usr/local RUN mkdir -p /usr/local/maven \ - && tar xzvf /usr/local/apache-maven-3.9.9-bin.tar.gz --strip-components=1 -C /usr/local/maven \ - && rm /usr/local/apache-maven-3.9.9-bin.tar.gz + && tar xzvf /usr/local/apache-maven-3.9.10-bin.tar.gz --strip-components=1 -C /usr/local/maven \ + && rm /usr/local/apache-maven-3.9.10-bin.tar.gz ENV PATH=/usr/local/maven/bin:$PATH ENV TZ=America/Los_Angeles diff --git a/tools/release/arm64.Dockerfile b/tools/release/arm64.Dockerfile index b19b71c8fc..138c653bb5 100644 --- 
a/tools/release/arm64.Dockerfile +++ b/tools/release/arm64.Dockerfile @@ -41,10 +41,10 @@ RUN dnf -y update \ ENV JAVA_HOME=/usr/lib/jvm/java # Update maven -ADD https://dlcdn.apache.org/maven/maven-3/3.9.9/binaries/apache-maven-3.9.9-bin.tar.gz /usr/local +ADD https://dlcdn.apache.org/maven/maven-3/3.9.10/binaries/apache-maven-3.9.10-bin.tar.gz /usr/local RUN mkdir -p /usr/local/maven \ - && tar xzvf /usr/local/apache-maven-3.9.9-bin.tar.gz --strip-components=1 -C /usr/local/maven \ - && rm /usr/local/apache-maven-3.9.9-bin.tar.gz + && tar xzvf /usr/local/apache-maven-3.9.10-bin.tar.gz --strip-components=1 -C /usr/local/maven \ + && rm /usr/local/apache-maven-3.9.10-bin.tar.gz ENV PATH=/usr/local/maven/bin:$PATH ENV TZ=America/Los_Angeles diff --git a/tools/release/build_delivery_macos.sh b/tools/release/build_delivery_macos.sh index 7070cd050b..f03ddf3577 100755 --- a/tools/release/build_delivery_macos.sh +++ b/tools/release/build_delivery_macos.sh @@ -64,12 +64,15 @@ function build_dotnet() { fi cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." command -v swig command -v swig | xargs echo "swig: " | tee -a build.log - echo "check dotnet..." + echo "DONE" | tee -a build.log + + echo -n "check dotnet..." command -v dotnet command -v dotnet | xargs echo "dotnet: " | tee -a build.log + echo "DONE" | tee -a build.log # Install .Net SNK echo -n "Install .Net SNK..." | tee -a build.log @@ -77,7 +80,8 @@ function build_dotnet() { if [[ -x $(command -v openssl11) ]]; then OPENSSL_PRG=openssl11 fi - echo "check ${OPENSSL_PRG}..." + echo "DONE" | tee -a build.log + echo -n "check ${OPENSSL_PRG}..." command -v ${OPENSSL_PRG} | xargs echo "openssl: " | tee -a build.log $OPENSSL_PRG aes-256-cbc -iter 42 -pass pass:"$ORTOOLS_TOKEN" \ @@ -92,12 +96,12 @@ function build_dotnet() { rm -rf "${ROOT_DIR}/temp_dotnet" echo "DONE" | tee -a build.log - echo -n "Build .Net..." | tee -a build.log + echo "Build .Net..." | tee -a build.log cmake -S. 
-Btemp_dotnet -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_DOTNET=ON cmake --build temp_dotnet -j8 -v - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L temp_dotnet/lib/libortools.dylib | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_dotnet --target test #echo "cmake test: DONE" | tee -a build.log @@ -115,9 +119,11 @@ function build_java() { fi cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." command -v swig command -v swig | xargs echo "swig: " | tee -a build.log + echo "DONE" | tee -a build.log + # maven require JAVA_HOME if [[ -z "${JAVA_HOME}" ]]; then echo "JAVA_HOME: not found !" | tee -a build.log @@ -173,21 +179,19 @@ function build_java() { rm -rf "${ROOT_DIR}/temp_java" echo "DONE" | tee -a build.log - echo -n "Build Java..." | tee -a build.log - + echo "Build Java..." | tee -a build.log if [[ ! -v GPG_ARGS ]]; then GPG_EXTRA="" else GPG_EXTRA="-DGPG_ARGS=${GPG_ARGS}" fi - # shellcheck disable=SC2086 # cmake fail to parse empty string "" cmake -S. -Btemp_java -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF \ -DBUILD_JAVA=ON -DSKIP_GPG=OFF ${GPG_EXTRA} cmake --build temp_java -j8 -v - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L temp_java/lib/libortools.dylib | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log #cmake --build temp_java --target test #echo "cmake test: DONE" | tee -a build.log @@ -215,9 +219,10 @@ function build_python() { PATH_BCKP=${PATH} cd "${ROOT_DIR}" || exit 2 - echo "check swig..." + echo -n "check swig..." 
command -v swig command -v swig | xargs echo "swig: " | tee -a build.log + echo "DONE" | tee -a build.log if [[ ${PLATFORM} == "arm64" ]]; then local -r PY=(3.9 3.10 3.11 3.12 3.13) @@ -242,7 +247,7 @@ function build_python() { command -v "python${PY_VERSION}" | xargs echo "python${PY_VERSION}: " | tee -a build.log "python${PY_VERSION}" -c "import platform as p; print(p.platform())" | tee -a build.log "python${PY_VERSION}" -m pip install --upgrade --user pip - "python${PY_VERSION}" -m pip install --upgrade --user wheel absl-py mypy mypy-protobuf protobuf virtualenv "typing-extensions>=4.12" + "python${PY_VERSION}" -m pip install --upgrade --user wheel absl-py mypy mypy-protobuf protobuf virtualenv echo "check protoc-gen-mypy..." command -v protoc-gen-mypy | xargs echo "protoc-gen-mypy: " | tee -a build.log protoc-gen-mypy --version | xargs echo "protoc-gen-mypy version: " | tee -a build.log @@ -276,7 +281,7 @@ function build_python() { echo -n "Cleaning Python ${PY_VERSION}..." | tee -a build.log rm -rf "temp_python${PY_VERSION}" echo "DONE" | tee -a build.log - + echo "Build Python ${PY_VERSION}..." | tee -a build.log echo -n " CMake configure..." | tee -a build.log cmake -S. -B"temp_python${PY_VERSION}" -DBUILD_SAMPLES=OFF -DBUILD_EXAMPLES=OFF -DBUILD_PYTHON=ON -DPython3_ROOT_DIR="$PY_PATH" @@ -351,26 +356,27 @@ function build_archive() { echo -n "Clean previous archive..." | tee -a build.log make clean_archive + echo "DONE" | tee -a build.log - echo -n "Make cpp archive..." | tee -a build.log + echo "Make cpp archive..." | tee -a build.log make archive_cpp - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log - echo -n "Make dotnet archive..." | tee -a build.log + echo "Make dotnet archive..." 
| tee -a build.log make archive_dotnet - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log - echo -n "Make java archive..." | tee -a build.log + echo "Make java archive..." | tee -a build.log make archive_java - echo " Check libortools.dylib..." | tee -a build.log + echo -n " Check libortools.dylib..." | tee -a build.log otool -L "build_make/lib/libortools.dylib" | grep -vqz "/Users" - echo " DONE" | tee -a build.log + echo "DONE" | tee -a build.log echo "DONE" | tee -a build.log # move archive to export @@ -392,16 +398,26 @@ function build_examples() { echo "Check Sed version..." sed --version 2>&1 | head -n 1 | grep "GNU sed.*\b4" + echo -n "Clean previous example archives..." | tee -a build.log rm -rf temp ./*.tar.gz - echo -n "Build examples archives..." | tee -a build.log + echo "DONE" | tee -a build.log + + echo "Build examples archives..." | tee -a build.log + echo -n " Python examples archive..." | tee -a build.log make python_examples_archive UNIX_PYTHON_VER=3 + echo "DONE" | tee -a build.log + echo -n " Java examples archive..." | tee -a build.log make java_examples_archive UNIX_PYTHON_VER=3 + echo "DONE" | tee -a build.log + echo -n " .Net examples archive..." 
| tee -a build.log make dotnet_examples_archive UNIX_PYTHON_VER=3 echo "DONE" | tee -a build.log + echo "DONE" | tee -a build.log + # move example to export/ mv or-tools_*_examples_*.tar.gz export/ echo "${ORTOOLS_BRANCH} ${ORTOOLS_SHA1}" > "${ROOT_DIR}/export/examples_build" From 9fa309b358d1e49476114af533db27b69e0dccc8 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 18 Jun 2025 10:29:31 +0200 Subject: [PATCH 33/81] cmake: Fix cmake_minimum_required to 3.24 (#4692) --- CMakeLists.txt | 2 +- cmake/README.md | 2 +- cmake/dependencies/CMakeLists.txt | 3 +++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b06184dc01..02cee4997c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,7 +12,7 @@ # limitations under the License. # This file is just an orchestration -cmake_minimum_required(VERSION 3.20) +cmake_minimum_required(VERSION 3.24) list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") # Enable output of compile commands during generation. diff --git a/cmake/README.md b/cmake/README.md index 816f67d067..ff84170a29 100644 --- a/cmake/README.md +++ b/cmake/README.md @@ -88,7 +88,7 @@ CMake as a standalone project or incorporate it into an existing CMake project. ## Requirement You'll need: -* `CMake >= 3.18`. +* `CMake >= 3.24`. * A C++20 compiler (GCC 10 or above) ## Solvers supported diff --git a/cmake/dependencies/CMakeLists.txt b/cmake/dependencies/CMakeLists.txt index 80fdbb1b37..184fed78eb 100644 --- a/cmake/dependencies/CMakeLists.txt +++ b/cmake/dependencies/CMakeLists.txt @@ -11,6 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# We are using FetchContent OVERRIDE_FIND_PACKAGE introduced in 3.24 +cmake_minimum_required(VERSION 3.24) + # ############################################################################## # SWIG (WIN32) # ############################################################################## From 78b662a2b223d101e9c291d10f37d4e49c2bef0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Wed, 18 Jun 2025 18:05:38 +0200 Subject: [PATCH 34/81] ortools: utils: keep compatibility with protobuf < 26 --- ortools/util/file_util.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ortools/util/file_util.cc b/ortools/util/file_util.cc index 6ee86f6e52..62d0aaa314 100644 --- a/ortools/util/file_util.cc +++ b/ortools/util/file_util.cc @@ -166,7 +166,11 @@ absl::Status WriteProtoToFile(absl::string_view filename, case ProtoWriteFormat::kJson: { google::protobuf::util::JsonPrintOptions options; options.add_whitespace = true; +#if PROTOBUF_VERSION >= 5026000 // Version 26.0.0 options.always_print_fields_with_no_presence = true; +#else + options.always_print_primitive_fields = true; +#endif options.preserve_proto_field_names = true; if (!google::protobuf::util::MessageToJsonString(proto, &output_string, options) From 507f1d82f6c8d13a04d2dc554124b02e4aea6cbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20P=C3=A9ron?= Date: Wed, 18 Jun 2025 17:22:11 +0200 Subject: [PATCH 35/81] graph: fix iterator compatibility since C++17 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add missing iterator typedefs to custom iterator classes when std::iterator inheritance is deprecated since C++17. 
Signed-off-by: Clément Péron --- ortools/base/proto_enum_utils.h | 13 ++++++++++++- ortools/graph/graph.h | 8 +++++++- ortools/graph/iterators.h | 14 ++++++++++++-- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/ortools/base/proto_enum_utils.h b/ortools/base/proto_enum_utils.h index a78dd61a72..bdf0331056 100644 --- a/ortools/base/proto_enum_utils.h +++ b/ortools/base/proto_enum_utils.h @@ -175,8 +175,19 @@ namespace internal { template class RepeatedEnumView { public: - class Iterator : public std::iterator { + class Iterator +#if __cplusplus < 201703L + : public std::iterator +#endif + { public: + using difference_type = ptrdiff_t; + using value_type = E; +#if __cplusplus >= 201703L + using iterator_category = std::input_iterator_tag; + using pointer = E*; + using reference = E&; +#endif explicit Iterator(RepeatedField::const_iterator ptr) : ptr_(ptr) {} bool operator==(const Iterator& it) const { return ptr_ == it.ptr_; } bool operator!=(const Iterator& it) const { return ptr_ != it.ptr_; } diff --git a/ortools/graph/graph.h b/ortools/graph/graph.h index c8b7ef0b83..db3f0e2bcb 100644 --- a/ortools/graph/graph.h +++ b/ortools/graph/graph.h @@ -315,7 +315,7 @@ class BaseGraph { template class ArcPropertyIterator -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { @@ -324,6 +324,11 @@ class ArcPropertyIterator // TODO(b/385094969): This should be `NodeIndex` for integers, // `NodeIndex::value_type` for strong signed integer types. 
using difference_type = std::ptrdiff_t; +#if __cplusplus >= 201703L && __cplusplus < 202002L + using iterator_category = std::input_iterator_tag; + using pointer = PropertyT*; + using reference = PropertyT&; +#endif ArcPropertyIterator() = default; @@ -346,6 +351,7 @@ class ArcPropertyIterator const ArcPropertyIterator& r) { return l.arc_it_ == r.arc_it_; } + friend bool operator!=(const ArcPropertyIterator& l, const ArcPropertyIterator& r) { return !(l == r); diff --git a/ortools/graph/iterators.h b/ortools/graph/iterators.h index 73f67a07bd..50fd5335b0 100644 --- a/ortools/graph/iterators.h +++ b/ortools/graph/iterators.h @@ -124,13 +124,18 @@ class IntegerRangeIterator // TODO(b/385094969): In C++17, `std::iterator_traits` required // explicitly specifying the iterator category. Remove this when backwards // compatibility with C++17 is no longer needed. -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { public: using difference_type = ptrdiff_t; using value_type = IntegerType; +#if __cplusplus >= 201703L && __cplusplus < 202002L + using iterator_category = std::input_iterator_tag; + using pointer = IntegerType*; + using reference = IntegerType&; +#endif IntegerRangeIterator() : index_{} {} @@ -243,13 +248,18 @@ class IntegerRange : public BeginEndWrapper> { // different iterators with the same index type and sentinel. 
template class ChasingIterator -#if __cplusplus < 202002L +#if __cplusplus < 201703L : public std::iterator #endif { public: using difference_type = ptrdiff_t; using value_type = IndexT; +#if __cplusplus >= 201703L && __cplusplus < 202002L + using iterator_category = std::input_iterator_tag; + using pointer = IndexT*; + using reference = IndexT&; +#endif ChasingIterator() : index_(sentinel), next_(nullptr) {} From d263eea61930d0ccc2cb098590e5d913aba0ddb9 Mon Sep 17 00:00:00 2001 From: Mizux Seiha Date: Wed, 25 Jun 2025 15:46:08 +0200 Subject: [PATCH 36/81] Fix or-tools.code-workspace --- or-tools.code-workspace | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/or-tools.code-workspace b/or-tools.code-workspace index 96c6145349..abf8dd1536 100644 --- a/or-tools.code-workspace +++ b/or-tools.code-workspace @@ -113,7 +113,7 @@ "USE_SCIP" ], "C_Cpp.clang_format_style": "Google", - "python.formatting.provider": "yapf", + "python.formatting.provider": "black", "python.pythonPath": "python3", "python.autoComplete.extraPaths": [ "${workspaceRoot}", From 52d44af1dd46e99e72664d01e5f72a5325e24955 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 26 Jun 2025 13:24:10 +0200 Subject: [PATCH 37/81] cmake: update doxygen-awesome-css from v2.1.0 to v2.3.4 --- cmake/cpp.cmake | 2 +- cmake/dotnet.cmake | 2 +- cmake/java.cmake | 2 +- cmake/python.cmake | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 9cdc27ec1e..09f7417613 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -581,7 +581,7 @@ if(BUILD_CXX_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/cpp/Doxyfile.in ${PROJECT_BINARY_DIR}/cpp/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/cpp/doxygen-awesome.css SHOW_PROGRESS ) diff --git 
a/cmake/dotnet.cmake b/cmake/dotnet.cmake index c74042c04c..52f77b77e3 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -505,7 +505,7 @@ if(BUILD_DOTNET_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/dotnet/Doxyfile.in ${PROJECT_BINARY_DIR}/dotnet/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/dotnet/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/java.cmake b/cmake/java.cmake index 74184c1f4f..9783f2e1dd 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -567,7 +567,7 @@ if(BUILD_JAVA_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/java/Doxyfile.in ${PROJECT_BINARY_DIR}/java/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/java/doxygen-awesome.css SHOW_PROGRESS ) diff --git a/cmake/python.cmake b/cmake/python.cmake index 2112ceeb66..70a8151a89 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -849,7 +849,7 @@ if(BUILD_PYTHON_DOC) if(DOXYGEN_FOUND) configure_file(${PROJECT_SOURCE_DIR}/ortools/python/Doxyfile.in ${PROJECT_BINARY_DIR}/python/Doxyfile @ONLY) file(DOWNLOAD - https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.1.0/doxygen-awesome.css + https://raw.githubusercontent.com/jothepro/doxygen-awesome-css/v2.3.4/doxygen-awesome.css ${PROJECT_BINARY_DIR}/python/doxygen-awesome.css SHOW_PROGRESS ) From 835b3b3b93fc5fdc9db69aa1f6704f393cf2959d Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Thu, 26 Jun 2025 13:32:30 +0200 Subject: [PATCH 38/81] doxygen: force light-mode for doc --- cmake/cpp.cmake | 1 + cmake/dotnet.cmake | 1 + cmake/java.cmake | 1 + cmake/python.cmake | 1 + ortools/cpp/Doxyfile.in | 
4 +- ortools/dotnet/Doxyfile.in | 4 +- ortools/doxygen/header.html | 76 +++++++++++++++++++++++++++++++++++++ ortools/java/Doxyfile.in | 4 +- ortools/python/Doxyfile.in | 4 +- 9 files changed, 88 insertions(+), 8 deletions(-) create mode 100644 ortools/doxygen/header.html diff --git a/cmake/cpp.cmake b/cmake/cpp.cmake index 09f7417613..551bbd255d 100644 --- a/cmake/cpp.cmake +++ b/cmake/cpp.cmake @@ -592,6 +592,7 @@ if(BUILD_CXX_DOC) DEPENDS ${PROJECT_BINARY_DIR}/cpp/Doxyfile ${PROJECT_BINARY_DIR}/cpp/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/cpp/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating C++ API documentation with Doxygen" diff --git a/cmake/dotnet.cmake b/cmake/dotnet.cmake index 52f77b77e3..731dd4d8ff 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -517,6 +517,7 @@ if(BUILD_DOTNET_DOC) dotnet_package ${PROJECT_BINARY_DIR}/dotnet/Doxyfile ${PROJECT_BINARY_DIR}/dotnet/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/dotnet/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating .Net API documentation with Doxygen" diff --git a/cmake/java.cmake b/cmake/java.cmake index 9783f2e1dd..eb465a106e 100644 --- a/cmake/java.cmake +++ b/cmake/java.cmake @@ -579,6 +579,7 @@ if(BUILD_JAVA_DOC) java_package ${PROJECT_BINARY_DIR}/java/Doxyfile ${PROJECT_BINARY_DIR}/java/doxygen-awesome.css + ${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/java/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating Java API documentation with Doxygen" diff --git a/cmake/python.cmake b/cmake/python.cmake index 70a8151a89..c5daeee8a3 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -861,6 +861,7 @@ if(BUILD_PYTHON_DOC) python_package ${PROJECT_BINARY_DIR}/python/Doxyfile ${PROJECT_BINARY_DIR}/python/doxygen-awesome.css + 
${PROJECT_SOURCE_DIR}/ortools/doxygen/header.html ${PROJECT_SOURCE_DIR}/ortools/python/stylesheet.css WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} COMMENT "Generating Python API documentation with Doxygen" diff --git a/ortools/cpp/Doxyfile.in b/ortools/cpp/Doxyfile.in index c73af82a1c..3b22a9d3b4 100644 --- a/ortools/cpp/Doxyfile.in +++ b/ortools/cpp/Doxyfile.in @@ -1267,7 +1267,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1331,7 +1331,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/dotnet/Doxyfile.in b/ortools/dotnet/Doxyfile.in index 705bb2febb..a137f56eef 100644 --- a/ortools/dotnet/Doxyfile.in +++ b/ortools/dotnet/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. 
-HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/doxygen/header.html b/ortools/doxygen/header.html new file mode 100644 index 0000000000..105640b7d3 --- /dev/null +++ b/ortools/doxygen/header.html @@ -0,0 +1,76 @@ + + + + + + + + +$projectname: $title +$title + + + + + + + + + + + + +$treeview +$search +$mathjax +$darkmode + +$extrastylesheet + + + +
+ + +
+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
$projectname $projectnumber +
+
$projectbrief
+
+
$projectbrief
+
$searchbox
$searchbox
+
+ + diff --git a/ortools/java/Doxyfile.in b/ortools/java/Doxyfile.in index 5e35f02281..ff8544e977 100644 --- a/ortools/java/Doxyfile.in +++ b/ortools/java/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to diff --git a/ortools/python/Doxyfile.in b/ortools/python/Doxyfile.in index 4e773821e7..48b3347927 100644 --- a/ortools/python/Doxyfile.in +++ b/ortools/python/Doxyfile.in @@ -1263,7 +1263,7 @@ HTML_FILE_EXTENSION = .html # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_HEADER = +HTML_HEADER = ortools/doxygen/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard @@ -1327,7 +1327,7 @@ HTML_EXTRA_FILES = # The default value is: AUTO_LIGHT. # This tag requires that the tag GENERATE_HTML is set to YES. -HTML_COLORSTYLE = AUTO_LIGHT +HTML_COLORSTYLE = LIGHT # required with Doxygen >= 1.9.5 # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. 
Doxygen # will adjust the colors in the style sheet and background images according to From 09e7e951a25653b19f0b8fcdaa4565435a54c405 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Mon, 30 Jun 2025 09:59:26 +0200 Subject: [PATCH 39/81] cmake(ci): Rework Vagrantfile ci: Rework BSD workflows ci: Bump freebsd Vagrantfile from python 3.9 to 3.11 --- .github/workflows/amd64_freebsd_cmake.yml | 54 ++++++++++++++--------- cmake/Makefile | 2 +- cmake/vagrant/freebsd/cpp/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/dotnet/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/java/Vagrantfile | 10 ++++- cmake/vagrant/freebsd/python/Vagrantfile | 14 ++++-- cmake/vagrant/netbsd/cpp/Vagrantfile | 4 +- cmake/vagrant/netbsd/dotnet/Vagrantfile | 6 +-- cmake/vagrant/netbsd/java/Vagrantfile | 6 +-- cmake/vagrant/netbsd/python/Vagrantfile | 8 ++-- 10 files changed, 81 insertions(+), 43 deletions(-) diff --git a/.github/workflows/amd64_freebsd_cmake.yml b/.github/workflows/amd64_freebsd_cmake.yml index 9cf860332a..81fa90da0b 100644 --- a/.github/workflows/amd64_freebsd_cmake.yml +++ b/.github/workflows/amd64_freebsd_cmake.yml @@ -1,3 +1,4 @@ +# ref: https://github.com/actions/runner-images name: amd64 FreeBSD CMake on: [push, pull_request, workflow_dispatch] @@ -6,31 +7,44 @@ concurrency: group: ${{github.workflow}}-${{github.ref}} cancel-in-progress: true -# Only macos-12 runner provide virtualisation with vagrant/virtualbox installed. -# ref: https://github.com/actions/runner-images/tree/main/images/macos -# ref: https://app.vagrantup.com/generic/boxes/freebsd13 +# Building using the github runner environement directly. 
jobs: vagrant: strategy: - fail-fast: false matrix: - distro: [freebsd] - lang: [cpp, python] - allow_failure: [false] - include: - - distro: freebsd - lang: dotnet - allow_failure: true - - distro: freebsd - lang: java - allow_failure: true - name: amd64•FreeBSD•CMake•${{matrix.lang}} - runs-on: macos-12 + distro: [ + freebsd, + #netbsd, + #openbsd, + ] + lang: [ + cpp, + dotnet, + java, + python, + ] + allow_failure: [true] + fail-fast: false + name: amd64•${{matrix.distro}}•CMake•${{matrix.lang}} + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: vagrant version - run: Vagrant --version - - name: VirtualBox version - run: virtualbox -h + - name: Virtualbox install + run: | + sudo apt update -q + sudo apt install -yq virtualbox + virtualbox --help + - name: Vagrant install + run: | + sudo apt update -q + wget https://releases.hashicorp.com/vagrant/2.4.7/vagrant_2.4.7-1_amd64.deb + sudo apt install -y ./vagrant_2.4.7-1_amd64.deb + vagrant --version - name: Build run: make --directory=cmake ${{matrix.distro}}_${{matrix.lang}} + + amd64_bsd_cmake: + runs-on: ubuntu-latest + needs: vagrant + steps: + - uses: actions/checkout@v4 diff --git a/cmake/Makefile b/cmake/Makefile index f63de4d762..021c17fa42 100644 --- a/cmake/Makefile +++ b/cmake/Makefile @@ -803,4 +803,4 @@ clean: clean_all clean_platforms clean_toolchains clean_web clean_vagrant clean_ distclean: clean -docker container rm -f $$(docker container ls -aq) -docker image rm -f $$(docker image ls -aq) - -vagrant box remove -f generic/freebsd12 + -vagrant box remove -f generic/freebsd14 diff --git a/cmake/vagrant/freebsd/cpp/Vagrantfile b/cmake/vagrant/freebsd/cpp/Vagrantfile index 31e311c074..5440fd949e 100644 --- a/cmake/vagrant/freebsd/cpp/Vagrantfile +++ b/cmake/vagrant/freebsd/cpp/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. 
config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_cpp" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/dotnet/Vagrantfile b/cmake/vagrant/freebsd/dotnet/Vagrantfile index 567bd27889..7648c0ae11 100644 --- a/cmake/vagrant/freebsd/dotnet/Vagrantfile +++ b/cmake/vagrant/freebsd/dotnet/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_dotnet" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. 
- #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/java/Vagrantfile b/cmake/vagrant/freebsd/java/Vagrantfile index c6584eb767..fff3664342 100644 --- a/cmake/vagrant/freebsd/java/Vagrantfile +++ b/cmake/vagrant/freebsd/java/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_java" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. 
+ config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. diff --git a/cmake/vagrant/freebsd/python/Vagrantfile b/cmake/vagrant/freebsd/python/Vagrantfile index 73a6ba7b2a..7cd93bd1d5 100644 --- a/cmake/vagrant/freebsd/python/Vagrantfile +++ b/cmake/vagrant/freebsd/python/Vagrantfile @@ -14,6 +14,7 @@ Vagrant.configure("2") do |config| # boxes at https://vagrantcloud.com/search. config.vm.guest = :freebsd config.vm.box = "generic/freebsd14" + config.vm.box_version = "4.3.12" config.vm.provider "virtualbox" do |v| v.name = "ortools_freebsd_python" end @@ -48,9 +49,14 @@ Vagrant.configure("2") do |config| # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third # argument is a set of non-required options. - #config.vm.synced_folder "../../..", "/home/vagrant/project" - config.vm.synced_folder ".", "/vagrant", id: "vagrant-root", disabled: true + # config.vm.synced_folder "../data", "/vagrant_data" + # Disable the default share of the current code directory. Doing this + # provides improved isolation between the vagrant box and your host + # by making sure your Vagrantfile isn't accessible to the vagrant box. + # If you use this you may want to enable additional shared subfolders as + # shown above. + config.vm.synced_folder ".", "/vagrant", disabled: true # Provider-specific configuration so you can fine-tune various # backing providers for Vagrant. These expose provider-specific options. 
@@ -75,8 +81,8 @@ Vagrant.configure("2") do |config| set -x pkg update -f pkg install -y git cmake - pkg install -y swig python39 py39-wheel py39-pip py39-pytest-virtualenv - pkg install -y py39-numpy py39-pandas py39-matplotlib + pkg install -y swig python311 py311-wheel py311-pip py311-pytest-virtualenv + pkg install -y py311-numpy py311-pandas py311-matplotlib SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/cpp/Vagrantfile b/cmake/vagrant/netbsd/cpp/Vagrantfile index 0378b3d5a8..db188d48cc 100644 --- a/cmake/vagrant/netbsd/cpp/Vagrantfile +++ b/cmake/vagrant/netbsd/cpp/Vagrantfile @@ -73,8 +73,8 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake + pkgin update + pkgin -y install git cmake SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/dotnet/Vagrantfile b/cmake/vagrant/netbsd/dotnet/Vagrantfile index bceb231d8b..09ac57e594 100644 --- a/cmake/vagrant/netbsd/dotnet/Vagrantfile +++ b/cmake/vagrant/netbsd/dotnet/Vagrantfile @@ -73,10 +73,10 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake + pkgin update + pkgin -y install git cmake kldload linux64 - pkg install -y swig linux-dotnet-sdk + pkgin -y install swig linux-dotnet-sdk SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" diff --git a/cmake/vagrant/netbsd/java/Vagrantfile b/cmake/vagrant/netbsd/java/Vagrantfile index 050e73496e..7c44664b8e 100644 --- a/cmake/vagrant/netbsd/java/Vagrantfile +++ b/cmake/vagrant/netbsd/java/Vagrantfile @@ -73,9 +73,9 @@ Vagrant.configure("2") do |config| # note: clang installed by default 
config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake - pkg install -y swig openjdk11 maven + pkgin update + pkg -y install git cmake + pkg -y install swig openjdk11 maven mount -t fdescfs fdesc /dev/fd mount -t procfs proc /proc SHELL diff --git a/cmake/vagrant/netbsd/python/Vagrantfile b/cmake/vagrant/netbsd/python/Vagrantfile index 6cfb5783f6..86340ab3d0 100644 --- a/cmake/vagrant/netbsd/python/Vagrantfile +++ b/cmake/vagrant/netbsd/python/Vagrantfile @@ -73,10 +73,10 @@ Vagrant.configure("2") do |config| # note: clang installed by default config.vm.provision "env", type: "shell", inline:<<-SHELL set -x - pkg update -f - pkg install -y git cmake - pkg install -y swig python39 py39-wheel py39-pip py39-pytest-virtualenv - pkg install -y py39-numpy py39-pandas py39-matplotlib + pkgin update + pkgin -y install git cmake + pkgin -y install swig python311 py311-wheel py311-pip py311-pytest-virtualenv + pkgin -y install py311-numpy py311-pandas py311-matplotlib SHELL config.vm.provision "file", source: "../../../../CMakeLists.txt", destination: "$HOME/project/" From 0ef0f402d119abbf2416168a101f7c37568a1e3c Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 2 Jul 2025 11:54:13 +0200 Subject: [PATCH 40/81] cleanup from google3 --- ortools/packing/testdata/Class_01.2bp | 3324 +------------------------ ortools/util/fp_utils.h | 5 + 2 files changed, 79 insertions(+), 3250 deletions(-) diff --git a/ortools/packing/testdata/Class_01.2bp b/ortools/packing/testdata/Class_01.2bp index fc0e3c6e40..265998df61 100644 --- a/ortools/packing/testdata/Class_01.2bp +++ b/ortools/packing/testdata/Class_01.2bp @@ -1,3250 +1,74 @@ - 1 PROBLEM CLASS - 20 N. OF ITEMS - 1 1 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 9 5 H(I),W(I),I=1,...,N - 2 4 - 6 10 - 7 5 - 3 6 - 7 10 - 5 1 - 5 3 - 9 6 - 4 2 - 7 6 - 2 7 - 3 8 - 10 4 - 5 4 - 3 10 - 3 8 - 8 7 - 3 8 - 7 8 - - 1 PROBLEM CLASS - 20 N. 
OF ITEMS - 2 2 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 2 2 H(I),W(I),I=1,...,N - 8 6 - 2 10 - 3 1 - 4 8 - 10 3 - 9 1 - 5 1 - 3 6 - 1 1 - 2 4 - 2 9 - 9 1 - 5 9 - 7 4 - 2 2 - 4 3 - 7 9 - 1 4 - 8 9 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 3 3 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 7 H(I),W(I),I=1,...,N - 6 10 - 6 5 - 2 7 - 8 4 - 10 9 - 5 8 - 6 8 - 9 4 - 3 9 - 10 3 - 5 9 - 7 1 - 9 8 - 6 4 - 6 3 - 3 4 - 2 10 - 1 6 - 4 1 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 4 4 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 5 5 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 10 H(I),W(I),I=1,...,N - 10 2 - 4 2 - 10 10 - 2 7 - 10 9 - 6 5 - 5 7 - 7 1 - 3 5 - 9 3 - 4 9 - 10 2 - 3 4 - 2 2 - 9 4 - 8 2 - 1 1 - 7 1 - 4 4 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 6 6 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 7 7 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 10 H(I),W(I),I=1,...,N - 1 10 - 7 3 - 3 4 - 9 2 - 2 10 - 1 3 - 8 10 - 8 3 - 1 7 - 7 3 - 4 1 - 10 8 - 7 2 - 1 5 - 1 4 - 8 6 - 9 5 - 2 5 - 9 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 8 8 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 9 9 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 6 H(I),W(I),I=1,...,N - 4 3 - 5 10 - 8 6 - 10 4 - 9 8 - 8 7 - 2 5 - 9 6 - 3 9 - 9 7 - 5 5 - 4 1 - 2 9 - 7 4 - 1 4 - 10 2 - 3 4 - 3 8 - 4 6 - - 1 PROBLEM CLASS - 20 N. OF ITEMS - 10 10 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 1 11 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 2 2 H(I),W(I),I=1,...,N - 8 6 - 2 10 - 3 1 - 4 8 - 10 3 - 9 1 - 5 1 - 3 6 - 1 1 - 2 4 - 2 9 - 9 1 - 5 9 - 7 4 - 2 2 - 4 3 - 7 9 - 1 4 - 8 9 - 3 4 - 5 6 - 7 4 - 4 10 - 5 9 - 2 1 - 1 7 - 1 3 - 3 8 - 4 4 - 2 7 - 9 6 - 2 2 - 8 2 - 1 4 - 6 10 - 1 7 - 9 3 - 5 9 - 8 3 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 2 12 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - 5 2 - 1 5 - 4 8 - 3 6 - 10 7 - 10 5 - 2 5 - 9 5 - 6 9 - 2 10 - 9 9 - 2 7 - 2 1 - 9 8 - 10 2 - 2 7 - 10 3 - 7 2 - 4 9 - 1 3 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 3 13 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - 9 6 - 6 3 - 10 7 - 4 7 - 2 4 - 5 5 - 4 8 - 4 6 - 5 4 - 9 10 - 2 8 - 1 3 - 1 3 - 5 1 - 3 2 - 7 6 - 4 4 - 10 3 - 10 10 - 8 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 4 14 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - 5 7 - 2 5 - 10 10 - 5 4 - 5 4 - 4 7 - 5 7 - 9 8 - 8 10 - 6 5 - 4 8 - 2 9 - 8 9 - 9 4 - 8 3 - 10 8 - 5 5 - 4 3 - 3 3 - 1 2 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 5 15 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - 10 5 - 10 3 - 9 2 - 5 8 - 10 9 - 10 8 - 5 1 - 5 5 - 7 5 - 10 4 - 6 2 - 10 2 - 1 8 - 2 10 - 8 10 - 8 3 - 4 8 - 2 8 - 10 8 - 1 7 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 6 16 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 7 17 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 3 1 H(I),W(I),I=1,...,N - 6 6 - 10 4 - 10 1 - 9 2 - 7 1 - 7 7 - 3 3 - 5 4 - 4 9 - 7 9 - 3 2 - 2 6 - 2 2 - 1 6 - 1 8 - 8 9 - 10 6 - 10 6 - 4 3 - 10 1 - 10 10 - 1 2 - 10 2 - 1 9 - 8 6 - 1 8 - 2 4 - 6 6 - 3 10 - 4 5 - 5 2 - 5 8 - 6 1 - 4 3 - 7 1 - 9 3 - 3 10 - 10 10 - 2 10 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 8 18 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 10 H(I),W(I),I=1,...,N - 1 3 - 7 9 - 5 9 - 4 9 - 5 8 - 5 9 - 2 6 - 9 8 - 10 9 - 6 3 - 8 9 - 1 9 - 5 10 - 2 10 - 8 1 - 10 8 - 3 1 - 10 2 - 1 7 - 9 4 - 8 5 - 7 6 - 10 6 - 1 8 - 9 6 - 7 10 - 10 1 - 7 6 - 10 4 - 4 6 - 10 7 - 8 8 - 4 10 - 8 9 - 2 3 - 10 6 - 2 1 - 1 9 - 1 2 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 9 19 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 5 H(I),W(I),I=1,...,N - 2 6 - 6 3 - 3 2 - 4 1 - 5 3 - 4 4 - 6 2 - 3 9 - 7 2 - 8 8 - 3 1 - 10 5 - 1 10 - 3 8 - 3 4 - 5 8 - 3 1 - 7 9 - 9 1 - 5 9 - 2 9 - 4 10 - 6 3 - 7 5 - 10 4 - 9 3 - 3 6 - 6 4 - 2 8 - 10 3 - 2 10 - 5 9 - 7 6 - 10 1 - 9 5 - 5 4 - 8 7 - 4 7 - 3 8 - - 1 PROBLEM CLASS - 40 N. OF ITEMS - 10 20 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 1 21 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 7 H(I),W(I),I=1,...,N - 6 10 - 6 5 - 2 7 - 8 4 - 10 9 - 5 8 - 6 8 - 9 4 - 3 9 - 10 3 - 5 9 - 7 1 - 9 8 - 6 4 - 6 3 - 3 4 - 2 10 - 1 6 - 4 1 - 4 7 - 2 10 - 9 8 - 2 4 - 8 4 - 1 6 - 5 2 - 10 9 - 8 4 - 4 1 - 7 4 - 10 3 - 8 10 - 10 10 - 8 2 - 8 9 - 9 7 - 2 3 - 4 10 - 3 4 - 1 3 - 5 1 - 2 1 - 6 4 - 2 2 - 8 10 - 7 6 - 4 4 - 9 8 - 3 9 - 2 7 - 5 8 - 4 2 - 5 10 - 7 8 - 1 5 - 4 3 - 5 8 - 4 9 - 9 8 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 2 22 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 2 H(I),W(I),I=1,...,N - 2 8 - 9 3 - 5 9 - 8 2 - 10 10 - 8 6 - 9 6 - 7 8 - 6 5 - 6 1 - 9 7 - 10 3 - 9 7 - 7 6 - 10 3 - 7 3 - 10 5 - 5 3 - 1 10 - 9 6 - 6 3 - 10 7 - 4 7 - 2 4 - 5 5 - 4 8 - 4 6 - 5 4 - 9 10 - 2 8 - 1 3 - 1 3 - 5 1 - 3 2 - 7 6 - 4 4 - 10 3 - 10 10 - 8 8 - 3 3 - 8 4 - 8 3 - 2 5 - 2 9 - 3 8 - 6 7 - 1 5 - 4 4 - 5 9 - 6 1 - 1 10 - 10 3 - 6 8 - 3 7 - 1 6 - 2 5 - 1 4 - 7 1 - 3 3 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 3 23 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 6 H(I),W(I),I=1,...,N - 4 3 - 5 10 - 8 6 - 10 4 - 9 8 - 8 7 - 2 5 - 9 6 - 3 9 - 9 7 - 5 5 - 4 1 - 2 9 - 7 4 - 1 4 - 10 2 - 3 4 - 3 8 - 4 6 - 6 5 - 1 2 - 4 10 - 4 6 - 4 6 - 9 5 - 8 7 - 3 2 - 10 1 - 7 10 - 7 7 - 6 7 - 2 2 - 7 4 - 8 3 - 2 6 - 3 1 - 5 10 - 8 10 - 4 8 - 8 8 - 10 4 - 1 7 - 7 6 - 7 10 - 3 5 - 1 5 - 10 5 - 1 5 - 1 5 - 8 2 - 5 4 - 6 5 - 2 9 - 4 5 - 7 6 - 2 6 - 9 6 - 4 7 - 7 4 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 4 24 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - 5 8 - 5 9 - 3 7 - 10 10 - 2 1 - 9 6 - 1 3 - 5 4 - 10 5 - 8 6 - 4 7 - 7 5 - 10 9 - 8 1 - 10 6 - 8 3 - 8 5 - 8 7 - 1 8 - 3 10 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 5 25 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 3 H(I),W(I),I=1,...,N - 4 4 - 9 6 - 9 9 - 8 5 - 7 10 - 7 1 - 4 3 - 6 9 - 6 9 - 9 5 - 5 4 - 5 2 - 6 4 - 5 4 - 5 1 - 2 8 - 5 10 - 6 6 - 10 8 - 6 1 - 7 6 - 8 3 - 8 8 - 9 1 - 6 4 - 10 1 - 1 2 - 6 10 - 3 9 - 4 9 - 5 2 - 6 3 - 7 1 - 6 9 - 10 2 - 2 9 - 7 3 - 4 7 - 6 3 - 8 1 - 9 8 - 1 2 - 10 5 - 5 4 - 7 7 - 3 5 - 1 6 - 4 7 - 6 1 - 8 3 - 1 3 - 10 2 - 5 6 - 8 4 - 5 8 - 3 5 - 6 1 - 10 1 - 2 9 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 6 26 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 5 H(I),W(I),I=1,...,N - 2 6 - 6 3 - 3 2 - 4 1 - 5 3 - 4 4 - 6 2 - 3 9 - 7 2 - 8 8 - 3 1 - 10 5 - 1 10 - 3 8 - 3 4 - 5 8 - 3 1 - 7 9 - 9 1 - 5 9 - 2 9 - 4 10 - 6 3 - 7 5 - 10 4 - 9 3 - 3 6 - 6 4 - 2 8 - 10 3 - 2 10 - 5 9 - 7 6 - 10 1 - 9 5 - 5 4 - 8 7 - 4 7 - 3 8 - 6 10 - 9 4 - 2 7 - 5 7 - 5 7 - 1 2 - 5 1 - 2 6 - 1 2 - 5 10 - 9 10 - 1 9 - 5 4 - 6 10 - 3 10 - 7 5 - 4 5 - 4 7 - 9 10 - 3 6 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 7 27 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 5 H(I),W(I),I=1,...,N - 3 8 - 4 5 - 10 8 - 7 3 - 10 9 - 2 7 - 3 4 - 5 9 - 4 2 - 9 1 - 2 4 - 8 3 - 6 3 - 9 4 - 1 9 - 3 1 - 6 1 - 6 10 - 1 4 - 4 3 - 1 7 - 10 3 - 3 10 - 6 10 - 9 8 - 2 3 - 2 8 - 8 8 - 2 2 - 9 2 - 8 4 - 2 6 - 6 1 - 9 5 - 3 6 - 4 7 - 1 2 - 5 1 - 3 7 - 3 4 - 7 2 - 2 3 - 5 3 - 10 9 - 1 5 - 8 6 - 3 1 - 1 2 - 2 5 - 7 9 - 2 5 - 6 10 - 1 2 - 3 4 - 1 1 - 7 1 - 5 8 - 7 7 - 2 6 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 8 28 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 10 3 H(I),W(I),I=1,...,N - 9 8 - 5 7 - 8 10 - 5 10 - 4 1 - 7 3 - 10 8 - 3 1 - 6 1 - 7 1 - 3 6 - 6 5 - 4 1 - 3 7 - 7 5 - 1 5 - 4 4 - 8 10 - 9 5 - 6 6 - 10 1 - 8 1 - 8 4 - 3 7 - 7 3 - 1 7 - 6 9 - 7 10 - 4 6 - 9 6 - 8 2 - 8 10 - 3 9 - 9 1 - 3 1 - 8 8 - 10 4 - 8 6 - 3 2 - 2 4 - 4 7 - 9 2 - 5 9 - 10 4 - 6 7 - 8 9 - 7 7 - 3 8 - 2 5 - 4 5 - 1 4 - 7 7 - 2 8 - 9 7 - 2 4 - 1 7 - 8 4 - 8 7 - 10 2 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 9 29 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 5 H(I),W(I),I=1,...,N - 1 6 - 2 7 - 8 2 - 3 2 - 1 8 - 1 5 - 5 4 - 10 5 - 4 4 - 9 1 - 10 7 - 8 8 - 3 3 - 1 5 - 2 8 - 7 2 - 2 8 - 7 3 - 2 5 - 4 7 - 2 4 - 8 5 - 7 2 - 8 10 - 4 10 - 10 2 - 4 3 - 10 10 - 3 8 - 2 10 - 8 7 - 7 9 - 9 3 - 6 8 - 2 6 - 8 2 - 4 6 - 7 9 - 6 7 - 3 9 - 3 7 - 6 7 - 3 7 - 10 10 - 6 2 - 3 1 - 7 10 - 7 3 - 4 1 - 5 5 - 8 10 - 5 6 - 3 5 - 10 2 - 7 7 - 2 1 - 2 10 - 10 3 - 1 3 - - 1 PROBLEM CLASS - 60 N. OF ITEMS - 10 30 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 8 8 H(I),W(I),I=1,...,N - 8 3 - 5 5 - 2 3 - 4 9 - 10 8 - 2 9 - 6 5 - 10 1 - 7 6 - 3 2 - 6 5 - 8 4 - 6 10 - 7 10 - 5 2 - 4 8 - 4 5 - 7 10 - 9 7 - 7 10 - 5 9 - 8 6 - 5 6 - 8 9 - 3 6 - 9 3 - 7 9 - 4 6 - 8 10 - 2 10 - 1 8 - 3 8 - 2 2 - 3 9 - 4 7 - 8 4 - 10 1 - 10 6 - 9 6 - 3 4 - 2 6 - 6 9 - 2 8 - 9 6 - 8 3 - 6 2 - 2 7 - 6 8 - 6 6 - 10 8 - 10 3 - 2 1 - 4 10 - 9 8 - 3 7 - 4 3 - 4 4 - 9 3 - 9 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 1 31 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 7 4 H(I),W(I),I=1,...,N - 3 1 - 7 6 - 2 8 - 4 9 - 2 6 - 7 7 - 6 3 - 7 2 - 3 1 - 8 3 - 3 4 - 9 1 - 1 8 - 10 1 - 6 7 - 5 9 - 7 3 - 3 8 - 9 6 - 5 2 - 1 5 - 4 8 - 3 6 - 10 7 - 10 5 - 2 5 - 9 5 - 6 9 - 2 10 - 9 9 - 2 7 - 2 1 - 9 8 - 10 2 - 2 7 - 10 3 - 7 2 - 4 9 - 1 3 - 6 7 - 6 6 - 3 9 - 5 8 - 8 9 - 6 1 - 4 5 - 1 8 - 9 8 - 4 7 - 5 2 - 3 1 - 5 5 - 9 2 - 8 9 - 6 9 - 4 7 - 3 3 - 8 8 - 9 8 - 8 2 - 10 2 - 5 4 - 4 7 - 3 3 - 2 6 - 1 8 - 7 8 - 9 4 - 8 4 - 1 10 - 6 7 - 6 6 - 6 7 - 5 6 - 5 3 - 1 9 - 4 1 - 4 6 - 7 7 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 2 32 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 5 9 H(I),W(I),I=1,...,N - 1 4 - 8 2 - 4 9 - 1 2 - 4 6 - 4 4 - 1 6 - 1 5 - 4 5 - 2 6 - 9 10 - 5 2 - 3 1 - 7 10 - 7 4 - 5 2 - 6 6 - 9 2 - 7 8 - 5 7 - 2 5 - 10 10 - 5 4 - 5 4 - 4 7 - 5 7 - 9 8 - 8 10 - 6 5 - 4 8 - 2 9 - 8 9 - 9 4 - 8 3 - 10 8 - 5 5 - 4 3 - 3 3 - 1 2 - 10 8 - 6 4 - 8 4 - 7 9 - 10 10 - 5 2 - 5 5 - 5 1 - 3 6 - 3 8 - 10 9 - 2 5 - 2 5 - 5 2 - 1 10 - 1 9 - 2 10 - 1 10 - 1 7 - 9 4 - 1 6 - 2 2 - 6 4 - 2 7 - 3 2 - 4 9 - 4 5 - 5 7 - 3 10 - 6 7 - 7 9 - 2 6 - 9 5 - 10 5 - 2 8 - 2 9 - 4 8 - 2 6 - 6 9 - 8 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 3 33 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 3 H(I),W(I),I=1,...,N - 1 7 - 7 5 - 4 4 - 4 1 - 2 10 - 7 5 - 2 6 - 2 5 - 6 7 - 6 1 - 8 10 - 1 9 - 6 8 - 10 7 - 1 3 - 1 4 - 7 4 - 6 6 - 2 1 - 10 1 - 8 1 - 9 9 - 8 9 - 5 5 - 1 8 - 2 7 - 7 1 - 8 6 - 1 7 - 5 7 - 1 6 - 6 6 - 9 3 - 10 6 - 7 7 - 8 1 - 5 7 - 3 8 - 2 8 - 5 8 - 5 9 - 3 7 - 10 10 - 2 1 - 9 6 - 1 3 - 5 4 - 10 5 - 8 6 - 4 7 - 7 5 - 10 9 - 8 1 - 10 6 - 8 3 - 8 5 - 8 7 - 1 8 - 3 10 - 2 9 - 1 4 - 4 6 - 1 2 - 5 10 - 1 2 - 7 5 - 5 6 - 3 9 - 7 8 - 1 3 - 10 6 - 3 2 - 2 1 - 3 4 - 5 7 - 9 9 - 2 3 - 2 3 - 2 8 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 4 34 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 1 10 H(I),W(I),I=1,...,N - 1 3 - 7 9 - 5 9 - 4 9 - 5 8 - 5 9 - 2 6 - 9 8 - 10 9 - 6 3 - 8 9 - 1 9 - 5 10 - 2 10 - 8 1 - 10 8 - 3 1 - 10 2 - 1 7 - 9 4 - 8 5 - 7 6 - 10 6 - 1 8 - 9 6 - 7 10 - 10 1 - 7 6 - 10 4 - 4 6 - 10 7 - 8 8 - 4 10 - 8 9 - 2 3 - 10 6 - 2 1 - 1 9 - 1 2 - 2 4 - 6 5 - 8 8 - 8 7 - 6 2 - 10 5 - 8 1 - 3 10 - 8 3 - 5 6 - 4 8 - 2 1 - 7 1 - 2 6 - 1 10 - 5 7 - 4 6 - 6 9 - 8 3 - 3 5 - 6 9 - 7 10 - 7 6 - 2 10 - 1 7 - 7 7 - 4 1 - 1 5 - 2 8 - 1 3 - 7 4 - 3 10 - 3 5 - 8 3 - 9 3 - 2 8 - 5 3 - 1 6 - 5 1 - 7 3 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 5 35 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - 9 5 - 2 10 - 7 3 - 1 7 - 2 8 - 3 5 - 9 9 - 8 7 - 4 7 - 2 2 - 10 6 - 1 10 - 10 5 - 7 6 - 3 4 - 4 9 - 9 7 - 1 8 - 4 3 - 8 8 - 4 3 - 9 8 - 2 10 - 3 8 - 10 4 - 1 3 - 8 4 - 7 10 - 6 6 - 8 1 - 9 7 - 7 9 - 4 8 - 6 4 - 3 4 - 5 5 - 10 2 - 4 8 - 2 4 - 8 10 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 6 36 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 10 3 H(I),W(I),I=1,...,N - 9 8 - 5 7 - 8 10 - 5 10 - 4 1 - 7 3 - 10 8 - 3 1 - 6 1 - 7 1 - 3 6 - 6 5 - 4 1 - 3 7 - 7 5 - 1 5 - 4 4 - 8 10 - 9 5 - 6 6 - 10 1 - 8 1 - 8 4 - 3 7 - 7 3 - 1 7 - 6 9 - 7 10 - 4 6 - 9 6 - 8 2 - 8 10 - 3 9 - 9 1 - 3 1 - 8 8 - 10 4 - 8 6 - 3 2 - 2 4 - 4 7 - 9 2 - 5 9 - 10 4 - 6 7 - 8 9 - 7 7 - 3 8 - 2 5 - 4 5 - 1 4 - 7 7 - 2 8 - 9 7 - 2 4 - 1 7 - 8 4 - 8 7 - 10 2 - 7 7 - 4 6 - 10 2 - 7 6 - 1 10 - 10 8 - 8 1 - 8 9 - 1 10 - 9 1 - 6 5 - 3 7 - 10 6 - 5 5 - 5 9 - 3 7 - 4 2 - 8 8 - 6 4 - 4 4 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 7 37 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 9 6 H(I),W(I),I=1,...,N - 4 3 - 5 9 - 2 10 - 7 5 - 5 6 - 6 9 - 1 3 - 6 9 - 10 4 - 5 6 - 7 8 - 5 4 - 10 5 - 9 2 - 10 10 - 6 10 - 2 2 - 6 2 - 2 10 - 5 7 - 3 9 - 9 5 - 9 8 - 10 2 - 7 7 - 3 4 - 8 10 - 5 3 - 8 6 - 6 4 - 3 6 - 3 4 - 5 4 - 2 4 - 9 7 - 5 9 - 2 7 - 6 6 - 5 9 - 2 7 - 3 10 - 6 5 - 3 1 - 1 10 - 7 7 - 5 2 - 9 6 - 9 5 - 7 8 - 8 7 - 1 8 - 9 9 - 8 3 - 5 6 - 3 6 - 8 6 - 8 10 - 7 9 - 8 4 - 2 10 - 1 7 - 10 7 - 7 5 - 6 1 - 1 6 - 3 6 - 1 10 - 4 1 - 8 3 - 8 5 - 7 1 - 5 5 - 4 6 - 10 7 - 2 2 - 2 2 - 5 8 - 10 6 - 10 4 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 8 38 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 6 H(I),W(I),I=1,...,N - 4 1 - 4 8 - 3 1 - 10 9 - 3 6 - 10 6 - 3 8 - 7 5 - 3 2 - 1 7 - 7 4 - 1 8 - 4 7 - 3 4 - 5 5 - 3 8 - 4 5 - 4 2 - 8 9 - 10 6 - 9 10 - 8 10 - 2 8 - 10 9 - 4 3 - 10 10 - 6 8 - 6 5 - 3 3 - 8 8 - 3 9 - 2 7 - 6 9 - 5 3 - 7 1 - 9 10 - 3 7 - 7 6 - 7 1 - 7 2 - 2 1 - 1 3 - 7 8 - 4 7 - 1 6 - 2 4 - 10 4 - 6 10 - 2 1 - 3 1 - 7 4 - 8 9 - 1 8 - 4 8 - 10 7 - 4 7 - 6 3 - 7 5 - 3 5 - 4 9 - 10 5 - 8 5 - 7 5 - 9 4 - 8 5 - 6 2 - 3 5 - 4 5 - 10 9 - 2 6 - 6 7 - 10 7 - 7 7 - 3 8 - 6 6 - 8 9 - 6 1 - 7 5 - 5 2 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 9 39 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 9 H(I),W(I),I=1,...,N - 6 7 - 10 3 - 3 1 - 2 6 - 2 7 - 6 5 - 5 6 - 9 10 - 5 9 - 2 7 - 2 5 - 10 3 - 6 8 - 1 9 - 1 8 - 5 10 - 5 5 - 7 4 - 9 3 - 4 1 - 8 10 - 9 6 - 10 8 - 5 7 - 5 10 - 1 5 - 8 4 - 6 4 - 7 3 - 6 2 - 3 9 - 9 1 - 10 1 - 5 4 - 7 10 - 10 10 - 3 10 - 10 9 - 5 10 - 7 7 - 9 10 - 5 10 - 6 4 - 3 10 - 2 1 - 1 2 - 3 2 - 3 3 - 1 10 - 8 3 - 1 5 - 7 9 - 10 6 - 4 7 - 9 9 - 6 9 - 2 10 - 6 9 - 9 3 - 7 4 - 8 9 - 6 7 - 6 9 - 7 1 - 10 2 - 2 4 - 10 3 - 9 7 - 2 9 - 10 5 - 4 3 - 9 5 - 5 8 - 4 10 - 1 2 - 6 2 - 10 7 - 9 10 - 2 6 - - 1 PROBLEM CLASS - 80 N. OF ITEMS - 10 40 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 5 8 H(I),W(I),I=1,...,N - 8 8 - 1 7 - 6 6 - 1 5 - 2 2 - 3 4 - 9 4 - 9 7 - 6 2 - 3 3 - 2 3 - 3 2 - 3 2 - 10 9 - 7 2 - 9 2 - 4 6 - 6 2 - 10 3 - 4 3 - 1 3 - 6 4 - 9 2 - 1 5 - 8 6 - 10 10 - 7 7 - 6 8 - 6 10 - 8 10 - 9 1 - 8 9 - 5 3 - 8 5 - 5 10 - 8 7 - 3 9 - 8 1 - 6 2 - 3 4 - 7 2 - 10 7 - 8 9 - 1 5 - 9 3 - 10 5 - 1 7 - 4 9 - 7 1 - 6 1 - 5 6 - 9 9 - 7 5 - 1 3 - 7 6 - 3 9 - 3 1 - 10 4 - 5 4 - 10 10 - 10 3 - 3 10 - 3 9 - 4 3 - 6 6 - 1 9 - 5 3 - 5 3 - 5 9 - 1 3 - 10 1 - 5 10 - 2 4 - 10 9 - 10 2 - 9 6 - 5 7 - 1 4 - 2 8 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 1 41 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 4 10 H(I),W(I),I=1,...,N - 10 2 - 4 2 - 10 10 - 2 7 - 10 9 - 6 5 - 5 7 - 7 1 - 3 5 - 9 3 - 4 9 - 10 2 - 3 4 - 2 2 - 9 4 - 8 2 - 1 1 - 7 1 - 4 4 - 10 6 - 6 4 - 10 3 - 9 6 - 6 3 - 7 6 - 9 2 - 7 7 - 4 6 - 10 3 - 8 7 - 2 1 - 2 10 - 9 3 - 10 2 - 4 2 - 1 4 - 9 8 - 6 10 - 4 10 - 9 9 - 9 4 - 7 2 - 9 7 - 3 3 - 1 10 - 10 10 - 7 8 - 6 3 - 1 8 - 2 8 - 1 2 - 3 2 - 8 4 - 7 7 - 6 2 - 4 6 - 3 7 - 9 7 - 10 3 - 10 3 - 2 8 - 8 6 - 7 4 - 7 5 - 5 4 - 5 1 - 2 7 - 4 8 - 4 3 - 7 4 - 3 7 - 3 1 - 3 8 - 2 3 - 3 5 - 10 6 - 2 3 - 3 4 - 7 1 - 3 9 - 4 9 - 5 1 - 4 1 - 5 9 - 3 6 - 6 9 - 7 5 - 2 7 - 9 2 - 10 6 - 1 4 - 2 10 - 3 3 - 1 6 - 5 4 - 7 6 - 2 4 - 10 4 - 1 4 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 2 42 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 10 8 H(I),W(I),I=1,...,N - 5 6 - 2 3 - 10 10 - 8 8 - 2 2 - 6 2 - 6 10 - 9 2 - 8 5 - 9 3 - 1 2 - 5 9 - 8 7 - 8 2 - 8 3 - 3 2 - 2 4 - 6 8 - 3 6 - 10 5 - 10 3 - 9 2 - 5 8 - 10 9 - 10 8 - 5 1 - 5 5 - 7 5 - 10 4 - 6 2 - 10 2 - 1 8 - 2 10 - 8 10 - 8 3 - 4 8 - 2 8 - 10 8 - 1 7 - 1 7 - 8 4 - 4 6 - 6 7 - 1 10 - 3 6 - 6 7 - 10 7 - 7 7 - 2 8 - 5 5 - 7 8 - 4 9 - 5 3 - 2 10 - 1 1 - 1 2 - 3 2 - 4 3 - 2 1 - 10 5 - 2 6 - 9 1 - 2 8 - 6 10 - 2 2 - 10 2 - 6 4 - 10 3 - 3 7 - 1 9 - 3 5 - 2 2 - 2 5 - 3 7 - 6 8 - 8 1 - 7 10 - 6 5 - 10 7 - 8 3 - 2 1 - 8 4 - 4 7 - 3 9 - 1 2 - 6 2 - 10 7 - 9 10 - 3 7 - 2 6 - 4 9 - 5 3 - 10 5 - 3 9 - 4 9 - 4 5 - 9 9 - 8 6 - 3 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 3 43 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 1 3 H(I),W(I),I=1,...,N - 4 4 - 9 6 - 9 9 - 8 5 - 7 10 - 7 1 - 4 3 - 6 9 - 6 9 - 9 5 - 5 4 - 5 2 - 6 4 - 5 4 - 5 1 - 2 8 - 5 10 - 6 6 - 10 8 - 6 1 - 7 6 - 8 3 - 8 8 - 9 1 - 6 4 - 10 1 - 1 2 - 6 10 - 3 9 - 4 9 - 5 2 - 6 3 - 7 1 - 6 9 - 10 2 - 2 9 - 7 3 - 4 7 - 6 3 - 8 1 - 9 8 - 1 2 - 10 5 - 5 4 - 7 7 - 3 5 - 1 6 - 4 7 - 6 1 - 8 3 - 1 3 - 10 2 - 5 6 - 8 4 - 5 8 - 3 5 - 6 1 - 10 1 - 2 9 - 5 4 - 5 8 - 1 8 - 4 2 - 1 2 - 1 3 - 4 6 - 8 1 - 1 4 - 5 5 - 5 5 - 2 1 - 6 10 - 3 5 - 3 3 - 8 10 - 2 1 - 5 10 - 10 7 - 1 2 - 8 3 - 3 8 - 1 9 - 1 2 - 6 5 - 1 2 - 5 6 - 10 8 - 2 10 - 9 6 - 5 7 - 3 3 - 4 2 - 10 1 - 5 3 - 10 3 - 6 10 - 8 7 - 6 9 - 2 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 4 44 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 6 8 H(I),W(I),I=1,...,N - 4 9 - 6 4 - 3 1 - 3 3 - 3 5 - 6 6 - 8 8 - 7 6 - 5 10 - 8 3 - 6 8 - 9 6 - 4 8 - 10 1 - 9 2 - 6 4 - 10 4 - 4 9 - 3 1 - 6 7 - 5 6 - 6 9 - 7 2 - 1 4 - 4 7 - 4 7 - 3 2 - 8 5 - 6 2 - 1 1 - 7 4 - 4 8 - 4 1 - 2 4 - 8 5 - 3 1 - 3 5 - 7 2 - 7 1 - 9 5 - 2 10 - 7 3 - 1 7 - 2 8 - 3 5 - 9 9 - 8 7 - 4 7 - 2 2 - 10 6 - 1 10 - 10 5 - 7 6 - 3 4 - 4 9 - 9 7 - 1 8 - 4 3 - 8 8 - 4 3 - 9 8 - 2 10 - 3 8 - 10 4 - 1 3 - 8 4 - 7 10 - 6 6 - 8 1 - 9 7 - 7 9 - 4 8 - 6 4 - 3 4 - 5 5 - 10 2 - 4 8 - 2 4 - 8 10 - 2 4 - 5 3 - 3 9 - 5 10 - 3 2 - 3 9 - 3 6 - 7 2 - 8 9 - 7 3 - 6 3 - 9 10 - 7 1 - 1 4 - 6 2 - 2 9 - 1 6 - 9 4 - 3 8 - 7 8 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 5 45 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 9 1 H(I),W(I),I=1,...,N - 9 1 - 5 6 - 8 5 - 5 10 - 5 6 - 9 4 - 2 4 - 5 2 - 9 8 - 10 4 - 6 4 - 1 8 - 8 9 - 8 1 - 3 4 - 7 10 - 10 4 - 5 6 - 6 6 - 3 2 - 8 2 - 6 9 - 7 6 - 2 5 - 7 6 - 1 6 - 6 3 - 8 10 - 5 1 - 1 7 - 10 8 - 1 2 - 6 6 - 2 3 - 6 8 - 2 1 - 5 3 - 3 10 - 9 1 - 8 9 - 10 8 - 6 7 - 2 10 - 7 1 - 4 9 - 7 7 - 5 9 - 2 6 - 2 9 - 4 3 - 1 9 - 8 7 - 3 3 - 10 7 - 4 10 - 3 9 - 10 1 - 1 9 - 4 10 - 1 1 - 9 4 - 5 6 - 2 6 - 7 4 - 6 8 - 4 6 - 5 10 - 9 6 - 7 3 - 5 2 - 1 9 - 10 4 - 4 9 - 5 8 - 3 2 - 5 1 - 9 3 - 7 5 - 6 10 - 3 3 - 2 3 - 7 3 - 9 8 - 7 8 - 10 3 - 4 10 - 3 7 - 2 8 - 1 7 - 10 3 - 6 8 - 7 9 - 7 4 - 10 4 - 5 7 - 5 10 - 4 6 - 3 10 - 3 2 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 6 46 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 8 8 H(I),W(I),I=1,...,N - 8 3 - 5 5 - 2 3 - 4 9 - 10 8 - 2 9 - 6 5 - 10 1 - 7 6 - 3 2 - 6 5 - 8 4 - 6 10 - 7 10 - 5 2 - 4 8 - 4 5 - 7 10 - 9 7 - 7 10 - 5 9 - 8 6 - 5 6 - 8 9 - 3 6 - 9 3 - 7 9 - 4 6 - 8 10 - 2 10 - 1 8 - 3 8 - 2 2 - 3 9 - 4 7 - 8 4 - 10 1 - 10 6 - 9 6 - 3 4 - 2 6 - 6 9 - 2 8 - 9 6 - 8 3 - 6 2 - 2 7 - 6 8 - 6 6 - 10 8 - 10 3 - 2 1 - 4 10 - 9 8 - 3 7 - 4 3 - 4 4 - 9 3 - 9 6 - 4 1 - 2 10 - 10 10 - 10 8 - 9 8 - 6 4 - 2 6 - 3 6 - 8 9 - 9 5 - 2 5 - 5 5 - 2 4 - 6 4 - 8 1 - 10 3 - 7 4 - 7 7 - 4 4 - 2 4 - 1 5 - 3 4 - 4 5 - 1 3 - 8 6 - 10 7 - 7 1 - 9 8 - 3 9 - 8 10 - 5 10 - 1 2 - 4 10 - 7 3 - 5 5 - 6 10 - 4 7 - 4 9 - 4 1 - 7 1 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 7 47 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 7 5 H(I),W(I),I=1,...,N - 5 4 - 1 1 - 9 6 - 3 10 - 3 9 - 1 1 - 2 1 - 5 2 - 3 3 - 1 7 - 8 10 - 7 10 - 1 10 - 2 4 - 10 3 - 2 7 - 6 4 - 5 1 - 3 1 - 1 9 - 10 5 - 5 10 - 6 10 - 5 4 - 8 5 - 2 7 - 1 9 - 1 5 - 9 9 - 8 10 - 4 1 - 6 6 - 6 6 - 8 2 - 4 10 - 3 8 - 3 9 - 2 8 - 2 5 - 9 1 - 1 3 - 1 8 - 5 10 - 10 3 - 1 7 - 1 3 - 10 8 - 1 1 - 8 3 - 1 9 - 2 10 - 6 7 - 2 6 - 3 6 - 4 8 - 4 8 - 5 6 - 3 4 - 6 6 - 8 3 - 2 5 - 9 10 - 1 5 - 2 3 - 3 9 - 5 2 - 3 5 - 7 4 - 9 6 - 4 4 - 2 4 - 4 4 - 6 8 - 7 10 - 9 9 - 8 7 - 3 1 - 5 9 - 1 2 - 9 8 - 2 4 - 5 3 - 6 1 - 9 5 - 9 9 - 4 9 - 7 2 - 4 1 - 2 2 - 6 4 - 9 9 - 1 2 - 5 2 - 5 2 - 10 7 - 4 5 - 1 10 - 10 7 - 4 4 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 8 48 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 5 8 H(I),W(I),I=1,...,N - 8 8 - 1 7 - 6 6 - 1 5 - 2 2 - 3 4 - 9 4 - 9 7 - 6 2 - 3 3 - 2 3 - 3 2 - 3 2 - 10 9 - 7 2 - 9 2 - 4 6 - 6 2 - 10 3 - 4 3 - 1 3 - 6 4 - 9 2 - 1 5 - 8 6 - 10 10 - 7 7 - 6 8 - 6 10 - 8 10 - 9 1 - 8 9 - 5 3 - 8 5 - 5 10 - 8 7 - 3 9 - 8 1 - 6 2 - 3 4 - 7 2 - 10 7 - 8 9 - 1 5 - 9 3 - 10 5 - 1 7 - 4 9 - 7 1 - 6 1 - 5 6 - 9 9 - 7 5 - 1 3 - 7 6 - 3 9 - 3 1 - 10 4 - 5 4 - 10 10 - 10 3 - 3 10 - 3 9 - 4 3 - 6 6 - 1 9 - 5 3 - 5 3 - 5 9 - 1 3 - 10 1 - 5 10 - 2 4 - 10 9 - 10 2 - 9 6 - 5 7 - 1 4 - 2 8 - 6 6 - 7 7 - 10 1 - 3 6 - 9 10 - 4 5 - 6 6 - 7 4 - 3 9 - 3 7 - 10 8 - 8 4 - 7 9 - 9 3 - 9 9 - 6 1 - 3 9 - 5 5 - 1 4 - 3 5 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 9 49 RELATIVE AND ABSOLUTE N. OF INSTANCE - 10 10 HBIN,WBIN - 6 5 H(I),W(I),I=1,...,N - 1 3 - 8 2 - 9 2 - 1 5 - 2 6 - 3 5 - 2 3 - 6 6 - 8 3 - 3 7 - 1 2 - 3 7 - 5 6 - 6 3 - 9 7 - 8 1 - 3 9 - 5 2 - 1 1 - 9 2 - 2 3 - 5 7 - 7 9 - 10 9 - 9 8 - 4 3 - 8 1 - 4 6 - 3 2 - 6 9 - 10 8 - 2 6 - 5 2 - 6 7 - 2 6 - 5 10 - 2 10 - 2 2 - 6 5 - 10 10 - 3 4 - 7 4 - 8 5 - 4 10 - 8 10 - 6 6 - 6 3 - 10 1 - 5 2 - 8 1 - 3 7 - 5 4 - 2 5 - 7 10 - 5 1 - 5 4 - 10 7 - 6 4 - 10 9 - 5 1 - 7 2 - 5 9 - 10 10 - 9 6 - 10 5 - 6 4 - 1 6 - 6 7 - 2 4 - 4 1 - 3 4 - 9 10 - 8 2 - 10 6 - 6 1 - 2 9 - 8 5 - 4 8 - 7 1 - 6 9 - 3 10 - 3 8 - 5 8 - 2 8 - 9 2 - 5 3 - 2 2 - 5 10 - 5 4 - 2 1 - 3 5 - 5 10 - 3 6 - 10 5 - 7 2 - 4 6 - 9 10 - 9 9 - 6 2 - - 1 PROBLEM CLASS - 100 N. OF ITEMS - 10 50 RELATIVE AND ABSOLUTE N. 
OF INSTANCE - 10 10 HBIN,WBIN - 8 9 H(I),W(I),I=1,...,N - 3 3 - 1 10 - 6 9 - 3 2 - 10 6 - 10 9 - 8 3 - 4 3 - 9 9 - 9 3 - 4 1 - 4 1 - 6 5 - 9 9 - 4 2 - 8 7 - 10 8 - 1 5 - 6 9 - 6 7 - 2 8 - 10 3 - 8 7 - 9 1 - 9 6 - 6 8 - 2 5 - 4 1 - 9 9 - 10 10 - 4 5 - 8 1 - 4 5 - 10 1 - 2 3 - 4 1 - 1 7 - 2 6 - 9 8 - 8 4 - 7 10 - 10 5 - 10 1 - 9 4 - 6 2 - 9 9 - 5 9 - 8 1 - 2 7 - 7 4 - 5 9 - 2 6 - 5 10 - 8 9 - 7 6 - 9 6 - 7 9 - 7 6 - 8 3 - 1 9 - 3 7 - 2 1 - 1 10 - 5 8 - 2 9 - 6 2 - 2 10 - 8 8 - 7 4 - 5 2 - 10 7 - 4 7 - 3 5 - 6 7 - 6 1 - 7 9 - 9 8 - 4 5 - 7 3 - 7 9 - 7 9 - 2 8 - 1 10 - 6 5 - 3 4 - 10 3 - 10 10 - 9 10 - 5 5 - 10 7 - 1 6 - 5 9 - 6 4 - 8 3 - 1 3 - 7 1 - 1 1 - 3 7 - 4 10 - + 1 PROBLEM CLASS + 20 N. OF ITEMS + 1 1 RELATIVE AND ABSOLUTE N. OF INSTANCE + 10 10 HBIN,WBIN + 9 5 H(I),W(I),I=1,...,N + 2 4 + 6 10 + 7 5 + 3 6 + 7 10 + 5 1 + 5 3 + 9 6 + 4 2 + 7 6 + 2 7 + 3 8 + 10 4 + 5 4 + 3 10 + 3 8 + 8 7 + 3 8 + 7 8 + + 1 PROBLEM CLASS + 20 N. OF ITEMS + 2 2 RELATIVE AND ABSOLUTE N. OF INSTANCE + 10 10 HBIN,WBIN + 2 2 H(I),W(I),I=1,...,N + 8 6 + 2 10 + 3 1 + 4 8 + 10 3 + 9 1 + 5 1 + 3 6 + 1 1 + 2 4 + 2 9 + 9 1 + 5 9 + 7 4 + 2 2 + 4 3 + 7 9 + 1 4 + 8 9 + + 1 PROBLEM CLASS + 20 N. OF ITEMS + 3 3 RELATIVE AND ABSOLUTE N. 
OF INSTANCE + 10 10 HBIN,WBIN + 5 7 H(I),W(I),I=1,...,N + 6 10 + 6 5 + 2 7 + 8 4 + 10 9 + 5 8 + 6 8 + 9 4 + 3 9 + 10 3 + 5 9 + 7 1 + 9 8 + 6 4 + 6 3 + 3 4 + 2 10 + 1 6 + 4 1 diff --git a/ortools/util/fp_utils.h b/ortools/util/fp_utils.h index a5e6cec283..cdfc476ed8 100644 --- a/ortools/util/fp_utils.h +++ b/ortools/util/fp_utils.h @@ -253,6 +253,11 @@ inline FloatType Interpolate(FloatType x, FloatType y, FloatType alpha) { return alpha * x + (1 - alpha) * y; } +inline int fast_ilogb(double value) { return ilogb(value); } +inline double fast_scalbn(double value, int exponent) { + return scalbn(value, exponent); +} + } // namespace operations_research #endif // OR_TOOLS_UTIL_FP_UTILS_H_ From ce6e36569064bcb2ec3f87b44c6b8072019dbe28 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 2 Jul 2025 11:55:25 +0200 Subject: [PATCH 41/81] pdlp: Add README.md --- ortools/pdlp/README.md | 59 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 ortools/pdlp/README.md diff --git a/ortools/pdlp/README.md b/ortools/pdlp/README.md new file mode 100644 index 0000000000..b770f265a3 --- /dev/null +++ b/ortools/pdlp/README.md @@ -0,0 +1,59 @@ +# Primal-Dual Hybrid Gradient Solver (PDLP) + +This directory contains PDLP, a library for solving linear programming (LP) and +quadratic programming (QP) problems using first-order methods. + +The implementation is based on the Primal-Dual Hybrid Gradient (PDHG) algorithm, +which is preprocessed with scaling and optional presolving to improve +performance and numerical stability. + +## Core C++ libraries: + +* [`primal_dual_hybrid_gradient.h`][primal_dual_hybrid_gradient_h]: The main + entry point for the solver, which takes a `QuadraticProgram` and solver + parameters. + +* [`quadratic_program.h`][quadratic_program_h]: Defines the `QuadraticProgram` + struct to represent the optimization problem, including objective vectors, + constraint matrices, and bounds. 
+ +* [`quadratic_program_io.h`][quadratic_program_io_h]: Provides utilities to read + quadratic programs from various file formats, including MPS and MPModelProto. + +* [`sharded_quadratic_program.h`][sharded_quadratic_program_h] and + [`sharder.h`][sharder_h]: These provide the infrastructure for sharding + problem data and performing parallel computations. + +* [`scheduler.h`][scheduler_h]: A thread scheduling interface that supports + multiple backends (e.g. Eigen's thread pools). + +* [`iteration_stats.h`][iteration_stats_h] and + [`termination.h`][termination_h]: Contain logic for computing convergence and + infeasibility statistics and checking termination criteria. + +## Configuration and Logging + +* [`solvers.proto`][solvers_proto]: Defines the `PrimalDualHybridGradientParams` + message for configuring the solver, including termination criteria, + algorithmic choices like restart strategies, and linesearch rules. +* [`solve_log.proto`][solve_log_proto]: Defines messages for logging the + solver's progress and final result, such as `IterationStats` and `SolveLog`. + +## Wrappers and Samples + +* [`python/`](python): Contains the `pybind11` wrapper to expose the C++ library + to Python, along with its build definitions and tests. +* [`samples/`](samples): This directory provides example usage of the library. 
+ + + +[primal_dual_hybrid_gradient_h]: ../pdlp/primal_dual_hybrid_gradient.h +[quadratic_program_h]: ../pdlp/quadratic_program.h +[quadratic_program_io_h]: ../pdlp/quadratic_program_io.h +[sharded_quadratic_program_h]: ../pdlp/sharded_quadratic_program.h +[sharder_h]: ../pdlp/sharder.h +[scheduler_h]: ../pdlp/scheduler.h +[iteration_stats_h]: ../pdlp/iteration_stats.h +[termination_h]: ../pdlp/termination.h +[solvers_proto]: ../pdlp/solvers.proto +[solve_log_proto]: ../pdlp/solve_log.proto From a2c3fa71b929e4efa18dd9e9104c38df4f00703e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 4 Jul 2025 15:17:22 +0200 Subject: [PATCH 42/81] remove deprecated doc generation stuff * remove "doc" target from legacy makefile based build * remove doxygen templates no more used * removed pdoc3 .mako files (we now re-use pdoc) --- Makefile | 9 +- makefiles/Makefile.doc.mk | 44 - tools/doc/DoxygenLayout.xml | 220 --- tools/doc/all.footer.html.in | 8 - tools/doc/all.styleSheet.css.in | 1450 --------------- tools/doc/cpp.doxy.in | 2544 --------------------------- tools/doc/cpp.header.html.in | 33 - tools/doc/default.footer.html.in | 21 - tools/doc/default.header.html.in | 56 - tools/doc/default.styleSheet.css.in | 1730 ------------------ tools/doc/dotnet.doxy.in | 2525 -------------------------- tools/doc/dotnet.header.html.in | 34 - tools/doc/gen_javadoc.sh | 27 - tools/doc/gen_ref_doc.py | 266 --- tools/doc/java.doxy.in | 2525 -------------------------- tools/doc/java.header.html.in | 34 - tools/doc/ortools.header.html.in | 67 - tools/doc/samples_cpp.dox | 19 - tools/doc/templates/credits.mako | 0 tools/doc/templates/head.mako | 6 - tools/doc/templates/logo.mako | 5 - 21 files changed, 1 insertion(+), 11622 deletions(-) delete mode 100644 makefiles/Makefile.doc.mk delete mode 100644 tools/doc/DoxygenLayout.xml delete mode 100644 tools/doc/all.footer.html.in delete mode 100644 tools/doc/all.styleSheet.css.in delete mode 100644 tools/doc/cpp.doxy.in delete mode 100644 
tools/doc/cpp.header.html.in delete mode 100644 tools/doc/default.footer.html.in delete mode 100644 tools/doc/default.header.html.in delete mode 100644 tools/doc/default.styleSheet.css.in delete mode 100644 tools/doc/dotnet.doxy.in delete mode 100644 tools/doc/dotnet.header.html.in delete mode 100755 tools/doc/gen_javadoc.sh delete mode 100755 tools/doc/gen_ref_doc.py delete mode 100644 tools/doc/java.doxy.in delete mode 100644 tools/doc/java.header.html.in delete mode 100644 tools/doc/ortools.header.html.in delete mode 100644 tools/doc/samples_cpp.dox delete mode 100644 tools/doc/templates/credits.mako delete mode 100644 tools/doc/templates/head.mako delete mode 100644 tools/doc/templates/logo.mako diff --git a/Makefile b/Makefile index a590e1d748..1021b82baf 100644 --- a/Makefile +++ b/Makefile @@ -101,13 +101,6 @@ include $(OR_ROOT)makefiles/Makefile.dotnet.mk include $(OR_ROOT)makefiles/Makefile.java.mk include $(OR_ROOT)makefiles/Makefile.python.mk include $(OR_ROOT)makefiles/Makefile.archive.mk -ifneq ($(PLATFORM),WIN64) -include $(OR_ROOT)makefiles/Makefile.doc.mk -else -# Remove some rules on windows -help_doc: - -endif .PHONY: help_usage help_usage: @@ -125,7 +118,7 @@ else endif .PHONY: help_all -help_all: help_usage help_cpp help_dotnet help_java help_python help_archive help_doc +help_all: help_usage help_cpp help_dotnet help_java help_python help_archive .PHONY: check_all check_all: check_cpp check_dotnet check_java check_python diff --git a/makefiles/Makefile.doc.mk b/makefiles/Makefile.doc.mk deleted file mode 100644 index e523245cb4..0000000000 --- a/makefiles/Makefile.doc.mk +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2010-2025 Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generate documentation -.PHONY: help_doc # Generate list of Documentation targets with descriptions. -help_doc: - @echo Use one of the following Documentation targets: - @$(GREP) "^.PHONY: .* #" $(CURDIR)/makefiles/Makefile.doc.mk | $(SED) "s/\.PHONY: \(.*\) # \(.*\)/\1\t\2/" | expand -t20 - @echo - - -# Main target -.PHONY: doc # Create doxygen and python documentation. -doc: doxy-doc python-doc java-doc - -.PHONY: doxy-doc # Create doxygen ref documentation. -doxy-doc: cpp python java dotnet - bash -c "command -v doxygen" - python3 tools/doc/gen_ref_doc.py - -.PHONY: java-doc # Create Javadoc ref documentation. -java-doc: java - bash -c "command -v mvn" - tools/doc/gen_javadoc.sh - -.PHONY: python-doc # Create python documentation. 
-python-doc: python - bash -c "command -v pdoc" - $(SET_PYTHONPATH) pdoc \ - --logo https://developers.google.com/optimization/images/orLogo.png \ - -o docs/python/ \ - --no-search -d google \ - --footer-text "OR-Tools ${OR_TOOLS_MAJOR}.${OR_TOOLS_MINOR}" \ - build_make/python/ortools diff --git a/tools/doc/DoxygenLayout.xml b/tools/doc/DoxygenLayout.xml deleted file mode 100644 index aa503b3de9..0000000000 --- a/tools/doc/DoxygenLayout.xml +++ /dev/null @@ -1,220 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/tools/doc/all.footer.html.in b/tools/doc/all.footer.html.in deleted file mode 100644 index 5c65399883..0000000000 --- a/tools/doc/all.footer.html.in +++ /dev/null @@ -1,8 +0,0 @@ -
-
- - - diff --git a/tools/doc/all.styleSheet.css.in b/tools/doc/all.styleSheet.css.in deleted file mode 100644 index 50f8fdbe8e..0000000000 --- a/tools/doc/all.styleSheet.css.in +++ /dev/null @@ -1,1450 +0,0 @@ -/* The standard CSS for doxygen */ - -/* @group Heading Levels */ - -div.contents .textblock h1 { - text-align: left; - font-size: 20pt; - font-weight: normal; - margin-top: 1.5em; - padding: 0 0 0.4em 0; - border-bottom: 1px solid #999; - border-top-width: 0; - border-left-width: 0; - border-right-width: 0; - background-color: transparent; -} - -h1.groupheader { - font-size: 150%; -} - -.title { - font-size: 20pt; - font-weight: normal; - margin: 10px 2px; -} - -dt { - font-weight: bold; -} - -div.multicol { - -moz-column-gap: 1em; - -webkit-column-gap: 1em; - -moz-column-count: 3; - -webkit-column-count: 3; -} - -p.startli, p.startdd, p.starttd { - margin-top: 2px; -} - -p.endli { - margin-bottom: 0px; -} - -p.enddd { - margin-bottom: 4px; -} - -p.endtd { - margin-bottom: 2px; -} - -/* @end */ - -caption { - font-weight: bold; -} - -span.legend { - font-size: 70%; - text-align: center; -} - -h3.version { - font-size: 90%; - text-align: center; -} - -div.qindex { - margin-bottom: 1em; -} - -div.qindex, div.navtab{ - background-color: #eee; - border: 1px solid #999; - text-align: center; -} - -div.qindex, div.navpath { - width: 100%; - line-height: 140%; -} - -div.navtab { - margin-right: 15px; -} - -/* @group Link Styling */ - -a.qindex { - font-weight: bold; -} - -a.qindexHL { - font-weight: bold; - background-color: #9CAFD4; - color: #ffffff; - border: 1px double #869DCA; -} - -/* @end */ - -dl.el { - margin-left: -1cm; -} - -a.el { - padding: 1px; - text-decoration: none; - color: #577E25; -} - -a.el:hover { - text-decoration: underline; -} - -pre.fragment { - /*border: 1px solid #C4CFE5; - background-color: #FBFCFD; - padding: 4px 6px; - margin: 4px 8px 4px 2px; - overflow: auto; - word-wrap: break-word; - font-size: 9pt; - line-height: 125%; - 
font-family: monospace, fixed; - font-size: 105%;*/ -font-family: Consolas, "Liberation Mono", Courier, monospace; -font-size: 10pt; -padding: 0.5em 1em; -background-color: #f5f5f5; -border: 1px solid #bbb; -border-radius(5px); -} - -div.fragment { - /*margin: 0 0 0 5px; - padding: 0.5em 1em; - font-family: Consolas, "Liberation Mono", Courier, monospace; - font-size: 10pt; - background-color: #eef7e3; - border-left: 3px solid #8DC841; - border-right: 0; - border-bottom: 0;*/ - -font-family: Consolas, "Liberation Mono", Courier, monospace; -font-size: 10pt; -padding: 0.5em 1em; -background-color: #f5f5f5; -border: 1px solid #bbb; -border-radius(5px); -} - -div.line { - min-height: 13px; - text-wrap: unrestricted; - white-space: -moz-pre-wrap; /* Moz */ - white-space: -pre-wrap; /* Opera 4-6 */ - white-space: -o-pre-wrap; /* Opera 7 */ - white-space: pre-wrap; /* CSS3 */ - word-wrap: break-word; /* IE 5.5+ */ - text-indent: -53px; - padding-left: 53px; - padding-bottom: 0px; - margin: 0px; - line-height: normal; -} - -span.lineno { - padding-right: 4px; - text-align: right; - background-color: #E8E8E8; - white-space: pre; -} - -div.ah { - width: 100%; - background-color: #eee; - font-weight: bold; - color: #000; - margin-bottom: 1px; - margin-top: 1px; - border: solid 1px #999; -} - -div.groupHeader { - margin-left: 16px; - margin-top: 12px; - font-weight: bold; -} - -div.groupText { - margin-left: 16px; - font-style: italic; -} - -body { - background-color: white; - color: black; - margin: 0; -} - -div.contents { - width: 950px; - margin: 0 auto; -} - -td.indexkey { - background-color: #EBEFF6; - font-weight: bold; - border: 1px solid #C4CFE5; - margin: 2px 0px 2px 0; - padding: 2px 10px; - white-space: nowrap; - vertical-align: top; -} - -td.indexvalue { - background-color: #EBEFF6; - border: 1px solid #C4CFE5; - padding: 2px 10px; - margin: 2px 0px; -} - -tr.memlist { - background-color: #EEF1F7; -} - -p.formulaDsp { - text-align: center; -} - -img.formulaDsp { - 
-} - -img.formulaInl { - vertical-align: middle; -} - -div.center { - text-align: center; - margin-top: 0px; - margin-bottom: 0px; - padding: 0px; -} - -div.center img { - border: 0px; -} - -address.footer { - text-align: right; - padding-right: 12px; -} - -img.footer { - border: 0px; - vertical-align: middle; -} - -/* @group Code Colorization */ - -span.keyword { - color: #008000 -} - -span.keywordtype { - color: #604020 -} - -span.keywordflow { - color: #e08000 -} - -span.comment { - color: #800000 -} - -span.preprocessor { - color: #806020 -} - -span.stringliteral { - color: #002080 -} - -span.charliteral { - color: #008080 -} - -span.vhdldigit { - color: #ff00ff -} - -span.vhdlchar { - color: #000000 -} - -span.vhdlkeyword { - color: #700070 -} - -span.vhdllogic { - color: #ff0000 -} - -blockquote { - background-color: #F7F8FB; - border-left: 2px solid #9CAFD4; - margin: 0 24px 0 4px; - padding: 0 12px 0 16px; -} - -/* @end */ - -td.tiny { - font-size: 75%; -} - -.dirtab { - padding: 4px; - border-collapse: collapse; - border: 1px solid #A3B4D7; -} - -th.dirtab { - background: #EBEFF6; - font-weight: bold; -} - -hr { - display: none; - height: 0px; - border: none; - border-top: 1px solid #4A6AAA; -} - -hr.footer { - height: 1px; -} - -/* @group Member Descriptions */ - -table.memberdecls { - border-spacing: 0px; - padding: 0px; -} - -.memberdecls td, .fieldtable tr { - -webkit-transition-property: background-color, box-shadow; - -webkit-transition-duration: 0.5s; - -moz-transition-property: background-color, box-shadow; - -moz-transition-duration: 0.5s; - -ms-transition-property: background-color, box-shadow; - -ms-transition-duration: 0.5s; - -o-transition-property: background-color, box-shadow; - -o-transition-duration: 0.5s; - transition-property: background-color, box-shadow; - transition-duration: 0.5s; -} - -.memberdecls td.glow, .fieldtable tr.glow { - background-color: cyan; - /*box-shadow: 0 0 15px cyan;*/ -} - -.mdescLeft, .mdescRight, -.memItemLeft, 
.memItemRight, -.memTemplItemLeft, .memTemplItemRight, .memTemplParams { - background-color: #F9FAFC; - border: none; - margin: 4px; - padding: 1px 0 0 8px; -} - -.mdescLeft, .mdescRight { - padding: 0px 8px 4px 8px; - color: #555; -} - -.memSeparator { - border-bottom: 1px solid #DEE4F0; - line-height: 1px; - margin: 0px; - padding: 0px; -} - -.memItemLeft, .memTemplItemLeft { - white-space: nowrap; -} - -.memItemRight { - width: 100%; -} - -.memTemplParams { - color: #4665A2; - white-space: nowrap; - font-size: 80%; -} - -/* @end */ - -/* @group Member Details */ - -/* Styles for detailed member documentation */ - -.memtemplate { - font-size: 80%; - color: #4665A2; - font-weight: normal; - margin-left: 9px; -} - -.memtitle { - display: none; -} - -.memnav { - background-color: #EBEFF6; - border: 1px solid #A3B4D7; - text-align: center; - margin: 2px; - margin-right: 15px; - padding: 2px; -} - -.mempage { - width: 100%; -} - -.memitem { - padding: 0; - /*margin-bottom: 10px;*/ - margin-right: 5px; - display: table !important; - width: 100%; -} - -.memname { - font-weight: bold; - margin-left: 6px; -} - -.memname td { - vertical-align: bottom; -} - -.memproto, dl.reflist dt { - border-top: 1px solid #A8B8D9; - border-left: 1px solid #A8B8D9; - border-right: 1px solid #A8B8D9; - padding: 6px 0px 6px 0px; - color: #000; - font-weight: bold; - text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); - background-color: #eee; - border-top-right-radius: 4px; - border-top-left-radius: 4px; - -moz-border-radius-topright: 4px; - -moz-border-radius-topleft: 4px; - -webkit-border-top-right-radius: 4px; - -webkit-border-top-left-radius: 4px; - -} - -.memdoc, dl.reflist dd { - border: 1px solid #A8B8D9; - padding: 6px 10px 2px 10px; - background-color: #FBFCFD; - background-color: #FFFFFF; - border-bottom-left-radius: 4px; - border-bottom-right-radius: 4px; - -moz-border-radius-bottomleft: 4px; - -moz-border-radius-bottomright: 4px; - -webkit-border-bottom-left-radius: 4px; - 
-webkit-border-bottom-right-radius: 4px; -} - -dl.reflist dt { - padding: 5px; -} - -dl.reflist dd { - margin: 0px 0px 10px 0px; - padding: 5px; -} - -.paramkey { - text-align: right; -} - -.paramtype { - white-space: nowrap; -} - -.paramname { - color: #602020; - white-space: nowrap; -} -.paramname em { - font-style: normal; -} -.paramname code { - line-height: 14px; -} - -.params, .retval, .exception, .tparams { - margin-left: 0px; - padding-left: 0px; -} - -.params .paramname, .retval .paramname { - font-weight: bold; - vertical-align: top; -} - -.params .paramtype { - font-style: italic; - vertical-align: top; -} - -.params .paramdir { - font-family: "courier new",courier,monospace; - vertical-align: top; -} - -table.mlabels { - border-spacing: 0px; -} - -td.mlabels-left { - width: 100%; - padding: 0px; -} - -td.mlabels-right { - vertical-align: bottom; - padding: 0px; - white-space: nowrap; -} - -span.mlabels { - margin-left: 8px; -} - -span.mlabel { - background-color: #728DC1; - border-top:1px solid #5373B4; - border-left:1px solid #5373B4; - border-right:1px solid #C4CFE5; - border-bottom:1px solid #C4CFE5; - text-shadow: none; - color: white; - margin-right: 4px; - padding: 2px 3px; - border-radius: 3px; - font-size: 7pt; - white-space: nowrap; - vertical-align: middle; -} - - - -/* @end */ - -/* these are for tree view when not used as main index */ - -div.directory { - margin: 10px 0px; - border-top: 1px solid #bbb; - width: 100%; -} - -.directory table { - border-collapse:collapse; -} - -.directory td { - margin: 0px; - padding: 0px; - vertical-align: top; -} - -.directory td.entry { - white-space: nowrap; - padding: 5px 5px 5px 0; -} - -.directory td.entry a { - outline:none; -} - -.directory td.entry a img { - border: none; -} - -.directory td.desc { - width: 100%; - padding-left: 6px; - padding-right: 6px; - padding-top: 3px; - /*border-left: 1px solid rgba(0,0,0,0.05);*/ -} - -.directory tr.even { - padding-left: 6px; - background-color: #F7F8FB; -} 
- -.directory img { - vertical-align: -30%; -} - -.directory .levels { - white-space: nowrap; - width: 100%; - text-align: right; - font-size: 9pt; -} - -.directory .levels span { - cursor: pointer; - padding-left: 2px; - padding-right: 2px; - color: #3D578C; -} - -div.dynheader { - margin-top: 8px; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -address { - font-style: normal; - color: #2A3D61; -} - -table table { - width: 90%; -} - -.memitem table table { - width: auto; -} - -table.doxtable { - border-collapse:collapse; - margin-top: 4px; - margin-bottom: 4px; -} - -table.doxtable td, table.doxtable th { - border: 1px solid #2D4068; - padding: 3px 7px 2px; -} - -table.doxtable th { - background-color: #374F7F; - color: #FFFFFF; - font-size: 110%; - padding-bottom: 4px; - padding-top: 5px; -} - -table.fieldtable { - width: 100%; - margin-bottom: 10px; - border: 1px solid #A8B8D9; - border-spacing: 0px; - -moz-border-radius: 4px; - -webkit-border-radius: 4px; - border-radius: 4px; -} - -.fieldtable td, .fieldtable th { - padding: 3px 7px 2px; -} - -.fieldtable td.fieldtype, .fieldtable td.fieldname { - white-space: nowrap; - border-right: 1px solid #A8B8D9; - border-bottom: 1px solid #A8B8D9; - vertical-align: top; -} - -.fieldtable td.fielddoc { - border-bottom: 1px solid #A8B8D9; - width: 100%; -} - -.fieldtable tr:last-child td { - border-bottom: none; -} - -.fieldtable th { - background-color: #E2E8F2; - font-size: 90%; - color: #253555; - padding-bottom: 4px; - padding-top: 5px; - text-align:left; - -moz-border-radius-topleft: 4px; - -moz-border-radius-topright: 4px; - -webkit-border-top-left-radius: 4px; - -webkit-border-top-right-radius: 4px; - border-top-left-radius: 4px; - border-top-right-radius: 4px; - border-bottom: 1px solid #A8B8D9; -} - - -.tabsearch { - top: 0px; - left: 10px; - height: 36px; - z-index: 101; - overflow: hidden; - 
font-size: 13px; -} - -.navpath { - display: none; -} - -.navpath ul { - font-size: 11px; - height:30px; - line-height:30px; - color:#8AA0CC; - border:solid 1px #C2CDE4; - overflow:hidden; - margin:0px; - padding:0px; -} - -.navpath li { - list-style-type:none; - float:left; - padding-left:10px; - padding-right:15px; - color:#364D7C; -} - -.navpath li.navelem a { - height:32px; - display:block; - text-decoration: none; - outline: none; - color: #283A5D; - font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; - text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); - text-decoration: none; -} - -.navpath li.navelem a:hover { - color:#6884BD; -} - -.navpath li.footer { - list-style-type:none; - float:right; - padding-left:10px; - padding-right:15px; - background-image:none; - background-repeat:no-repeat; - background-position:right; - color:#364D7C; - font-size: 8pt; -} - - -div.summary { - font-size: 8pt; - padding-right: 5px; -} - -div.summary a { - white-space: nowrap; - padding: 1px; - text-decoration: none; - color: #577E25; -} - -div.summary a:hover { - text-decoration: underline; -} - -div.ingroups { - font-size: 8pt; - width: 50%; - text-align: left; -} - -div.ingroups a { - white-space: nowrap; -} - -div.header { - width: 950px; - margin: 2em auto; - border-bottom: 1px solid #999; -} - -dl { - padding: 0 0 0 10px; -} - -/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ -dl.section { - margin-left: 0px; - padding-left: 0px; -} - -dl.note { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #D0C000; -} - -dl.warning, dl.attention { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #FF0000; -} - -dl.pre, dl.post, dl.invariant { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #00D000; -} - -dl.deprecated { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #505050; -} - 
-dl.todo { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #00C0E0; -} - -dl.test { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #3030E0; -} - -dl.bug { - margin-left:-7px; - padding-left: 3px; - border-left:4px solid; - border-color: #C08050; -} - -dl.section dd { - margin-bottom: 6px; -} - - -#projectlogo { - text-align: center; - vertical-align: bottom; - border-collapse: separate; -} - -#projectlogo img { - border: 0px none; -} - -#projectname { - font: 300% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 2px 0px; -} - -#projectbrief { - font: 120% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#projectnumber { - font: 50% Tahoma, Arial,sans-serif; - margin: 0px; - padding: 0px; -} - -#titlearea { - padding: 0px; - margin: 0px; - width: 100%; - border-bottom: 1px solid #5373B4; -} - -.image { - text-align: center; -} - -.dotgraph { - text-align: center; -} - -.mscgraph { - text-align: center; -} - -.caption { - font-weight: bold; -} - -div.zoom { - border: 1px solid #90A5CE; -} - -dl.citelist { - margin-bottom:50px; -} - -dl.citelist dt { - color:#334975; - float:left; - font-weight:bold; - margin-right:10px; - padding:5px; -} - -dl.citelist dd { - margin:2px 0; - padding:5px 0; -} - -div.toc { - padding: 14px 25px; - background-color: #F4F6FA; - border: 1px solid #D8DFEE; - border-radius: 7px 7px 7px 7px; - float: right; - height: auto; - margin: 0 20px 10px 10px; - width: 200px; -} - -div.toc li { - font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; - margin-top: 5px; - padding-left: 10px; - padding-top: 2px; -} - -div.toc h3 { - font: bold 12px/1.2 Arial,FreeSans,sans-serif; - color: #4665A2; - border-bottom: 0 none; - margin: 0; -} - -div.toc ul { - list-style: none outside none; - border: medium none; - padding: 0px; -} - -div.toc li.level1 { - margin-left: 0px; -} - -div.toc li.level2 { - margin-left: 15px; -} - -div.toc li.level3 { - margin-left: 30px; -} - 
-div.toc li.level4 { - margin-left: 45px; -} - -.inherit_header { - font-weight: bold; - color: gray; - cursor: pointer; - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -.inherit_header td { - padding: 6px 0px 2px 5px; -} - -.inherit { - display: none; -} - -tr.heading h2 { - margin-top: 12px; - margin-bottom: 4px; -} - -@media print { - #top { display: none; } - #side-nav { display: none; } - #nav-path { display: none; } - body { overflow:visible; } - h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } - .summary { display: none; } - .memitem { page-break-inside: avoid; } - - #doc-content { - margin-left:0 !important; - height:auto !important; - width:auto !important; - overflow:inherit; - display:inline; - } -} - -/* tabs.css */ -.tabs, .tabs2, .tabs3 { - width: 100%; - z-index: 101; - font-size: 11pt; - background-color: #EAF5DB; - border-left: 1px solid #999; - border-right: 1px solid #999; - border-bottom: 1px solid #999; - padding: 0; - margin: 0; -} - -.tabs2 { - font-size: 10pt; -} -.tabs3 { - font-size: 9pt; -} - -#navrow1 .tablist, #navrow2 .tablist, #navrow3 .tablist, #navrow4 .tablist { - margin: 0; - padding: 0; - display: table; -} - -.tablist li { - float: left; - display: table-cell; - list-style: none; -} - -#navrow1 { - border-top: 1px solid #999; - margin-top: 2em; -} - -#navrow1 .tablist a, #navrow2 .tablist a, #navrow3 .tablist a, #navrow4 .tablist a { - display: block; - margin: 8px 0; - padding: 0 8px; - border-right: 1px solid #bbb; -} - -.tablist li { - margin-bottom: 0 !important; -} - -.tablist li.current a { - font-weight: bold; -} - - - - - -/* SFML css */ -body { - font-family: 'Ubuntu', 'Arial', sans-serif; - line-height: 140%; - margin: 0 0 2em 0; - padding: 0; -} - -#banner-container { - width: 100%; - margin-top: 25px; - border-top: 2px solid #999; - border-bottom: 2px solid #999; - background-color: rgb(140, 200, 
65); -} - -#banner { - width: 950px; - height: 60px; - line-height: 54px; - margin: 0 auto; - text-align: center; -} - -#banner #sfml { - display: inline; - vertical-align: top; - margin-left: 15px; - color: #fff; - font-size: 50pt; - text-shadow: rgba(0, 0, 0, 0.5) 1px 1px 5px; -} - -#footer-container { - clear: both; - width: 100%; - margin-top: 50px; - border-top: 1px solid #999; -} - -#footer { - width: 950px; - margin: 10px auto; - text-align: center; - font-size: 10pt; - color: #555; -} - -#footer a { - padding: 1px; - text-decoration: none; - color: rgb(70, 100, 30); -} - -#footer a:hover { - text-decoration: underline; -} - -div.contents, #content { - width: 950px; - margin: 0 auto; - padding: 0; -} - -div.contents h1 { - color: #333; - padding: 0.5em 0; - margin-top: 30px; - margin-bottom: 0; - text-align: center; - font-size: 26pt; - font-weight: normal; -} - -div.contents h2 { - font-size: 20pt; - font-weight: normal; - margin-top: 1.5em; - padding-bottom: 0.4em; - border-bottom: 1px solid #999; -} - -div.contents h3 { - font-size: 16pt; - font-weight: normal; -} - -div.contents p { - color: #333; - text-align: justify; -} - -div.contents a, #content a { - padding: 1px; - text-decoration: none; - color: rgb(70, 100, 30); -} - -div.contents a:hover, #content a:hover { - text-decoration: underline; -} - -div.contents code { - font-size: 11pt; - font-family: Consolas, "Liberation Mono", Courier, monospace; -} - -div.contents pre code { - font-family: Consolas, "Liberation Mono", Courier, monospace; - font-size: 10pt; - padding: 0.5em 1em; - background-color: #f5f5f5; - border: 1px solid #bbb; -} - -div.contents ul { - list-style-type: square; - list-style-position: outside; - margin: 0 0 0 1.5em; - padding: 0; -} - -div.contents ul li { - color: #333; - margin: 0 0 0.3em 0; -} - - -.icon { - font-family: Arial, Helvetica; - font-weight: bold; - font-size: 12px; - height: 14px; - width: 16px; - display: inline-block; - background-color: #8cc445; - color: 
white; - text-align: center; - border-radius: 4px; - margin-left: 2px; - margin-right: 2px; - line-height: normal; -} - -.icona { - width: 24px; - height: 22px; - display: inline-block; -} - -.iconfopen { - width: 24px; - height: 18px; - margin-bottom: 4px; - background-image:url('ftv2folderopen.png'); - background-position: 0px -4px; - background-repeat: repeat-y; - vertical-align:top; - display: inline-block; -} - -.iconfclosed { - width: 24px; - height: 18px; - margin-bottom: 4px; - background-image:url('ftv2folderclosed.png'); - background-position: 0px -4px; - background-repeat: repeat-y; - vertical-align:top; - display: inline-block; -} - -.icondoc { - width: 24px; - height: 18px; - margin-bottom: 4px; - background-image:url('ftv2doc.png'); - background-position: 0px -4px; - background-repeat: repeat-y; - vertical-align:top; - display: inline-block; -} - -/* tooltip related style info */ - -.ttc { - position: absolute; - display: none; -} - -#powerTip { - cursor: default; - white-space: nowrap; - background-color: white; - border: 1px solid gray; - border-radius: 4px 4px 4px 4px; - box-shadow: 1px 1px 7px gray; - display: none; - font-size: smaller; - max-width: 80%; - opacity: 0.9; - padding: 1ex 1em 1em; - position: absolute; - z-index: 2147483647; -} - -#powerTip div.ttdoc { - color: grey; - font-style: italic; -} - -#powerTip div.ttname a { - font-weight: bold; -} - -#powerTip div.ttname { - font-weight: bold; -} - -#powerTip div.ttdeci { - color: #006318; -} - -#powerTip div { - margin: 0px; - padding: 0px; - font: 12px/16px Roboto,sans-serif; -} - -#powerTip:before, #powerTip:after { - content: ""; - position: absolute; - margin: 0px; -} - -#powerTip.n:after, #powerTip.n:before, -#powerTip.s:after, #powerTip.s:before, -#powerTip.w:after, #powerTip.w:before, -#powerTip.e:after, #powerTip.e:before, -#powerTip.ne:after, #powerTip.ne:before, -#powerTip.se:after, #powerTip.se:before, -#powerTip.nw:after, #powerTip.nw:before, -#powerTip.sw:after, 
#powerTip.sw:before { - border: solid transparent; - content: " "; - height: 0; - width: 0; - position: absolute; -} - -#powerTip.n:after, #powerTip.s:after, -#powerTip.w:after, #powerTip.e:after, -#powerTip.nw:after, #powerTip.ne:after, -#powerTip.sw:after, #powerTip.se:after { - border-color: rgba(255, 255, 255, 0); -} - -#powerTip.n:before, #powerTip.s:before, -#powerTip.w:before, #powerTip.e:before, -#powerTip.nw:before, #powerTip.ne:before, -#powerTip.sw:before, #powerTip.se:before { - border-color: rgba(128, 128, 128, 0); -} - -#powerTip.n:after, #powerTip.n:before, -#powerTip.ne:after, #powerTip.ne:before, -#powerTip.nw:after, #powerTip.nw:before { - top: 100%; -} - -#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { - border-top-color: #ffffff; - border-width: 10px; - margin: 0px -10px; -} -#powerTip.n:before { - border-top-color: #808080; - border-width: 11px; - margin: 0px -11px; -} -#powerTip.n:after, #powerTip.n:before { - left: 50%; -} - -#powerTip.nw:after, #powerTip.nw:before { - right: 14px; -} - -#powerTip.ne:after, #powerTip.ne:before { - left: 14px; -} - -#powerTip.s:after, #powerTip.s:before, -#powerTip.se:after, #powerTip.se:before, -#powerTip.sw:after, #powerTip.sw:before { - bottom: 100%; -} - -#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { - border-bottom-color: #ffffff; - border-width: 10px; - margin: 0px -10px; -} - -#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { - border-bottom-color: #808080; - border-width: 11px; - margin: 0px -11px; -} - -#powerTip.s:after, #powerTip.s:before { - left: 50%; -} - -#powerTip.sw:after, #powerTip.sw:before { - right: 14px; -} - -#powerTip.se:after, #powerTip.se:before { - left: 14px; -} - -#powerTip.e:after, #powerTip.e:before { - left: 100%; -} -#powerTip.e:after { - border-left-color: #ffffff; - border-width: 10px; - top: 50%; - margin-top: -10px; -} -#powerTip.e:before { - border-left-color: #808080; - border-width: 11px; - top: 50%; - margin-top: -11px; -} - 
-#powerTip.w:after, #powerTip.w:before { - right: 100%; -} -#powerTip.w:after { - border-right-color: #ffffff; - border-width: 10px; - top: 50%; - margin-top: -10px; -} -#powerTip.w:before { - border-right-color: #808080; - border-width: 11px; - top: 50%; - margin-top: -11px; -} -.arrow { - cursor: pointer; -} diff --git a/tools/doc/cpp.doxy.in b/tools/doc/cpp.doxy.in deleted file mode 100644 index b8bf9599c0..0000000000 --- a/tools/doc/cpp.doxy.in +++ /dev/null @@ -1,2544 +0,0 @@ -# Doxyfile 1.8.18 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project. -# -# All text after a double hash (##) is considered a comment and is placed in -# front of the TAG it is preceding. -# -# All text after a single hash (#) is considered a comment and will be ignored. -# The format is: -# TAG = value [value, ...] -# For lists, items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (\" \"). - -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- - -# This tag specifies the encoding used for all characters in the configuration -# file that follow. The default is UTF-8 which is also the encoding used for all -# text before the first occurrence of this tag. Doxygen uses libiconv (or the -# iconv built into libc) for the transcoding. See -# https://www.gnu.org/software/libiconv/ for the list of possible encodings. -# The default value is: UTF-8. - -DOXYFILE_ENCODING = UTF-8 - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by -# double-quotes, unless you are using Doxywizard) that should identify the -# project for which the documentation is generated. This name is used in the -# title of most generated pages and in a few other places. -# The default value is: My Project. 
- -PROJECT_NAME - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. This -# could be handy for archiving the generated documentation or if some version -# control system is used. - -PROJECT_NUMBER - -# Using the PROJECT_BRIEF tag one can provide an optional one line description -# for a project that appears at the top of each page and should give viewer a -# quick idea about the purpose of the project. Keep the description short. - -PROJECT_BRIEF = - -# With the PROJECT_LOGO tag one can specify a logo or an icon that is included -# in the documentation. The maximum height of the logo should not exceed 55 -# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy -# the logo to the output directory. - -PROJECT_LOGO = tools/doc/orLogo.png - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path -# into which the generated documentation will be written. If a relative path is -# entered, it will be relative to the location where doxygen was started. If -# left blank the current directory will be used. - -OUTPUT_DIRECTORY = docs - -# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- -# directories (in 2 levels) under the output directory of each output format and -# will distribute the generated files over these directories. Enabling this -# option can be useful when feeding doxygen a huge amount of source files, where -# putting all generated files in the same directory would otherwise causes -# performance problems for the file system. -# The default value is: NO. - -CREATE_SUBDIRS = NO - -# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII -# characters to appear in the names of generated files. If set to NO, non-ASCII -# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode -# U+3044. -# The default value is: NO. 
- -ALLOW_UNICODE_NAMES = NO - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, -# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), -# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, -# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), -# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, -# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, -# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, -# Ukrainian and Vietnamese. -# The default value is: English. - -OUTPUT_LANGUAGE = English - -# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all generated output in the proper direction. -# Possible values are: None, LTR, RTL and Context. -# The default value is: None. - -OUTPUT_TEXT_DIRECTION = None - -# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member -# descriptions after the members that are listed in the file and class -# documentation (similar to Javadoc). Set to NO to disable this. -# The default value is: YES. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief -# description of a member or function before the detailed description -# -# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. -# The default value is: YES. - -REPEAT_BRIEF = YES - -# This tag implements a quasi-intelligent brief description abbreviator that is -# used to form the text in various listings. 
Each string in this list, if found -# as the leading text of the brief description, will be stripped from the text -# and the result, after processing the whole list, is used as the annotated -# text. Otherwise, the brief description is used as-is. If left blank, the -# following values are used ($name is automatically replaced with the name of -# the entity):The $name class, The $name widget, The $name file, is, provides, -# specifies, contains, represents, a, an and the. - -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# doxygen will generate a detailed section even if there is only a brief -# description. -# The default value is: NO. - -ALWAYS_DETAILED_SEC = YES - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all -# inherited members of a class in the documentation of that class as if those -# members were ordinary class members. Constructors, destructors and assignment -# operators of the base classes will not be shown. -# The default value is: NO. - -INLINE_INHERITED_MEMB = YES - -# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path -# before files name in the file list and in the header files. If set to NO the -# shortest path that makes the file name unique will be used -# The default value is: YES. - -FULL_PATH_NAMES = NO - -# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. -# Stripping is only done if one of the specified strings matches the left-hand -# part of the path. The tag can be used to show relative paths in the file list. -# If left blank the directory from which doxygen is run is used as the path to -# strip. -# -# Note that you can specify absolute paths here, but also relative paths, which -# will be relative from the directory where doxygen is started. 
-# This tag requires that the tag FULL_PATH_NAMES is set to YES. - -STRIP_FROM_PATH = - -# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the -# path mentioned in the documentation of a class, which tells the reader which -# header file to include in order to use a class. If left blank only the name of -# the header file containing the class definition is used. Otherwise one should -# specify the list of include paths that are normally passed to the compiler -# using the -I flag. - -STRIP_FROM_INC_PATH = - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but -# less readable) file names. This can be useful is your file systems doesn't -# support long names like on DOS, Mac, or CD-ROM. -# The default value is: NO. - -SHORT_NAMES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the -# first line (until the first dot) of a Javadoc-style comment as the brief -# description. If set to NO, the Javadoc-style will behave just like regular Qt- -# style comments (thus requiring an explicit @brief command for a brief -# description.) -# The default value is: NO. - -JAVADOC_AUTOBRIEF = YES - -# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line -# such as -# /*************** -# as being the beginning of a Javadoc-style comment "banner". If set to NO, the -# Javadoc-style will behave just like regular comments and it will not be -# interpreted by doxygen. -# The default value is: NO. - -JAVADOC_BANNER = NO - -# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first -# line (until the first dot) of a Qt-style comment as the brief description. If -# set to NO, the Qt-style will behave just like regular Qt-style comments (thus -# requiring an explicit \brief command for a brief description.) -# The default value is: NO. 
- -QT_AUTOBRIEF = NO - -# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a -# multi-line C++ special comment block (i.e. a block of //! or /// comments) as -# a brief description. This used to be the default behavior. The new default is -# to treat a multi-line C++ comment block as a detailed description. Set this -# tag to YES if you prefer the old behavior instead. -# -# Note that setting this tag to YES also means that rational rose comments are -# not recognized any more. -# The default value is: NO. - -MULTILINE_CPP_IS_BRIEF = NO - -# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the -# documentation from any documented member that it re-implements. -# The default value is: YES. - -INHERIT_DOCS = YES - -# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new -# page for each member. If set to NO, the documentation of a member will be part -# of the file/class/namespace that contains it. -# The default value is: NO. - -SEPARATE_MEMBER_PAGES = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen -# uses this value to replace tabs by spaces in code fragments. -# Minimum value: 1, maximum value: 16, default value: 4. - -TAB_SIZE = 4 - -# This tag can be used to specify a number of aliases that act as commands in -# the documentation. An alias has the form: -# name=value -# For example adding -# "sideeffect=@par Side Effects:\n" -# will allow you to put the command \sideeffect (or @sideeffect) in the -# documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines (in the resulting output). You can put ^^ in the value part of an -# alias to insert a newline as if a physical newline was in the original file. 
-# When you need a literal { or } or , in the value part of an alias you have to -# escape them by means of a backslash (\), this can lead to conflicts with the -# commands \{ and \} for these it is advised to use the version @{ and @} or use -# a double escape (\\{ and \\}) - -ALIASES = - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. For -# instance, some of the names that are used will be different. The list of all -# members will be omitted, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or -# Python sources only. Doxygen will then generate output that is more tailored -# for that language. For instance, namespaces will be presented as packages, -# qualified scopes will look different, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_JAVA = NO - -# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran -# sources. Doxygen will then generate output that is tailored for Fortran. -# The default value is: NO. - -OPTIMIZE_FOR_FORTRAN = NO - -# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL -# sources. Doxygen will then generate output that is tailored for VHDL. -# The default value is: NO. - -OPTIMIZE_OUTPUT_VHDL = NO - -# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice -# sources only. Doxygen will then generate output that is more tailored for that -# language. For instance, namespaces will be presented as modules, types will be -# separated into more groups, etc. -# The default value is: NO. - -OPTIMIZE_OUTPUT_SLICE = NO - -# Doxygen selects the parser to use depending on the extension of the files it -# parses. With this tag you can assign which parser to use for a given -# extension. Doxygen has a built-in mapping, but you can override or extend it -# using this tag. 
The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, -# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, -# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: -# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser -# tries to guess whether the code is fixed or free formatted code, this is the -# default for Fortran type files). For instance to make doxygen treat .inc files -# as Fortran files (default is PHP), and .f files as C (default is Fortran), -# use: inc=Fortran f=C. -# -# Note: For files without extension you can use no_extension as a placeholder. -# -# Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. - -EXTENSION_MAPPING = - -# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments -# according to the Markdown format, which allows for more readable -# documentation. See https://daringfireball.net/projects/markdown/ for details. -# The output of markdown processing is further processed by doxygen, so you can -# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in -# case of backward compatibilities issues. -# The default value is: YES. - -MARKDOWN_SUPPORT = YES - -# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up -# to that level are automatically included in the table of contents, even if -# they do not have an id attribute. -# Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 5. -# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. - -TOC_INCLUDE_HEADINGS = 5 - -# When enabled doxygen tries to link words that correspond to documented -# classes, or namespaces to their corresponding documentation. 
Such a link can -# be prevented in individual cases by putting a % sign in front of the word or -# globally by setting AUTOLINK_SUPPORT to NO. -# The default value is: YES. - -AUTOLINK_SUPPORT = YES - -# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want -# to include (a tag file for) the STL sources as input, then you should set this -# tag to YES in order to let doxygen match functions declarations and -# definitions whose arguments contain STL classes (e.g. func(std::string); -# versus func(std::string) {}). This also make the inheritance and collaboration -# diagrams that involve STL classes more complete and accurate. -# The default value is: NO. - -BUILTIN_STL_SUPPORT = YES - -# If you use Microsoft's C++/CLI language, you should set this option to YES to -# enable parsing support. -# The default value is: NO. - -CPP_CLI_SUPPORT = NO - -# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: -# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen -# will parse them like normal C++ but will assume all classes use public instead -# of private inheritance when no explicit protection keyword is present. -# The default value is: NO. - -SIP_SUPPORT = NO - -# For Microsoft's IDL there are propget and propput attributes to indicate -# getter and setter methods for a property. Setting this option to YES will make -# doxygen to replace the get and set methods by a property in the documentation. -# This will only work if the methods are indeed getting or setting a simple -# type. If this is not the case, or you want to show the methods anyway, you -# should set this option to NO. -# The default value is: YES. - -IDL_PROPERTY_SUPPORT = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. 
By default -# all members of a group must be documented explicitly. -# The default value is: NO. - -DISTRIBUTE_GROUP_DOC = NO - -# If one adds a struct or class to a group and this option is enabled, then also -# any nested class or struct is added to the same group. By default this option -# is disabled and one has to add nested compounds explicitly via \ingroup. -# The default value is: NO. - -GROUP_NESTED_COMPOUNDS = NO - -# Set the SUBGROUPING tag to YES to allow class member groups of the same type -# (for instance a group of public functions) to be put as a subgroup of that -# type (e.g. under the Public Functions section). Set it to NO to prevent -# subgrouping. Alternatively, this can be done per class using the -# \nosubgrouping command. -# The default value is: YES. - -SUBGROUPING = YES - -# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions -# are shown inside the group in which they are included (e.g. using \ingroup) -# instead of on a separate page (for HTML and Man pages) or section (for LaTeX -# and RTF). -# -# Note that this feature does not work in combination with -# SEPARATE_MEMBER_PAGES. -# The default value is: NO. - -INLINE_GROUPED_CLASSES = NO - -# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions -# with only public data fields or simple typedef fields will be shown inline in -# the documentation of the scope in which they are defined (i.e. file, -# namespace, or group documentation), provided this scope is documented. If set -# to NO, structs, classes, and unions are shown on a separate page (for HTML and -# Man pages) or section (for LaTeX and RTF). -# The default value is: NO. - -INLINE_SIMPLE_STRUCTS = NO - -# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or -# enum is documented as struct, union, or enum with the name of the typedef. So -# typedef struct TypeS {} TypeT, will appear in the documentation as a struct -# with name TypeT. 
When disabled the typedef will appear as a member of a file, -# namespace, or class. And the struct will be named TypeS. This can typically be -# useful for C code in case the coding convention dictates that all compound -# types are typedef'ed and only the typedef is referenced, never the tag name. -# The default value is: NO. - -TYPEDEF_HIDES_STRUCT = NO - -# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This -# cache is used to resolve symbols given their name and scope. Since this can be -# an expensive process and often the same symbol appears multiple times in the -# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small -# doxygen will become slower. If the cache is too large, memory is wasted. The -# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range -# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 -# symbols. At the end of a run doxygen will report the cache usage and suggest -# the optimal cache size from a speed point of view. -# Minimum value: 0, maximum value: 9, default value: 0. - -LOOKUP_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in -# documentation are documented, even if no documentation was available. Private -# class members and static file members will be hidden unless the -# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. -# Note: This will also disable the warnings about undocumented members that are -# normally produced when WARNINGS is set to YES. -# The default value is: NO. - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will -# be included in the documentation. -# The default value is: NO. 
- -EXTRACT_PRIVATE = NO - -# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual -# methods of a class will be included in the documentation. -# The default value is: NO. - -EXTRACT_PRIV_VIRTUAL = NO - -# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal -# scope will be included in the documentation. -# The default value is: NO. - -EXTRACT_PACKAGE = NO - -# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be -# included in the documentation. -# The default value is: NO. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO, -# only classes defined in header files are included. Does not have any effect -# for Java sources. -# The default value is: YES. - -EXTRACT_LOCAL_CLASSES = YES - -# This flag is only useful for Objective-C code. If set to YES, local methods, -# which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO, only methods in the interface are -# included. -# The default value is: NO. - -EXTRACT_LOCAL_METHODS = NO - -# If this flag is set to YES, the members of anonymous namespaces will be -# extracted and appear in the documentation as a namespace called -# 'anonymous_namespace{file}', where file will be replaced with the base name of -# the file that contains the anonymous namespace. By default anonymous namespace -# are hidden. -# The default value is: NO. - -EXTRACT_ANON_NSPACES = NO - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all -# undocumented members inside documented classes or files. If set to NO these -# members will be included in the various overviews, but no documentation -# section is generated. This option has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. 
- -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. If set -# to NO, these classes will be included in the various overviews. This option -# has no effect if EXTRACT_ALL is enabled. -# The default value is: NO. - -HIDE_UNDOC_CLASSES = NO - -# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# declarations. If set to NO, these declarations will be included in the -# documentation. -# The default value is: NO. - -HIDE_FRIEND_COMPOUNDS = NO - -# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO, these -# blocks will be appended to the function's detailed documentation block. -# The default value is: NO. - -HIDE_IN_BODY_DOCS = NO - -# The INTERNAL_DOCS tag determines if documentation that is typed after a -# \internal command is included. If the tag is set to NO then the documentation -# will be excluded. Set it to YES to include the internal documentation. -# The default value is: NO. - -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# (including Cygwin) ands Mac users are advised to set this option to NO. -# The default value is: system dependent. - -CASE_SENSE_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES, the -# scope will be hidden. -# The default value is: NO. 
- -HIDE_SCOPE_NAMES = YES - -# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will -# append additional text to a page's title, such as Class Reference. If set to -# YES the compound reference will be hidden. -# The default value is: NO. - -HIDE_COMPOUND_REFERENCE= YES - -# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of -# the files that are included by a file in the documentation of that file. -# The default value is: YES. - -SHOW_INCLUDE_FILES = YES - -# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each -# grouped member an include statement to the documentation, telling the reader -# which file to include in order to use the member. -# The default value is: NO. - -SHOW_GROUPED_MEMB_INC = NO - -# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include -# files with double quotes in the documentation rather than with sharp brackets. -# The default value is: NO. - -FORCE_LOCAL_INCLUDES = NO - -# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the -# documentation for inline members. -# The default value is: YES. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the -# (detailed) documentation of file and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. -# The default value is: YES. - -SORT_MEMBER_DOCS = YES - -# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief -# descriptions of file, namespace and class members alphabetically by member -# name. If set to NO, the members will appear in declaration order. Note that -# this will also influence the order of the classes in the class list. -# The default value is: NO. 
- -SORT_BRIEF_DOCS = NO - -# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the -# (brief and detailed) documentation of class members so that constructors and -# destructors are listed first. If set to NO the constructors will appear in the -# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. -# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief -# member documentation. -# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting -# detailed member documentation. -# The default value is: NO. - -SORT_MEMBERS_CTORS_1ST = YES - -# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy -# of group names into alphabetical order. If set to NO the group names will -# appear in their defined order. -# The default value is: NO. - -SORT_GROUP_NAMES = NO - -# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by -# fully-qualified names, including namespaces. If set to NO, the class list will -# be sorted only by class name, not including the namespace part. -# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. -# Note: This option applies only to the class list, not to the alphabetical -# list. -# The default value is: NO. - -SORT_BY_SCOPE_NAME = NO - -# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper -# type resolution of all parameters of a function it will reject a match between -# the prototype and the implementation of a member function even if there is -# only one candidate or it is obvious which candidate to choose by doing a -# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still -# accept a match between prototype and implementation in such cases. -# The default value is: NO. - -STRICT_PROTO_MATCHING = NO - -# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo -# list. 
This list is created by putting \todo commands in the documentation. -# The default value is: YES. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test -# list. This list is created by putting \test commands in the documentation. -# The default value is: YES. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug -# list. This list is created by putting \bug commands in the documentation. -# The default value is: YES. - -GENERATE_BUGLIST = YES - -# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) -# the deprecated list. This list is created by putting \deprecated commands in -# the documentation. -# The default value is: YES. - -GENERATE_DEPRECATEDLIST= YES - -# The ENABLED_SECTIONS tag can be used to enable conditional documentation -# sections, marked by \if ... \endif and \cond -# ... \endcond blocks. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the -# initial value of a variable or macro / define can have for it to appear in the -# documentation. If the initializer consists of more lines than specified here -# it will be hidden. Use a value of 0 to hide initializers completely. The -# appearance of the value of individual variables and macros / defines can be -# controlled using \showinitializer or \hideinitializer command in the -# documentation regardless of this setting. -# Minimum value: 0, maximum value: 10000, default value: 30. - -MAX_INITIALIZER_LINES = 30 - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES, the -# list will mention the files that were used to generate the documentation. -# The default value is: YES. - -SHOW_USED_FILES = YES - -# Set the SHOW_FILES tag to NO to disable the generation of the Files page. 
This -# will remove the Files entry from the Quick Index and from the Folder Tree View -# (if specified). -# The default value is: YES. - -SHOW_FILES = YES - -# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces -# page. This will remove the Namespaces entry from the Quick Index and from the -# Folder Tree View (if specified). -# The default value is: YES. - -SHOW_NAMESPACES = YES - -# The FILE_VERSION_FILTER tag can be used to specify a program or script that -# doxygen should invoke to get the current version for each file (typically from -# the version control system). Doxygen will invoke the program by executing (via -# popen()) the command command input-file, where command is the value of the -# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided -# by doxygen. Whatever the program writes to standard output is used as the file -# version. For an example see the documentation. - -FILE_VERSION_FILTER = - -# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed -# by doxygen. The layout file controls the global structure of the generated -# output files in an output format independent way. To create the layout file -# that represents doxygen's defaults, run doxygen with the -l option. You can -# optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. -# -# Note that if you run doxygen from a directory containing a file called -# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE -# tag is left empty. - -LAYOUT_FILE = tools/doc/DoxygenLayout.xml - -# The CITE_BIB_FILES tag can be used to specify one or more bib files containing -# the reference definitions. This must be a list of .bib files. The .bib -# extension is automatically appended if omitted. This requires the bibtex tool -# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. 
-# For LaTeX the style of the bibliography can be controlled using -# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the -# search path. See also \cite for info how to create references. - -CITE_BIB_FILES = - -#--------------------------------------------------------------------------- -# Configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated to -# standard output by doxygen. If QUIET is set to YES this implies that the -# messages are off. -# The default value is: NO. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES -# this implies that the warnings are on. -# -# Tip: Turn warnings on while writing the documentation. -# The default value is: YES. - -WARNINGS = YES - -# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate -# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag -# will automatically be disabled. -# The default value is: YES. - -WARN_IF_UNDOCUMENTED = YES - -# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. -# The default value is: YES. - -WARN_IF_DOC_ERROR = YES - -# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that -# are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. If -# EXTRACT_ALL is set to YES then this flag will automatically be disabled. -# The default value is: NO. 
- -WARN_NO_PARAMDOC = YES - -# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. -# The default value is: NO. - -WARN_AS_ERROR = NO - -# The WARN_FORMAT tag determines the format of the warning messages that doxygen -# can produce. The string should contain the $file, $line, and $text tags, which -# will be replaced by the file and line number from which the warning originated -# and the warning text. Optionally the format may contain $version, which will -# be replaced by the version of the file (if it could be obtained via -# FILE_VERSION_FILTER) -# The default value is: $file:$line: $text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning and error -# messages should be written. If left blank the output is written to standard -# error (stderr). - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# Configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag is used to specify the files and/or directories that contain -# documented source files. You may enter file names like myfile.cpp or -# directories like /usr/src/myproject. Separate the files or directories with -# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING -# Note: If this tag is empty the current directory is searched. - -INPUT = ortools tools/doc - -# This tag can be used to specify the character encoding of the source files -# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses -# libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. -# The default value is: UTF-8. 
- -INPUT_ENCODING = UTF-8 - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# read by doxygen. -# -# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, -# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), -# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen -# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, -# *.vhdl, *.ucf, *.qsf and *.ice. - -FILE_PATTERNS = *.h *.cc *cpp.dox -# The RECURSIVE tag can be used to specify whether or not subdirectories should -# be searched for input files as well. -# The default value is: NO. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should be -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. -# -# Note that relative paths are relative to the directory from which doxygen is -# run. 
- -EXCLUDE = \ - SWIGTYPE* swig* Swig* \ - ortools/algorithms/samples \ - ortools/algorithms/csharp \ - ortools/algorithms/java \ - ortools/algorithms/python \ - ortools/constraint_solver/samples \ - ortools/constraint_solver/csharp \ - ortools/constraint_solver/java \ - ortools/constraint_solver/python \ - ortools/graph/samples \ - ortools/graph/csharp \ - ortools/graph/java \ - ortools/graph/python \ - ortools/linear_solver/samples \ - ortools/linear_solver/csharp \ - ortools/linear_solver/java \ - ortools/linear_solver/python \ - ortools/sat/samples \ - ortools/sat/csharp \ - ortools/sat/java \ - ortools/sat/python \ - ortools/util/csharp \ - ortools/util/java \ - ortools/util/python - -# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or -# directories that are symbolic links (a Unix file system feature) are excluded -# from the input. -# The default value is: NO. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories for example use the pattern */test/* - -EXCLUDE_PATTERNS = */SWIGTYPE* */*swig* */*Swig* */mainJNI* - -# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names -# (namespaces, classes, functions, etc.) that should be excluded from the -# output. The symbol name can be a fully qualified name, a word, or if the -# wildcard * is used, a substring. 
Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test -# -# Note that the wildcards are matched against the file with absolute path, so to -# exclude all test directories use the pattern */test/* - -EXCLUDE_SYMBOLS = */mainJNI* - -# The EXAMPLE_PATH tag can be used to specify one or more files or directories -# that contain example code fragments that are included (see the \include -# command). - -EXAMPLE_PATH = \ - examples/cpp \ - ortools/algorithms/samples \ - ortools/constraint_solver/samples \ - ortools/graph/samples \ - ortools/linear_solver/samples \ - ortools/sat/samples - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and -# *.h) to filter out the source-files in the directories. If left blank all -# files are included. - -EXAMPLE_PATTERNS = *.cc *.h - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude commands -# irrespective of the value of the RECURSIVE tag. -# The default value is: NO. - -EXAMPLE_RECURSIVE = YES - -# The IMAGE_PATH tag can be used to specify one or more files or directories -# that contain images that are to be included in the documentation (see the -# \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command: -# -# -# -# where is the value of the INPUT_FILTER tag, and is the -# name of an input file. Doxygen will then use the output that the filter -# program writes to standard output. If FILTER_PATTERNS is specified, this tag -# will be ignored. -# -# Note that the filter must not add or remove lines; it is applied before the -# code is scanned, but not when the output code is generated. 
If lines are added -# or removed, the anchors will not be placed correctly. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -INPUT_FILTER = "python3 tools/doc/doxygen_filter.py" - -# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern -# basis. Doxygen will compare the file name with each pattern and apply the -# filter if there is a match. The filters are a list of the form: pattern=filter -# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how -# filters are used. If the FILTER_PATTERNS tag is empty or if none of the -# patterns match the file name, INPUT_FILTER is applied. -# -# Note that for custom extensions or not directly supported extensions you also -# need to set EXTENSION_MAPPING for the extension otherwise the files are not -# properly processed by doxygen. - -FILTER_PATTERNS = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will also be used to filter the input files that are used for -# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). -# The default value is: NO. - -FILTER_SOURCE_FILES = NO - -# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file -# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and -# it is also possible to disable source filtering for a specific pattern using -# *.ext= (so without naming a filter). -# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. - -FILTER_SOURCE_PATTERNS = - -# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that -# is part of the input, its contents will be placed on the main page -# (index.html). This can be useful if you have a project on for instance GitHub -# and want to reuse the introduction page also for the doxygen output. 
- -USE_MDFILE_AS_MAINPAGE = - -#--------------------------------------------------------------------------- -# Configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will be -# generated. Documented entities will be cross-referenced with these sources. -# -# Note: To get rid of all source code in the generated output, make sure that -# also VERBATIM_HEADERS is set to NO. -# The default value is: NO. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body of functions, -# classes and enums directly into the documentation. -# The default value is: NO. - -INLINE_SOURCES = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any -# special comment blocks from generated source code fragments. Normal C, C++ and -# Fortran comments will always remain visible. -# The default value is: YES. - -STRIP_CODE_COMMENTS = YES - -# If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# entity all documented functions referencing it will be listed. -# The default value is: NO. - -REFERENCED_BY_RELATION = NO - -# If the REFERENCES_RELATION tag is set to YES then for each documented function -# all documented entities called/used by that function will be listed. -# The default value is: NO. - -REFERENCES_RELATION = NO - -# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES then the hyperlinks from functions in REFERENCES_RELATION and -# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will -# link to the documentation. -# The default value is: YES. 
- -REFERENCES_LINK_SOURCE = YES - -# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the -# source code will show a tooltip with additional information such as prototype, -# brief description and links to the definition and documentation. Since this -# will make the HTML file larger and loading of large files a bit slower, you -# can opt to disable this feature. -# The default value is: YES. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -SOURCE_TOOLTIPS = YES - -# If the USE_HTAGS tag is set to YES then the references to source code will -# point to the HTML generated by the htags(1) tool instead of doxygen built-in -# source browser. The htags tool is part of GNU's global source tagging system -# (see https://www.gnu.org/software/global/global.html). You will need version -# 4.8.6 or higher. -# -# To use it do the following: -# - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file -# - Make sure the INPUT points to the root of the source tree -# - Run doxygen as normal -# -# Doxygen will invoke htags (and that will in turn invoke gtags), so these -# tools must be available from the command line (i.e. in the search path). -# -# The result: instead of the source browser generated by doxygen, the links to -# source code will now point to the output of htags. -# The default value is: NO. -# This tag requires that the tag SOURCE_BROWSER is set to YES. - -USE_HTAGS = NO - -# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a -# verbatim copy of the header file for each class for which an include is -# specified. Set to NO to disable this. -# See also: Section \class. -# The default value is: YES. 
- -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# Configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all -# compounds will be generated. Enable this if the project contains a lot of -# classes, structs, unions or interfaces. -# The default value is: YES. - -ALPHABETICAL_INDEX = YES - -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all classes will -# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag -# can be used to specify a prefix (or a list of prefixes) that should be ignored -# while generating the index headers. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output -# The default value is: YES. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a -# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of -# it. -# The default directory is: html. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_OUTPUT -#HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each -# generated HTML page (for example: .htm, .php, .asp). -# The default value is: .html. 
-# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a user-defined HTML header file for -# each generated HTML page. If the tag is left blank doxygen will generate a -# standard header. -# -# To get valid HTML the header file that includes any scripts and style sheets -# that doxygen needs, which is dependent on the configuration options used (e.g. -# the setting GENERATE_TREEVIEW). It is highly recommended to start with a -# default header using -# doxygen -w html new_header.html new_footer.html new_stylesheet.css -# YourConfigFile -# and then modify the file new_header.html. See also section "Doxygen usage" -# for information on how to generate the default header that doxygen normally -# uses. -# Note: The header is subject to change so you typically have to regenerate the -# default header when upgrading to a newer version of doxygen. For a description -# of the possible markers and block names see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_HEADER = tools/doc/header.tmp.html - -# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each -# generated HTML page. If the tag is left blank doxygen will generate a standard -# footer. See HTML_HEADER for more information on how to generate a default -# footer and what special commands can be used inside the footer. See also -# section "Doxygen usage" for information on how to generate the default footer -# that doxygen normally uses. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FOOTER = tools/doc/footer.tmp.html - -# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style -# sheet that is used by each HTML page. It can be used to fine-tune the look of -# the HTML output. If left blank doxygen will generate a default style sheet. 
-# See also section "Doxygen usage" for information on how to generate the style -# sheet that doxygen normally uses. -# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as -# it is more robust and this tag (HTML_STYLESHEET) will in the future become -# obsolete. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_STYLESHEET = - -# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined -# cascading style sheets that are included after the standard style sheets -# created by doxygen. Using this option one can overrule certain style aspects. -# This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefore more robust against future updates. -# Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra style sheet files is of importance (e.g. the last -# style sheet in the list overrules the setting of the previous ones in the -# list). For an example see the documentation. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_STYLESHEET = tools/doc/styleSheet.tmp.css - -# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or -# other source files which should be copied to the HTML output directory. Note -# that these files will be copied to the base HTML output directory. Use the -# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these -# files. In the HTML_STYLESHEET file, use the file name only. Also note that the -# files will be copied as-is; there are no commands or markers available. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_EXTRA_FILES = - -# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the style sheet and background images according to -# this color. 
Hue is specified as an angle on a colorwheel, see -# https://en.wikipedia.org/wiki/Hue for more information. For instance the value -# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 -# purple, and 360 is red again. -# Minimum value: 0, maximum value: 359, default value: 220. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_HUE = 220 - -# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A -# value of 255 will produce the most vivid colors. -# Minimum value: 0, maximum value: 255, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_SAT = 100 - -# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the -# luminance component of the colors in the HTML output. Values below 100 -# gradually make the output lighter, whereas values above 100 make the output -# darker. The value divided by 100 is the actual gamma applied, so 80 represents -# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not -# change the gamma. -# Minimum value: 40, maximum value: 240, default value: 80. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_COLORSTYLE_GAMMA = 80 - -# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML -# page will contain the date and time when the page was generated. Setting this -# to YES can help to show when doxygen was last run and thus if the -# documentation is up to date. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_TIMESTAMP = NO - -# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML -# documentation will contain a main index with vertical navigation menus that -# are dynamically created via JavaScript. 
If disabled, the navigation index will -# consists of multiple levels of tabs that are statically embedded in every HTML -# page. Disable this option to support browsers that do not have JavaScript, -# like the Qt help browser. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_MENUS = YES - -# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML -# documentation will contain sections that can be hidden and shown after the -# page has loaded. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_DYNAMIC_SECTIONS = NO - -# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries -# shown in the various tree structured indices initially; the user can expand -# and collapse entries dynamically later on. Doxygen will expand the tree to -# such a level that at most the specified number of entries are visible (unless -# a fully collapsed tree already exceeds this amount). So setting the number of -# entries 1 will produce a full collapsed tree by default. 0 is a special value -# representing an infinite number of entries and will result in a full expanded -# tree by default. -# Minimum value: 0, maximum value: 9999, default value: 100. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_INDEX_NUM_ENTRIES = 100 - -# If the GENERATE_DOCSET tag is set to YES, additional index files will be -# generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/xcode/), introduced with OSX -# 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in -# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. 
See https://developer.apple.com/library/archive/featuredarticles/Doxy -# genXcode/_index.html for more information. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_DOCSET = NO - -# This tag determines the name of the docset feed. A documentation feed provides -# an umbrella under which multiple documentation sets from a single provider -# (such as a company or product suite) can be grouped. -# The default value is: Doxygen generated docs. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_FEEDNAME = "OR-Tools Documentation" - -# This tag specifies a string that should uniquely identify the documentation -# set bundle. This should be a reverse domain-name style string, e.g. -# com.mycompany.MyDocSet. Doxygen will append .docset to the name. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_BUNDLE_ID = com.Google.OrTools - -# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify -# the documentation publisher. This should be a reverse domain-name style -# string, e.g. com.mycompany.MyDocSet.documentation. -# The default value is: org.doxygen.Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_ID = com.Google.OrTools - -# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. -# The default value is: Publisher. -# This tag requires that the tag GENERATE_DOCSET is set to YES. - -DOCSET_PUBLISHER_NAME = Google.OR-Tools - -# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three -# additional HTML index files: index.hhp, index.hhc, and index.hhk. The -# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. 
-# -# The HTML Help Workshop contains a compiler that can convert all HTML output -# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML -# files are now used as the Windows 98 help format, and will replace the old -# Windows help format (.hlp) on all Windows platforms in the future. Compressed -# HTML files also contain an index, a table of contents, and you can search for -# words in the documentation. The HTML workshop also contains a viewer for -# compressed HTML files. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_HTMLHELP = NO - -# The CHM_FILE tag can be used to specify the file name of the resulting .chm -# file. You can add a path in front of the file if the result should not be -# written to the html output directory. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_FILE = - -# The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler (hhc.exe). If non-empty, -# doxygen will try to run the HTML help compiler on the generated index.hhp. -# The file has to be specified with full path. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -HHC_LOCATION = - -# The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the main .chm file (NO). -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -GENERATE_CHI = NO - -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) -# and project file content. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -CHM_INDEX_ENCODING = - -# The BINARY_TOC flag controls whether a binary table of contents is generated -# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it -# enables the Previous and Next buttons. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members to -# the table of contents of the HTML help documentation and to the tree view. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTMLHELP is set to YES. - -TOC_EXPAND = NO - -# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and -# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that -# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help -# (.qch) of the generated HTML documentation. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_QHP = NO - -# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify -# the file name of the resulting .qch file. The path specified is relative to -# the HTML output folder. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QCH_FILE = - -# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help -# Project output. For more information please see Qt Help Project / Namespace -# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_NAMESPACE = com.Google.OrTools - -# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt -# Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- -# folders). -# The default value is: doc. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_VIRTUAL_FOLDER = doc - -# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom -# filter to add. 
For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_NAME = - -# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the -# custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_CUST_FILTER_ATTRS = - -# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this -# project's filter section matches. Qt Help Project / Filter Attributes (see: -# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHP_SECT_FILTER_ATTRS = - -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. -# This tag requires that the tag GENERATE_QHP is set to YES. - -QHG_LOCATION = - -# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be -# generated, together with the HTML files, they form an Eclipse help plugin. To -# install this plugin and make it available under the help contents menu in -# Eclipse, the contents of the directory containing the HTML and XML files needs -# to be copied into the plugins directory of eclipse. The name of the directory -# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. -# After copying Eclipse needs to be restarted before the help appears. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_ECLIPSEHELP = NO - -# A unique identifier for the Eclipse help plugin. When installing the plugin -# the directory name containing the HTML and XML files should also have this -# name. 
Each documentation set should have its own identifier. -# The default value is: org.doxygen.Project. -# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. - -ECLIPSE_DOC_ID = org.doxygen.Project - -# If you want full control over the layout of the generated HTML pages it might -# be necessary to disable the index and replace it with your own. The -# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top -# of each HTML page. A value of NO enables the index and the value YES disables -# it. Since the tabs in the index contain the same information as the navigation -# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -DISABLE_INDEX = NO - -# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index -# structure should be generated to display hierarchical information. If the tag -# value is set to YES, a side panel will be generated containing a tree-like -# index structure (just like the one that is generated for HTML Help). For this -# to work a browser that supports JavaScript, DHTML, CSS and frames is required -# (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -GENERATE_TREEVIEW = YES - -# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that -# doxygen will group on one line in the generated HTML documentation. 
-# -# Note that a value of 0 will completely suppress the enum values from appearing -# in the overview section. -# Minimum value: 0, maximum value: 20, default value: 4. -# This tag requires that the tag GENERATE_HTML is set to YES. - -ENUM_VALUES_PER_LINE = 4 - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used -# to set the initial width (in pixels) of the frame in which the tree is shown. -# Minimum value: 0, maximum value: 1500, default value: 250. -# This tag requires that the tag GENERATE_HTML is set to YES. - -TREEVIEW_WIDTH = 250 - -# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to -# external symbols imported via tag files in a separate window. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -EXT_LINKS_IN_WINDOW = NO - -# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg -# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see -# https://inkscape.org) to generate formulas as SVG images instead of PNGs for -# the HTML output. These images will generally look nicer at scaled resolutions. -# Possible values are: png The default and svg Looks nicer but requires the -# pdf2svg tool. -# The default value is: png. -# This tag requires that the tag GENERATE_HTML is set to YES. - -HTML_FORMULA_FORMAT = png - -# Use this tag to change the font size of LaTeX formulas included as images in -# the HTML documentation. When you change the font size after a successful -# doxygen run you need to manually remove any form_*.png images from the HTML -# output directory to force them to be regenerated. -# Minimum value: 8, maximum value: 50, default value: 10. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_FONTSIZE = 10 - -# Use the FORMULA_TRANSPARENT tag to determine whether or not the images -# generated for formulas are transparent PNGs. 
Transparent PNGs are not -# supported properly for IE 6.0, but are supported on all modern browsers. -# -# Note that when changing this option you need to delete any form_*.png files in -# the HTML output directory before the changes have effect. -# The default value is: YES. -# This tag requires that the tag GENERATE_HTML is set to YES. - -FORMULA_TRANSPARENT = YES - -# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands -# to create new LaTeX commands to be used in formulas as building blocks. See -# the section "Including formulas" for details. - -FORMULA_MACROFILE = - -# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# https://www.mathjax.org) which uses client side JavaScript for the rendering -# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX -# installed or if you want to formulas look prettier in the HTML output. When -# enabled you may also need to install MathJax separately and configure the path -# to it using the MATHJAX_RELPATH option. -# The default value is: NO. -# This tag requires that the tag GENERATE_HTML is set to YES. - -USE_MATHJAX = NO - -# When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. -# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. -# The default value is: HTML-CSS. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_FORMAT = HTML-CSS - -# When MathJax is enabled you need to specify the location relative to the HTML -# output directory using the MATHJAX_RELPATH option. The destination directory -# should contain the MathJax.js script. For instance, if the mathjax directory -# is located at the same level as the HTML output directory, then -# MATHJAX_RELPATH should be ../mathjax. 
The default value points to the MathJax -# Content Delivery Network so you can quickly see the result without installing -# MathJax. However, it is strongly recommended to install a local copy of -# MathJax from https://www.mathjax.org before deployment. -# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2 - -# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax -# extension names that should be enabled during MathJax rendering. For example -# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_EXTENSIONS = - -# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces -# of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an -# example see the documentation. -# This tag requires that the tag USE_MATHJAX is set to YES. - -MATHJAX_CODEFILE = - -# When the SEARCHENGINE tag is enabled doxygen will generate a search box for -# the HTML output. The underlying search engine uses javascript and DHTML and -# should work on any modern browser. Note that when using HTML help -# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) -# there is already a search function so this one should typically be disabled. -# For large projects the javascript based search engine can be slow, then -# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to -# search using the keyboard; to jump to the search box use + S -# (what the is depends on the OS and browser, but it is typically -# , /