update python notebooks

This commit is contained in:
Corentin Le Molgat
2022-11-02 10:12:37 +01:00
parent 9270aa0d59
commit 4908fd2fb4
25 changed files with 1760 additions and 177 deletions

View File

@@ -0,0 +1,289 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2022 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# vrp_solution_callback"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/constraint_solver/vrp_solution_callback.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/constraint_solver/samples/vrp_solution_callback.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"!pip install ortools"
]
},
{
"cell_type": "markdown",
"id": "description",
"metadata": {},
"source": [
"\n",
"Simple Vehicles Routing Problem (VRP).\n",
"\n",
" This is a sample using the routing library python wrapper to solve a VRP\n",
" problem.\n",
"\n",
" The solver stop after improving its solution 15 times or after 5 seconds.\n",
"\n",
" Distances are in meters.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"from ortools.constraint_solver import routing_enums_pb2\n",
"from ortools.constraint_solver import pywrapcp\n",
"\n",
"\n",
"def create_data_model():\n",
" \"\"\"Stores the data for the problem.\"\"\"\n",
" data = {}\n",
" data['distance_matrix'] = [\n",
" [\n",
" 0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354,\n",
" 468, 776, 662\n",
" ],\n",
" [\n",
" 548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674,\n",
" 1016, 868, 1210\n",
" ],\n",
" [\n",
" 776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164,\n",
" 1130, 788, 1552, 754\n",
" ],\n",
" [\n",
" 696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822,\n",
" 1164, 560, 1358\n",
" ],\n",
" [\n",
" 582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708,\n",
" 1050, 674, 1244\n",
" ],\n",
" [\n",
" 274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628,\n",
" 514, 1050, 708\n",
" ],\n",
" [\n",
" 502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856,\n",
" 514, 1278, 480\n",
" ],\n",
" [\n",
" 194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320,\n",
" 662, 742, 856\n",
" ],\n",
" [\n",
" 308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662,\n",
" 320, 1084, 514\n",
" ],\n",
" [\n",
" 194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388,\n",
" 274, 810, 468\n",
" ],\n",
" [\n",
" 536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764,\n",
" 730, 388, 1152, 354\n",
" ],\n",
" [\n",
" 502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114,\n",
" 308, 650, 274, 844\n",
" ],\n",
" [\n",
" 388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194,\n",
" 536, 388, 730\n",
" ],\n",
" [\n",
" 354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0,\n",
" 342, 422, 536\n",
" ],\n",
" [\n",
" 468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536,\n",
" 342, 0, 764, 194\n",
" ],\n",
" [\n",
" 776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274,\n",
" 388, 422, 764, 0, 798\n",
" ],\n",
" [\n",
" 662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730,\n",
" 536, 194, 798, 0\n",
" ],\n",
" ]\n",
" data['num_vehicles'] = 4\n",
" data['depot'] = 0\n",
" return data\n",
"\n",
"\n",
"def print_solution(routing_manager: pywrapcp.RoutingIndexManager,\n",
"                   routing_model: pywrapcp.RoutingModel):\n",
"    \"\"\"Prints the current routes and their distances on the console.\"\"\"\n",
"    print('################')\n",
"    print(f'Solution objective: {routing_model.CostVar().Value()}')\n",
"    total_distance = 0\n",
"    for vehicle_id in range(routing_manager.GetNumberOfVehicles()):\n",
"        index = routing_model.Start(vehicle_id)\n",
"        plan_output = f'Route for vehicle {vehicle_id}:\\n'\n",
"        route_distance = 0\n",
"        # Walk the route until the vehicle's end node is reached.\n",
"        while not routing_model.IsEnd(index):\n",
"            plan_output += f' {routing_manager.IndexToNode(index)} ->'\n",
"            previous_index = index\n",
"            index = routing_model.NextVar(index).Value()\n",
"            route_distance += routing_model.GetArcCostForVehicle(\n",
"                previous_index, index, vehicle_id)\n",
"        plan_output += f' {routing_manager.IndexToNode(index)}\\n'\n",
"        plan_output += f'Distance of the route: {route_distance}m\\n'\n",
"        print(plan_output)\n",
"        total_distance += route_distance\n",
"    print(f'Total Distance of all routes: {total_distance}m')\n",
"\n",
"\n",
"class SolutionCallback:\n",
"    \"\"\"Routing solution callback: stops the search after `limit` improvements.\"\"\"\n",
"\n",
"    def __init__(self, manager: pywrapcp.RoutingIndexManager,\n",
"                 model: pywrapcp.RoutingModel, limit: int):\n",
"        self._routing_manager = manager\n",
"        self._routing_model = model\n",
"        self._counter = 0\n",
"        self._counter_limit = limit\n",
"        self.objectives = []\n",
"\n",
"    def __call__(self):\n",
"        objective = int(self._routing_model.CostVar().Value())\n",
"        # Only record, print, and count strictly improving solutions, per the\n",
"        # notebook description ('stop after improving its solution 15 times').\n",
"        if not self.objectives or objective < self.objectives[-1]:\n",
"            self.objectives.append(objective)\n",
"            print_solution(self._routing_manager, self._routing_model)\n",
"            self._counter += 1\n",
"        # Ask the solver to end the current search once the limit is reached.\n",
"        if self._counter > self._counter_limit:\n",
"            self._routing_model.solver().FinishCurrentSearch()\n",
"\n",
"\n",
"def main():\n",
"    \"\"\"Entry point of the program.\"\"\"\n",
"    # Instantiate the data problem.\n",
"    data = create_data_model()\n",
"\n",
"    # Create the routing index manager.\n",
"    routing_manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n",
"                                                   data['num_vehicles'],\n",
"                                                   data['depot'])\n",
"\n",
"    # Create Routing Model.\n",
"    routing_model = pywrapcp.RoutingModel(routing_manager)\n",
"\n",
"    # Create and register a transit callback.\n",
"    def distance_callback(from_index, to_index):\n",
"        \"\"\"Returns the distance between the two nodes.\"\"\"\n",
"        # Convert from routing variable Index to distance matrix NodeIndex.\n",
"        from_node = routing_manager.IndexToNode(from_index)\n",
"        to_node = routing_manager.IndexToNode(to_index)\n",
"        return data['distance_matrix'][from_node][to_node]\n",
"\n",
"    transit_callback_index = routing_model.RegisterTransitCallback(\n",
"        distance_callback)\n",
"\n",
"    # Define cost of each arc.\n",
"    routing_model.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n",
"\n",
"    # Add Distance constraint.\n",
"    dimension_name = 'Distance'\n",
"    routing_model.AddDimension(\n",
"        transit_callback_index,\n",
"        0,  # no slack\n",
"        3000,  # vehicle maximum travel distance\n",
"        True,  # start cumul to zero\n",
"        dimension_name)\n",
"    distance_dimension = routing_model.GetDimensionOrDie(dimension_name)\n",
"    distance_dimension.SetGlobalSpanCostCoefficient(100)\n",
"\n",
"    # Attach a solution callback that stops after 15 improving solutions.\n",
"    solution_callback = SolutionCallback(routing_manager, routing_model, 15)\n",
"    routing_model.AddAtSolutionCallback(solution_callback)\n",
"\n",
"    # Setting first solution heuristic.\n",
"    search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
"    search_parameters.first_solution_strategy = (\n",
"        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
"    search_parameters.local_search_metaheuristic = (\n",
"        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n",
"    search_parameters.time_limit.FromSeconds(5)\n",
"\n",
"    # Solve the problem.\n",
"    solution = routing_model.SolveWithParameters(search_parameters)\n",
"\n",
"    # Print solution on console.\n",
"    if solution:\n",
"        print(f'Best objective: {solution_callback.objectives[-1]}')\n",
"    else:\n",
"        print('No solution found !')\n",
"\n",
"\n",
"main()\n",
"\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -74,9 +74,9 @@
"metadata": {},
"outputs": [],
"source": [
"#!/usr/bin/env python3\n",
"from ortools.constraint_solver import pywrapcp\n",
"\n",
"\n",
"class OneVarLns(pywrapcp.BaseLns):\n",
" \"\"\"One Var LNS.\"\"\"\n",
"\n",

View File

@@ -72,6 +72,7 @@
"id": "description",
"metadata": {},
"source": [
"\n",
"Appointment selection.\n",
"\n",
"This module maximizes the number of appointments that can\n",
@@ -91,12 +92,13 @@
"from ortools.linear_solver import pywraplp\n",
"from ortools.sat.python import cp_model\n",
"\n",
"class FLAGS: pass\n",
"_LOAD_MIN = flags.DEFINE_integer('load_min', 480, 'Minimum load in minutes.')\n",
"_LOAD_MAX = flags.DEFINE_integer('load_max', 540, 'Maximum load in minutes.')\n",
"_COMMUTE_TIME = flags.DEFINE_integer('commute_time', 30,\n",
" 'Commute time in minutes.')\n",
"_NUM_WORKERS = flags.DEFINE_integer('num_workers', 98,\n",
" 'Maximum number of workers.')\n",
"\n",
"FLAGS.load_min = 480 # Minimum load in minutes.\n",
"FLAGS.load_max = 540 # Maximum load in minutes.\n",
"FLAGS.commute_time = 30 # Commute time in minutes.\n",
"FLAGS.num_workers = 98 # Maximum number of workers.\n",
"\n",
"class AllSolutionCollector(cp_model.CpSolverSolutionCallback):\n",
" \"\"\"Stores all solutions.\"\"\"\n",
@@ -180,7 +182,7 @@
" \"\"\"\n",
" solver = pywraplp.Solver.CreateSolver('SCIP')\n",
" if not solver:\n",
" return []\n",
" return []\n",
" n = len(ideal_item_ratios)\n",
" num_distinct_collections = len(item_collections)\n",
" max_num_items_per_collection = 0\n",
@@ -253,13 +255,13 @@
" The same output type as EnumerateAllKnapsacksWithRepetition.\n",
" \"\"\"\n",
" combinations = EnumerateAllKnapsacksWithRepetition(\n",
" [a[2] + FLAGS.commute_time for a in demand], FLAGS.load_min,\n",
" FLAGS.load_max)\n",
" [a[2] + _COMMUTE_TIME.value for a in demand], _LOAD_MIN.value,\n",
" _LOAD_MAX.value)\n",
" print(('Found %d possible day schedules ' % len(combinations) +\n",
" '(i.e. combination of appointments filling up one worker\\'s day)'))\n",
"\n",
" selection = AggregateItemCollectionsOptimally(\n",
" combinations, FLAGS.num_workers, [a[0] / 100.0 for a in demand])\n",
" combinations, _NUM_WORKERS.value, [a[0] / 100.0 for a in demand])\n",
" output = []\n",
" for i in range(len(selection)):\n",
" if selection[i] != 0:\n",
@@ -270,16 +272,16 @@
" return output\n",
"\n",
"\n",
"def solve_appointments(_):\n",
"def main(_):\n",
" demand = [(45.0, 'Type1', 90), (30.0, 'Type2', 120), (25.0, 'Type3', 180)]\n",
" print('*** input problem ***')\n",
" print('Appointments: ')\n",
" for a in demand:\n",
" print(' %.2f%% of %s : %d min' % (a[0], a[1], a[2]))\n",
" print('Commute time = %d' % FLAGS.commute_time)\n",
" print('Commute time = %d' % _COMMUTE_TIME.value)\n",
" print('Acceptable duration of a work day = [%d..%d]' %\n",
" (FLAGS.load_min, FLAGS.load_max))\n",
" print('%d workers' % FLAGS.num_workers)\n",
" (_LOAD_MIN.value, _LOAD_MAX.value))\n",
" print('%d workers' % _NUM_WORKERS.value)\n",
" selection = GetOptimalSchedule(demand)\n",
" print()\n",
" installed = 0\n",
@@ -300,15 +302,17 @@
" print()\n",
" print('%d installations planned' % installed)\n",
" for a in demand:\n",
" name = a[1]\n",
" per_type = installed_per_type[name]\n",
" if installed != 0:\n",
" print(f' {per_type} ({per_type * 100.0 / installed}%) installations of type {name} planned')\n",
" else:\n",
" print(f' {per_type} installations of type {name} planned')\n",
" name = a[1]\n",
" per_type = installed_per_type[name]\n",
" if installed != 0:\n",
" print(\n",
" f' {per_type} ({per_type * 100.0 / installed}%) installations of type {name} planned'\n",
" )\n",
" else:\n",
" print(f' {per_type} installations of type {name} planned')\n",
"\n",
"\n",
"solve_appointments()\n",
"main()\n",
"\n"
]
}

View File

@@ -98,11 +98,14 @@
"from google.protobuf import text_format\n",
"from ortools.sat.python import cp_model\n",
"\n",
"class FLAGS: pass\n",
"_OUTPUT_PROTO = flags.DEFINE_string(\n",
" 'output_proto', '', 'Output file to write the cp_model proto to.')\n",
"_PARAMS = flags.DEFINE_string('params',\n",
" 'num_search_workers:8,log_search_progress:true',\n",
" 'Sat solver parameters.')\n",
"_INSTANCE = flags.DEFINE_integer('instance', 1, 'Instance to select (1, 2, 3).',\n",
" 1, 3)\n",
"\n",
"FLAGS.output_proto = '' # Output file to write the cp_model proto to.\n",
"FLAGS.params = 'num_search_workers:16,log_search_progress:true' # Sat solver parameters.\n",
"FLAGS.instance = 1 # Instance to select (1, 2, 3).\n",
"SAMPLE_SHIFTS_SMALL = [\n",
" #\n",
" # column description:\n",
@@ -1752,11 +1755,11 @@
" The objective value of the model.\n",
" \"\"\"\n",
" shifts = None\n",
" if FLAGS.instance == 1:\n",
" if _INSTANCE.value == 1:\n",
" shifts = SAMPLE_SHIFTS_SMALL\n",
" elif FLAGS.instance == 2:\n",
" elif _INSTANCE.value == 2:\n",
" shifts = SAMPLE_SHIFTS_MEDIUM\n",
" elif FLAGS.instance == 3:\n",
" elif _INSTANCE.value == 3:\n",
" shifts = SAMPLE_SHIFTS_LARGE\n",
"\n",
" num_shifts = len(shifts)\n",
@@ -1997,15 +2000,15 @@
" model.Minimize(\n",
" cp_model.LinearExpr.WeightedSum(delay_literals, delay_weights))\n",
"\n",
" if not minimize_drivers and FLAGS.output_proto:\n",
" print('Writing proto to %s' % FLAGS.output_proto)\n",
" with open(FLAGS.output_proto, 'w') as text_file:\n",
" if not minimize_drivers and _OUTPUT_PROTO.value:\n",
" print('Writing proto to %s' % _OUTPUT_PROTO.value)\n",
" with open(_OUTPUT_PROTO.value, 'w') as text_file:\n",
" text_file.write(str(model))\n",
"\n",
" # Solve model.\n",
" solver = cp_model.CpSolver()\n",
" if FLAGS.params:\n",
" text_format.Parse(FLAGS.params, solver.parameters)\n",
" if _PARAMS.value:\n",
" text_format.Parse(_PARAMS.value, solver.parameters)\n",
"\n",
" status = solver.Solve(model)\n",
"\n",
@@ -2042,7 +2045,7 @@
" return int(solver.ObjectiveValue())\n",
"\n",
"\n",
"def solve_bus_driver_scheduling():\n",
"def main(_):\n",
" \"\"\"Optimize the bus driver allocation in two passes.\"\"\"\n",
" print('----------- first pass: minimize the number of drivers')\n",
" num_drivers = bus_driver_scheduling(True, -1)\n",
@@ -2053,10 +2056,6 @@
" bus_driver_scheduling(False, num_drivers)\n",
"\n",
"\n",
"def main(_=None):\n",
" solve_bus_driver_scheduling()\n",
"\n",
"\n",
"main()\n",
"\n"
]

View File

@@ -144,8 +144,8 @@
" model.Add(x_starts[i] <= x_starts[i + 1]).OnlyEnforceIf(same)\n",
"\n",
" # Symmetry breaking 2: first square in one quadrant.\n",
" model.Add(x_starts[0] < 36)\n",
" model.Add(y_starts[0] < 19)\n",
" model.Add(x_starts[0] < (size_x + 1)// 2)\n",
" model.Add(y_starts[0] < (size_y + 1) // 2)\n",
"\n",
" # Creates a solver and solves.\n",
" solver = cp_model.CpSolver()\n",

View File

@@ -95,7 +95,7 @@
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"def main(_=None):\n",
"def main(_):\n",
" \"\"\"Solves the gate scheduling problem.\"\"\"\n",
" model = cp_model.CpModel()\n",
"\n",

View File

@@ -257,7 +257,7 @@
" print(' - wall time : %f s' % solver.WallTime())\n",
"\n",
"\n",
"def main(_=None):\n",
"def main(_):\n",
" for pb in range(1, 7):\n",
" solve_hidato(build_puzzle(pb), pb)\n",
"\n",

View File

@@ -95,11 +95,14 @@
"\n",
"from ortools.sat.python import cp_model\n",
"\n",
"class FLAGS: pass\n",
"_OUTPUT_PROTO = flags.DEFINE_string(\n",
" 'output_proto', '', 'Output file to write the cp_model proto to.')\n",
"_PARAMS = flags.DEFINE_string('params',\n",
" 'num_search_workers:16,log_search_progress:true',\n",
" 'Sat solver parameters.')\n",
"_MODEL = flags.DEFINE_string('model', 'rotation',\n",
" '\\'duplicate\\' or \\'rotation\\' or \\'optional\\'')\n",
"\n",
"FLAGS.output_proto = '' # Output file to write the cp_model proto to.\n",
"FLAGS.params = 'num_search_workers:16,log_search_progress:true' # Sat solver parameters.\n",
"FLAGS.model = 'rotation' # 'duplicate' or 'rotation' or 'optional'\n",
"\n",
"def build_data():\n",
" \"\"\"Build the data frame.\"\"\"\n",
@@ -193,15 +196,15 @@
" model.Maximize(cp_model.LinearExpr.WeightedSum(is_used, item_values))\n",
"\n",
" # Output proto to file.\n",
" if FLAGS.output_proto:\n",
" print('Writing proto to %s' % FLAGS.output_proto)\n",
" with open(FLAGS.output_proto, 'w') as text_file:\n",
" if _OUTPUT_PROTO.value:\n",
" print('Writing proto to %s' % _OUTPUT_PROTO.value)\n",
" with open(_OUTPUT_PROTO.value, 'w') as text_file:\n",
" text_file.write(str(model))\n",
"\n",
" # Solve model.\n",
" solver = cp_model.CpSolver()\n",
" if FLAGS.params:\n",
" text_format.Parse(FLAGS.params, solver.parameters)\n",
" if _PARAMS.value:\n",
" text_format.Parse(_PARAMS.value, solver.parameters)\n",
"\n",
" status = solver.Solve(model)\n",
"\n",
@@ -283,15 +286,15 @@
" model.Maximize(cp_model.LinearExpr.WeightedSum(is_used, item_values))\n",
"\n",
" # Output proto to file.\n",
" if FLAGS.output_proto:\n",
" print('Writing proto to %s' % FLAGS.output_proto)\n",
" with open(FLAGS.output_proto, 'w') as text_file:\n",
" if _OUTPUT_PROTO.value:\n",
" print('Writing proto to %s' % _OUTPUT_PROTO.value)\n",
" with open(_OUTPUT_PROTO.value, 'w') as text_file:\n",
" text_file.write(str(model))\n",
"\n",
" # Solve model.\n",
" solver = cp_model.CpSolver()\n",
" if FLAGS.params:\n",
" text_format.Parse(FLAGS.params, solver.parameters)\n",
" if _PARAMS.value:\n",
" text_format.Parse(_PARAMS.value, solver.parameters)\n",
"\n",
" status = solver.Solve(model)\n",
"\n",
@@ -396,15 +399,15 @@
" model.Maximize(cp_model.LinearExpr.WeightedSum(is_used, item_values))\n",
"\n",
" # Output proto to file.\n",
" if FLAGS.output_proto:\n",
" print('Writing proto to %s' % FLAGS.output_proto)\n",
" with open(FLAGS.output_proto, 'w') as text_file:\n",
" if _OUTPUT_PROTO.value:\n",
" print('Writing proto to %s' % _OUTPUT_PROTO.value)\n",
" with open(_OUTPUT_PROTO.value, 'w') as text_file:\n",
" text_file.write(str(model))\n",
"\n",
" # Solve model.\n",
" solver = cp_model.CpSolver()\n",
" if FLAGS.params:\n",
" text_format.Parse(FLAGS.params, solver.parameters)\n",
" if _PARAMS.value:\n",
" text_format.Parse(_PARAMS.value, solver.parameters)\n",
"\n",
" status = solver.Solve(model)\n",
"\n",
@@ -423,21 +426,17 @@
" print(data)\n",
"\n",
"\n",
"def solve_knapsack(model):\n",
"def main(_):\n",
" \"\"\"Solve the problem with all models.\"\"\"\n",
" data, max_height, max_width = build_data()\n",
" if model == 'duplicate':\n",
" if _MODEL.value == 'duplicate':\n",
" solve_with_duplicate_items(data, max_height, max_width)\n",
" elif model == 'optional':\n",
" elif _MODEL.value == 'optional':\n",
" solve_with_duplicate_optional_items(data, max_height, max_width)\n",
" else:\n",
" solve_with_rotations(data, max_height, max_width)\n",
"\n",
"\n",
"def main(_=None):\n",
" solve_knapsack(FLAGS.model)\n",
"\n",
"\n",
"main()\n",
"\n"
]

View File

@@ -0,0 +1,428 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2022 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# line_balancing_sat"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/examples/line_balancing_sat.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/examples/python/line_balancing_sat.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"!pip install ortools"
]
},
{
"cell_type": "markdown",
"id": "description",
"metadata": {},
"source": [
"Reader and solver of the single assembly line balancing problem.\n",
"\n",
"from https://assembly-line-balancing.de/salbp/:\n",
"\n",
"The simple assembly line balancing problem (SALBP) is the basic optimization\n",
"problem in assembly line balancing research. Given is a set of tasks each of\n",
"which has a deterministic task time. The tasks are partially ordered by\n",
"precedence relations defining a precedence graph as depicted below.\n",
"\n",
"It reads .alb files:\n",
" https://assembly-line-balancing.de/wp-content/uploads/2017/01/format-ALB.pdf\n",
"\n",
"and solves the corresponding problem.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"import collections\n",
"import re\n",
"from typing import Sequence\n",
"\n",
"from google.protobuf import text_format\n",
"from ortools.sat.python import cp_model\n",
"\n",
"# Notebook shim for absl flags (same pattern as the other notebooks in this\n",
"# repo): plain attributes the reader can tweak in place. The original\n",
"# flags.DEFINE_* calls referenced the never-imported `flags` module, and the\n",
"# code below reads FLAGS.* rather than the _INPUT/_PARAMS/... handles.\n",
"class FLAGS: pass\n",
"\n",
"FLAGS.input = ''  # Input file to parse and solve.\n",
"FLAGS.params = ''  # Sat solver parameters.\n",
"FLAGS.output_proto = ''  # Output file to write the cp_model proto to.\n",
"FLAGS.model = 'boolean'  # Model used: boolean, scheduling, greedy\n",
"# pytype: disable=wrong-arg-types\n",
"\n",
"\n",
"class SectionInfo(object):\n",
" \"\"\"Store model information for each section of the input file.\"\"\"\n",
"\n",
" def __init__(self):\n",
" self.value = None\n",
" self.index_map = {}\n",
" self.set_of_pairs = set()\n",
"\n",
" def __str__(self):\n",
" if self.index_map:\n",
" return f'SectionInfo(index_map={self.index_map})'\n",
" elif self.set_of_pairs:\n",
" return f'SectionInfo(set_of_pairs={self.set_of_pairs})'\n",
" elif self.value is not None:\n",
" return f'SectionInfo(value={self.value})'\n",
" else:\n",
" return 'SectionInfo()'\n",
"\n",
"\n",
"def read_model(filename):\n",
" \"\"\"Reads a .alb file and returns the model.\"\"\"\n",
"\n",
" current_info = SectionInfo()\n",
"\n",
" model = {}\n",
" with open(filename, 'r') as input_file:\n",
"    print(f'Reading model from \\'{filename}\\'')\n",
" section_name = ''\n",
"\n",
" for line in input_file:\n",
" stripped_line = line.strip()\n",
" if not stripped_line:\n",
" continue\n",
"\n",
" match_section_def = re.match(r'<([\\w\\s]+)>', stripped_line)\n",
" if match_section_def:\n",
" section_name = match_section_def.group(1)\n",
" if section_name == 'end':\n",
" continue\n",
"\n",
" current_info = SectionInfo()\n",
" model[section_name] = current_info\n",
" continue\n",
"\n",
" match_single_number = re.match(r'^([0-9]+)$', stripped_line)\n",
" if match_single_number:\n",
" current_info.value = int(match_single_number.group(1))\n",
" continue\n",
"\n",
" match_key_value = re.match(r'^([0-9]+)\\s+([0-9]+)$', stripped_line)\n",
" if match_key_value:\n",
" key = int(match_key_value.group(1))\n",
" value = int(match_key_value.group(2))\n",
" current_info.index_map[key] = value\n",
" continue\n",
"\n",
" match_pair = re.match(r'^([0-9]+),([0-9]+)$', stripped_line)\n",
" if match_pair:\n",
" left = int(match_pair.group(1))\n",
" right = int(match_pair.group(2))\n",
" current_info.set_of_pairs.add((left, right))\n",
" continue\n",
"\n",
" print(f'Unrecognized line \\'{stripped_line}\\'')\n",
"\n",
" return model\n",
"\n",
"\n",
"def print_stats(model):\n",
" print('Model Statistics')\n",
" for key, value in model.items():\n",
" print(f' - {key}: {value}')\n",
"\n",
"\n",
"def solve_model_greedily(model):\n",
" \"\"\"Compute a greedy solution.\"\"\"\n",
" print('Solving using a Greedy heuristics')\n",
"\n",
" num_tasks = model['number of tasks'].value\n",
" all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the data.\n",
" precedences = model['precedence relations'].set_of_pairs\n",
" durations = model['task times'].index_map\n",
" cycle_time = model['cycle time'].value\n",
"\n",
" weights = collections.defaultdict(int)\n",
" successors = collections.defaultdict(list)\n",
"\n",
" candidates = set(all_tasks)\n",
"\n",
" for before, after in precedences:\n",
" weights[after] += 1\n",
" successors[before].append(after)\n",
" if after in candidates:\n",
" candidates.remove(after)\n",
"\n",
" assignment = {}\n",
" current_pod = 0\n",
" residual_capacity = cycle_time\n",
"\n",
" while len(assignment) < num_tasks:\n",
" if not candidates:\n",
" print('error empty')\n",
" break\n",
"\n",
" best = -1\n",
" best_slack = cycle_time\n",
" best_duration = 0\n",
"\n",
" for c in candidates:\n",
" duration = durations[c]\n",
" slack = residual_capacity - duration\n",
" if slack < best_slack and slack >= 0:\n",
" best_slack = slack\n",
" best = c\n",
" best_duration = duration\n",
"\n",
" if best == -1:\n",
" current_pod += 1\n",
" residual_capacity = cycle_time\n",
" continue\n",
"\n",
" candidates.remove(best)\n",
" assignment[best] = current_pod\n",
" residual_capacity -= best_duration\n",
"\n",
" for succ in successors[best]:\n",
" weights[succ] -= 1\n",
" if weights[succ] == 0:\n",
" candidates.add(succ)\n",
" del weights[succ]\n",
"\n",
" print(f' greedy solution uses {current_pod + 1} pods.')\n",
"\n",
" return assignment\n",
"\n",
"\n",
"def solve_boolean_model(model, hint):\n",
" \"\"\"Solve the given model.\"\"\"\n",
"\n",
" print('Solving using the Boolean model')\n",
" # Model data\n",
" num_tasks = model['number of tasks'].value\n",
" all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the model.\n",
" durations = model['task times'].index_map\n",
" precedences = model['precedence relations'].set_of_pairs\n",
" cycle_time = model['cycle time'].value\n",
"\n",
" num_pods = max(p for _, p in hint.items()) + 1 if hint else num_tasks - 1\n",
" all_pods = range(num_pods)\n",
"\n",
" model = cp_model.CpModel()\n",
"\n",
" # assign[t, p] indicates if task t is done on pod p.\n",
" assign = {}\n",
" # possible[t, p] indicates if task t is possible on pod p.\n",
" possible = {}\n",
"\n",
" # Create the variables\n",
" for t in all_tasks:\n",
" for p in all_pods:\n",
" assign[t, p] = model.NewBoolVar(f'assign_{t}_{p}')\n",
" possible[t, p] = model.NewBoolVar(f'possible_{t}_{p}')\n",
"\n",
" # active[p] indicates if pod p is active.\n",
" active = [model.NewBoolVar(f'active_{p}') for p in all_pods]\n",
"\n",
" # Each task is done on exactly one pod.\n",
" for t in all_tasks:\n",
" model.AddExactlyOne([assign[t, p] for p in all_pods])\n",
"\n",
" # Total tasks assigned to one pod cannot exceed cycle time.\n",
" for p in all_pods:\n",
" model.Add(\n",
" sum(assign[t, p] * durations[t] for t in all_tasks) <= cycle_time)\n",
"\n",
" # Maintain the possible variables:\n",
" # possible at pod p -> possible at any pod after p\n",
" for t in all_tasks:\n",
" for p in range(num_pods - 1):\n",
" model.AddImplication(possible[t, p], possible[t, p + 1])\n",
"\n",
" # Link possible and active variables.\n",
" for t in all_tasks:\n",
" for p in all_pods:\n",
" model.AddImplication(assign[t, p], possible[t, p])\n",
" if p > 1:\n",
" model.AddImplication(assign[t, p], possible[t, p - 1].Not())\n",
"\n",
" # Precedences.\n",
" for before, after in precedences:\n",
" for p in range(1, num_pods):\n",
" model.AddImplication(assign[before, p], possible[after,\n",
" p - 1].Not())\n",
"\n",
" # Link active variables with the assign one.\n",
" for p in all_pods:\n",
" all_assign_vars = [assign[t, p] for t in all_tasks]\n",
" for a in all_assign_vars:\n",
" model.AddImplication(a, active[p])\n",
" model.AddBoolOr(all_assign_vars + [active[p].Not()])\n",
"\n",
" # Force pods to be contiguous. This is critical to get good lower bounds\n",
" # on the objective, even if it makes feasibility harder.\n",
" for p in range(1, num_pods):\n",
" model.AddImplication(active[p - 1].Not(), active[p].Not())\n",
" for t in all_tasks:\n",
" model.AddImplication(active[p].Not(), possible[t, p - 1])\n",
"\n",
" # Objective.\n",
" model.Minimize(sum(active))\n",
"\n",
" # Add search hinting from the greedy solution.\n",
" for t in all_tasks:\n",
" model.AddHint(assign[t, hint[t]], 1)\n",
"\n",
" if FLAGS.output_proto:\n",
" print(f'Writing proto to {FLAGS.output_proto}')\n",
" model.ExportToFile(FLAGS.output_proto)\n",
"\n",
" # Solve model.\n",
" solver = cp_model.CpSolver()\n",
" if FLAGS.params:\n",
" text_format.Parse(FLAGS.params, solver.parameters)\n",
" solver.parameters.log_search_progress = True\n",
" solver.Solve(model)\n",
"\n",
"\n",
"def solve_scheduling_model(model, hint):\n",
"    \"\"\"Solves the given model using a cumulative scheduling formulation.\n",
"\n",
"    Each task becomes a unit-length interval whose start is the pod index;\n",
"    the cycle-time capacity is enforced with a single AddCumulative.\n",
"    \"\"\"\n",
"\n",
"    print('Solving using the scheduling model')\n",
"    # Model data\n",
"    num_tasks = model['number of tasks'].value\n",
"    all_tasks = range(1, num_tasks + 1)  # Tasks are 1 based in the data.\n",
"    durations = model['task times'].index_map\n",
"    precedences = model['precedence relations'].set_of_pairs\n",
"    cycle_time = model['cycle time'].value\n",
"\n",
"    num_pods = max(p for _, p in hint.items()) + 1 if hint else num_tasks\n",
"\n",
"    model = cp_model.CpModel()\n",
"\n",
"    # pods[t] indicates on which pod the task is performed.\n",
"    pods = {}\n",
"    for t in all_tasks:\n",
"        pods[t] = model.NewIntVar(0, num_pods - 1, f'pod_{t}')\n",
"\n",
"    # One unit-size interval per task, consuming `duration` of the capacity.\n",
"    intervals = []\n",
"    demands = []\n",
"    for t in all_tasks:\n",
"        interval = model.NewFixedSizeIntervalVar(pods[t], 1, '')\n",
"        intervals.append(interval)\n",
"        demands.append(durations[t])\n",
"\n",
"    # Add terminating interval as the objective: it saturates the capacity\n",
"    # from obj_var onward, so minimizing obj_var minimizes the pods used.\n",
"    obj_var = model.NewIntVar(1, num_pods, 'obj_var')\n",
"    obj_size = model.NewIntVar(1, num_pods, 'obj_duration')\n",
"    obj_interval = model.NewIntervalVar(obj_var, obj_size, num_pods + 1,\n",
"                                        'obj_interval')\n",
"    intervals.append(obj_interval)\n",
"    demands.append(cycle_time)\n",
"\n",
"    # Cumulative constraint.\n",
"    model.AddCumulative(intervals, demands, cycle_time)\n",
"\n",
"    # Precedences.\n",
"    for before, after in precedences:\n",
"        model.Add(pods[after] >= pods[before])\n",
"\n",
"    # Objective.\n",
"    model.Minimize(obj_var)\n",
"\n",
"    # Add search hinting from the greedy solution, when one exists\n",
"    # (hint[t] would raise KeyError on an empty hint).\n",
"    if hint:\n",
"        for t in all_tasks:\n",
"            model.AddHint(pods[t], hint[t])\n",
"\n",
"    if FLAGS.output_proto:\n",
"        print(f'Writing proto to {FLAGS.output_proto}')\n",
"        model.ExportToFile(FLAGS.output_proto)\n",
"\n",
"    # Solve model.\n",
"    solver = cp_model.CpSolver()\n",
"    if FLAGS.params:\n",
"        text_format.Parse(FLAGS.params, solver.parameters)\n",
"    solver.parameters.log_search_progress = True\n",
"    solver.parameters.exploit_all_precedences = True  # Helps with the lower bound.\n",
"    solver.Solve(model)\n",
"\n",
"\n",
"def main():\n",
"    \"\"\"Reads the .alb instance, solves greedily, then with the chosen model.\"\"\"\n",
"    # The original signature took argv and raised app.UsageError, but `app`\n",
"    # is not imported in this notebook and the call site passes no argument.\n",
"    if FLAGS.input == '':\n",
"        raise ValueError('Missing input file: set FLAGS.input to a .alb file.')\n",
"\n",
"    model = read_model(FLAGS.input)\n",
"    print_stats(model)\n",
"    greedy_solution = solve_model_greedily(model)\n",
"\n",
"    if FLAGS.model == 'boolean':\n",
"        solve_boolean_model(model, greedy_solution)\n",
"    elif FLAGS.model == 'scheduling':\n",
"        solve_scheduling_model(model, greedy_solution)\n",
"\n",
"\n",
"main()\n",
"\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -234,15 +234,17 @@
"\n",
" # Solve and print out the solution.\n",
" solver = cp_model.CpSolver()\n",
" # To benefit from the linearization of the circuit constraint.\n",
" solver.parameters.linearization_level = 2\n",
" solver.parameters.num_search_workers = 8\n",
" solver.parameters.max_time_in_seconds = 15.0\n",
" #solver.parameters.log_search_progress = True\n",
"\n",
" solver.Solve(model)\n",
" #print(solver.ResponseStats())\n",
" print_solution(solver, visited_nodes, used_arcs, num_nodes, num_vehicles)\n",
"\n",
" status = solver.Solve(model)\n",
" if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:\n",
" print(f'search returned with the status {solver.StatusName(status)}')\n",
" print_solution(solver, visited_nodes, used_arcs,\n",
" num_nodes, num_vehicles)\n",
" else:\n",
" print(solver.ResponseStats())\n",
"\n",
"\n",
"main()\n",

View File

@@ -0,0 +1,118 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2022 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# proto_solve"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/examples/proto_solve.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/examples/python/proto_solve.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"!pip install ortools"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"from ortools.linear_solver.python import model_builder\n",
"\n",
"# Notebook replacement for absl command-line flags: the original script used\n",
"# flags.DEFINE_string(...), but the `flags` module was never imported here,\n",
"# which raised a NameError on the first run. Use the plain-attribute FLAGS\n",
"# convention shared by the other converted notebooks instead.\n",
"class FLAGS: pass\n",
"\n",
"FLAGS.input = ''  # Input file to load and solve.\n",
"FLAGS.params = ''  # Solver parameters in string format.\n",
"FLAGS.solver = 'sat'  # Solver type to solve the model with.\n",
"\n",
"\n",
"def main(_=None):\n",
"    \"\"\"Loads an MPS model from FLAGS.input and solves it with FLAGS.solver.\n",
"\n",
"    The argument is unused; it defaults to None so the trailing `main()` call\n",
"    works without an argv argument (the original `def main(_):` crashed).\n",
"    \"\"\"\n",
"    model = model_builder.ModelBuilder()\n",
"\n",
"    # Load MPS file.\n",
"    if not model.import_from_mps_file(FLAGS.input):\n",
"        print(f'Cannot import MPS file: \\'{FLAGS.input}\\'')\n",
"        return\n",
"\n",
"    # Create solver.\n",
"    solver = model_builder.ModelSolver(FLAGS.solver)\n",
"    if not solver:\n",
"        print(f'Cannot create solver with name \\'{FLAGS.solver}\\'')\n",
"        return\n",
"\n",
"    # Set solver-specific parameters, if any were provided.\n",
"    if FLAGS.params:\n",
"        solver.set_solver_specific_parameters(FLAGS.params)\n",
"\n",
"    # Enable the output of the solver.\n",
"    solver.enable_output(True)\n",
"\n",
"    # And solve.\n",
"    solver.solve(model)\n",
"\n",
"\n",
"main()\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -72,7 +72,15 @@
"id": "description",
"metadata": {},
"source": [
"Sat based solver for the RCPSP problems (see rcpsp.proto).\n"
"Sat based solver for the RCPSP problems (see rcpsp.proto).\n",
"\n",
"Introduction to the problem:\n",
" https://www.projectmanagement.ugent.be/research/project_scheduling/rcpsp\n",
"\n",
"Data used in flags:\n",
" http://www.om-db.wi.tum.de/psplib/data.html\n",
"\n",
"\n"
]
},
{
@@ -87,15 +95,31 @@
"from google.protobuf import text_format\n",
"from ortools.sat.python import cp_model\n",
"from ortools.scheduling import pywraprcpsp\n",
"from ortools.scheduling import rcpsp_pb2\n",
"\n",
"class FLAGS: pass\n",
"\n",
"FLAGS.input = '' # Input file to parse and solve.\n",
"FLAGS.output_proto = '' # Output file to write the cp_model proto to.\n",
"FLAGS.params = '' # Sat solver parameters.\n",
"FLAGS.use_interval_makespan = True # Whether we encode the makespan using an interval or not.\n",
"FLAGS.horizon = -1 # Force horizon.\n",
"FLAGS.use_main_interval_for_tasks = True # Creates a main interval for each task, and use it in precedences\n",
"_INPUT = flags.DEFINE_string('input', '', 'Input file to parse and solve.')\n",
"_OUTPUT_PROTO = flags.DEFINE_string(\n",
" 'output_proto', '', 'Output file to write the cp_model proto to.')\n",
"_PARAMS = flags.DEFINE_string('params', '', 'Sat solver parameters.')\n",
"_USE_INTERVAL_MAKESPAN = flags.DEFINE_bool(\n",
" 'use_interval_makespan', True,\n",
" 'Whether we encode the makespan using an interval or not.')\n",
"_HORIZON = flags.DEFINE_integer('horizon', -1, 'Force horizon.')\n",
"_ADD_REDUNDANT_ENERGETIC_CONSTRAINTS = flags.DEFINE_bool(\n",
" 'add_redundant_energetic_constraints', False,\n",
" 'Add redundant energetic constraints on the pairs of tasks extracted from' +\n",
" ' precedence graph.')\n",
"_DELAY_TIME_LIMIT = flags.DEFINE_float(\n",
" 'delay_time_limit', 0.0,\n",
" 'Time limit when computing min delay between tasks.' +\n",
" ' A non-positive time limit disable min delays computation.')\n",
"_PREEMPTIVE_LB_TIME_LIMIT = flags.DEFINE_float(\n",
" 'preemptive_lb_time_limit', 0.0,\n",
" 'Time limit when computing a preemptive schedule lower bound.' +\n",
" ' A non-positive time limit disable this computation.')\n",
"\n",
"\n",
"def PrintProblemStatistics(problem):\n",
" \"\"\"Display various statistics on the problem.\"\"\"\n",
@@ -145,24 +169,135 @@
" print(f' - {tasks_with_delay} tasks with successor delays')\n",
"\n",
"\n",
"def SolveRcpsp(problem, proto_file, params):\n",
" \"\"\"Parse and solve a given RCPSP problem in proto format.\"\"\"\n",
" PrintProblemStatistics(problem)\n",
"def AnalyseDependencyGraph(problem):\n",
" \"\"\"Analyses the dependency graph to improve the model.\n",
"\n",
" Args:\n",
" problem: the protobuf of the problem to solve.\n",
"\n",
" Returns:\n",
" a list of (task1, task2, in_between_tasks) with task2 an indirect successor\n",
" of task1, and in_between_tasks being the list of all tasks after task1 and\n",
" before task2.\n",
" \"\"\"\n",
"\n",
" num_nodes = len(problem.tasks)\n",
" print(f'Analysing the dependency graph over {num_nodes} nodes')\n",
"\n",
" ins = collections.defaultdict(list)\n",
" outs = collections.defaultdict(list)\n",
" after = collections.defaultdict(set)\n",
" before = collections.defaultdict(set)\n",
"\n",
" # Build the transitive closure of the precedences.\n",
" # This algorithm has the wrong complexity (n^4), but is OK for the psplib\n",
" # as the biggest example has 120 nodes.\n",
" for n in range(num_nodes):\n",
" for s in problem.tasks[n].successors:\n",
" ins[s].append(n)\n",
" outs[n].append(s)\n",
"\n",
" for a in list(after[s]) + [s]:\n",
" for b in list(before[n]) + [n]:\n",
" after[b].add(a)\n",
" before[a].add(b)\n",
"\n",
" # Search for pair of tasks, containing at least two parallel branch between\n",
" # them in the precedence graph.\n",
" num_candidates = 0\n",
" result = []\n",
" for source, start_outs in outs.items():\n",
" if len(start_outs) <= 1:\n",
" # Starting with the unique successor of source will be as good.\n",
" continue\n",
" for sink, end_ins in ins.items():\n",
" if len(end_ins) <= 1:\n",
" # Ending with the unique predecessor of sink will be as good.\n",
" continue\n",
" if sink == source:\n",
" continue\n",
" if sink not in after[source]:\n",
" continue\n",
"\n",
" num_active_outgoing_branches = 0\n",
" num_active_incoming_branches = 0\n",
" for succ in outs[source]:\n",
" if sink in after[succ]:\n",
" num_active_outgoing_branches += 1\n",
" for pred in ins[sink]:\n",
" if source in before[pred]:\n",
" num_active_incoming_branches += 1\n",
"\n",
" if num_active_outgoing_branches <= 1 or num_active_incoming_branches <= 1:\n",
" continue\n",
"\n",
" common = after[source].intersection(before[sink])\n",
" if len(common) <= 1:\n",
" continue\n",
" num_candidates += 1\n",
" result.append((source, sink, common))\n",
"\n",
" # Sort entries lexicographically by (len(common), source, sink)\n",
" def Price(entry):\n",
" return (num_nodes * num_nodes * len(entry[2]) + num_nodes * entry[0] +\n",
" entry[1])\n",
"\n",
" result.sort(key=Price)\n",
" print(f' - created {len(result)} pairs of nodes to examine', flush=True)\n",
" return result, after\n",
"\n",
"\n",
"def SolveRcpsp(problem,\n",
" proto_file,\n",
" params,\n",
" active_tasks,\n",
" source,\n",
" sink,\n",
" intervals_of_tasks,\n",
" delays,\n",
" in_main_solve=False,\n",
" initial_solution=None,\n",
" lower_bound=0):\n",
" \"\"\"Parse and solve a given RCPSP problem in proto format.\n",
"\n",
" The model will only look at the tasks {source} + {sink} + active_tasks, and\n",
" ignore all others.\n",
"\n",
" Args:\n",
" problem: the description of the model to solve in protobuf format\n",
" proto_file: the name of the file to export the CpModel proto to.\n",
" params: the string representation of the parameters to pass to the sat\n",
" solver.\n",
" active_tasks: the set of active tasks to consider.\n",
" source: the source task in the graph. Its end will be forced to 0.\n",
" sink: the sink task of the graph. Its start is the makespan of the problem.\n",
" intervals_of_tasks: a heuristic lists of (task1, task2, tasks) used to add\n",
" redundant energetic equations to the model.\n",
" delays: a list of (task1, task2, min_delays) used to add extended precedence\n",
" constraints (start(task2) >= end(task1) + min_delay).\n",
" in_main_solve: indicates if this is the main solve procedure.\n",
" initial_solution: A valid assignment used to hint the search.\n",
" lower_bound: A valid lower bound of the makespan objective.\n",
"\n",
" Returns:\n",
" (lower_bound of the objective, best solution found, assignment)\n",
" \"\"\"\n",
" # Create the model.\n",
" model = cp_model.CpModel()\n",
" model.SetName(problem.name)\n",
"\n",
" num_tasks = len(problem.tasks)\n",
" num_resources = len(problem.resources)\n",
"\n",
" all_active_tasks = range(1, num_tasks - 1)\n",
" all_active_tasks = list(active_tasks)\n",
" all_active_tasks.sort()\n",
" all_resources = range(num_resources)\n",
"\n",
" horizon = problem.deadline if problem.deadline != -1 else problem.horizon\n",
" if FLAGS.horizon > 0:\n",
" horizon = FLAGS.horizon\n",
" if horizon == -1: # Naive computation.\n",
" if _HORIZON.value > 0:\n",
" horizon = _HORIZON.value\n",
" elif delays and in_main_solve and (source, sink) in delays:\n",
" horizon = delays[(source, sink)][1]\n",
" elif horizon == -1: # Naive computation.\n",
" horizon = sum(max(r.duration for r in t.recipes) for t in problem.tasks)\n",
" if problem.is_rcpsp_max:\n",
" for t in problem.tasks:\n",
@@ -170,18 +305,21 @@
" for rd in sd.recipe_delays:\n",
" for d in rd.min_delays:\n",
" horizon += abs(d)\n",
" print(f' - horizon = {horizon}')\n",
" if in_main_solve:\n",
" print(f'Horizon = {horizon}', flush=True)\n",
"\n",
" # Containers.\n",
" task_starts = {}\n",
" task_ends = {}\n",
" task_durations = {}\n",
" task_intervals = {}\n",
" task_resource_to_energy = {}\n",
" task_to_resource_demands = collections.defaultdict(list)\n",
"\n",
" task_to_presence_literals = collections.defaultdict(list)\n",
" task_to_recipe_durations = collections.defaultdict(list)\n",
" task_resource_to_fixed_demands = collections.defaultdict(dict)\n",
" task_resource_to_max_energy = collections.defaultdict(int)\n",
"\n",
" resource_to_sum_of_demand_max = collections.defaultdict(int)\n",
"\n",
@@ -239,25 +377,33 @@
" task_to_presence_literals[t] = literals\n",
"\n",
" # Create the demand variable of the task for each resource.\n",
" for resource in all_resources:\n",
" demands = [\n",
" demand_matrix[(resource, recipe)] for recipe in all_recipes\n",
" ]\n",
" task_resource_to_fixed_demands[(t, resource)] = demands\n",
" for res in all_resources:\n",
" demands = [demand_matrix[(res, recipe)] for recipe in all_recipes]\n",
" task_resource_to_fixed_demands[(t, res)] = demands\n",
" demand_var = model.NewIntVarFromDomain(\n",
" cp_model.Domain.FromValues(demands), f'demand_{t}_{resource}')\n",
" cp_model.Domain.FromValues(demands), f'demand_{t}_{res}')\n",
" task_to_resource_demands[t].append(demand_var)\n",
"\n",
" # Link the recipe literals and the demand_var.\n",
" for r in all_recipes:\n",
" model.Add(demand_var == demand_matrix[(resource,\n",
" r)]).OnlyEnforceIf(\n",
" literals[r])\n",
" model.Add(demand_var == demand_matrix[(res, r)]).OnlyEnforceIf(\n",
" literals[r])\n",
"\n",
" resource_to_sum_of_demand_max[resource] += max(demands)\n",
" resource_to_sum_of_demand_max[res] += max(demands)\n",
"\n",
" # Create the energy expression for (task, resource):\n",
" for res in all_resources:\n",
" task_resource_to_energy[(t, res)] = sum(\n",
" literals[r] * task_to_recipe_durations[t][r] *\n",
" task_resource_to_fixed_demands[(t, res)][r]\n",
" for r in all_recipes)\n",
" task_resource_to_max_energy[(t, res)] = max(\n",
" task_to_recipe_durations[t][r] *\n",
" task_resource_to_fixed_demands[(t, res)][r]\n",
" for r in all_recipes)\n",
"\n",
" # Create makespan variable\n",
" makespan = model.NewIntVar(0, horizon, 'makespan')\n",
" makespan = model.NewIntVar(lower_bound, horizon, 'makespan')\n",
" makespan_size = model.NewIntVar(1, horizon, 'interval_makespan_size')\n",
" interval_makespan = model.NewIntervalVar(makespan, makespan_size,\n",
" model.NewConstant(horizon + 1),\n",
@@ -278,7 +424,7 @@
" for m1 in range(num_modes):\n",
" s1 = task_starts[task_id]\n",
" p1 = task_to_presence_literals[task_id][m1]\n",
" if next_id == num_tasks - 1:\n",
" if next_id == sink:\n",
" delay = delay_matrix.recipe_delays[m1].min_delays[0]\n",
" model.Add(s1 + delay <= makespan).OnlyEnforceIf(p1)\n",
" else:\n",
@@ -292,9 +438,9 @@
" # Normal dependencies (task ends before the start of successors).\n",
" for t in all_active_tasks:\n",
" for n in problem.tasks[t].successors:\n",
" if n == num_tasks - 1:\n",
" if n == sink:\n",
" model.Add(task_ends[t] <= makespan)\n",
" else:\n",
" elif n in active_tasks:\n",
" model.Add(task_ends[t] <= task_starts[n])\n",
"\n",
" # Containers for resource investment problems.\n",
@@ -302,25 +448,27 @@
" max_cost = 0 # Upper bound on the investment cost.\n",
"\n",
" # Create resources.\n",
" for r in all_resources:\n",
" resource = problem.resources[r]\n",
" for res in all_resources:\n",
" resource = problem.resources[res]\n",
" c = resource.max_capacity\n",
" if c == -1:\n",
" print(f'No capacity: {resource}')\n",
" c = resource_to_sum_of_demand_max[r]\n",
" c = resource_to_sum_of_demand_max[res]\n",
"\n",
" # RIP problems have only renewable resources, and no makespan.\n",
" if problem.is_resource_investment or resource.renewable:\n",
" intervals = [task_intervals[t] for t in all_active_tasks]\n",
" demands = [task_to_resource_demands[t][r] for t in all_active_tasks]\n",
" demands = [\n",
" task_to_resource_demands[t][res] for t in all_active_tasks\n",
" ]\n",
"\n",
" if problem.is_resource_investment:\n",
" capacity = model.NewIntVar(0, c, f'capacity_of_{r}')\n",
" capacity = model.NewIntVar(0, c, f'capacity_of_{res}')\n",
" model.AddCumulative(intervals, demands, capacity)\n",
" capacities.append(capacity)\n",
" max_cost += c * resource.unit_cost\n",
" else: # Standard renewable resource.\n",
" if FLAGS.use_interval_makespan:\n",
" if _USE_INTERVAL_MAKESPAN.value:\n",
" intervals.append(interval_makespan)\n",
" demands.append(c)\n",
"\n",
@@ -330,10 +478,10 @@
" reservoir_starts = []\n",
" reservoir_demands = []\n",
" for t in all_active_tasks:\n",
" if task_resource_to_fixed_demands[(t, r)][0]:\n",
" if task_resource_to_fixed_demands[(t, res)][0]:\n",
" reservoir_starts.append(task_starts[t])\n",
" reservoir_demands.append(\n",
" task_resource_to_fixed_demands[(t, r)][0])\n",
" task_resource_to_fixed_demands[(t, res)][0])\n",
" model.AddReservoirConstraint(reservoir_starts,\n",
" reservoir_demands,\n",
" resource.min_capacity,\n",
@@ -341,7 +489,8 @@
" else: # No producer-consumer. We just sum the demands.\n",
" model.Add(\n",
" cp_model.LinearExpr.Sum([\n",
" task_to_resource_demands[t][r] for t in all_active_tasks\n",
" task_to_resource_demands[t][res]\n",
" for t in all_active_tasks\n",
" ]) <= c)\n",
"\n",
" # Objective.\n",
@@ -355,6 +504,73 @@
"\n",
" model.Minimize(objective)\n",
"\n",
" # Add min delay constraints.\n",
" if delays is not None:\n",
" for (local_start, local_end), (min_delay, _) in delays.items():\n",
" if local_start == source and local_end in active_tasks:\n",
" model.Add(task_starts[local_end] >= min_delay)\n",
" elif local_start in active_tasks and local_end == sink:\n",
" model.Add(makespan >= task_ends[local_start] + min_delay)\n",
" elif local_start in active_tasks and local_end in active_tasks:\n",
" model.Add(task_starts[local_end] >= task_ends[local_start] +\n",
" min_delay)\n",
"\n",
" problem_is_single_mode = True\n",
" for t in all_active_tasks:\n",
" if len(task_to_presence_literals[t]) > 1:\n",
" problem_is_single_mode = False\n",
" break\n",
"\n",
" # Add sentinels.\n",
" task_starts[source] = 0\n",
" task_ends[source] = 0\n",
" task_to_presence_literals[0].append(True)\n",
" task_starts[sink] = makespan\n",
" task_to_presence_literals[sink].append(True)\n",
"\n",
" # For multi-mode problems, add a redundant energetic constraint:\n",
" # for every (start, end, in_between_tasks) extracted from the precedence\n",
" # graph, it add the energetic relaxation:\n",
" # (start_var('end') - end_var('start')) * capacity_max >=\n",
" # sum of linearized energies of all tasks from 'in_between_tasks'\n",
" if (not problem.is_resource_investment and\n",
" not problem.is_consumer_producer and\n",
" _ADD_REDUNDANT_ENERGETIC_CONSTRAINTS.value and in_main_solve and\n",
" not problem_is_single_mode):\n",
" added_constraints = 0\n",
" ignored_constraits = 0\n",
" for local_start, local_end, common in intervals_of_tasks:\n",
" for res in all_resources:\n",
" resource = problem.resources[res]\n",
" if not resource.renewable:\n",
" continue\n",
" c = resource.max_capacity\n",
" if delays and (local_start, local_end) in delays:\n",
" min_delay, _ = delays[local_start, local_end]\n",
" sum_of_max_energies = sum(\n",
" task_resource_to_max_energy[(t, res)] for t in common)\n",
" if sum_of_max_energies <= c * min_delay:\n",
" ignored_constraits += 1\n",
" continue\n",
" model.Add(\n",
" c *\n",
" (task_starts[local_end] - task_ends[local_start]) >= sum(\n",
" task_resource_to_energy[(t, res)] for t in common))\n",
" added_constraints += 1\n",
" print(\n",
" f'Added {added_constraints} redundant energetic constraints, and ' +\n",
" f'ignored {ignored_constraits} constraints.',\n",
" flush=True)\n",
"\n",
" # Add solution hint.\n",
" if initial_solution:\n",
" for t in all_active_tasks:\n",
" model.AddHint(task_starts[t], initial_solution.start_of_task[t])\n",
" if len(task_to_presence_literals[t]) > 1:\n",
" selected = initial_solution.selected_recipe_of_task[t]\n",
" model.AddHint(task_to_presence_literals[t][selected], 1)\n",
"\n",
" # Write model to file.\n",
" if proto_file:\n",
" print(f'Writing proto to{proto_file}')\n",
" with open(proto_file, 'w') as text_file:\n",
@@ -364,14 +580,218 @@
" solver = cp_model.CpSolver()\n",
" if params:\n",
" text_format.Parse(params, solver.parameters)\n",
" solver.parameters.log_search_progress = True\n",
" solver.Solve(model)\n",
" if in_main_solve:\n",
" solver.parameters.log_search_progress = True\n",
" status = solver.Solve(model)\n",
" if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:\n",
" assignment = rcpsp_pb2.RcpspAssignment()\n",
" for t in range(len(problem.tasks)):\n",
" if t in task_starts:\n",
" assignment.start_of_task.append(solver.Value(task_starts[t]))\n",
" for r in range(len(task_to_presence_literals[t])):\n",
" if solver.BooleanValue(task_to_presence_literals[t][r]):\n",
" assignment.selected_recipe_of_task.append(r)\n",
" break\n",
" else: # t is not an active task.\n",
" assignment.start_of_task.append(0)\n",
" assignment.selected_recipe_of_task.append(0)\n",
" return (int(solver.BestObjectiveBound()), int(solver.ObjectiveValue()),\n",
" assignment)\n",
" return -1, -1, None\n",
"\n",
"\n",
"def ComputeDelaysBetweenNodes(problem, task_intervals):\n",
" \"\"\"Computes the min delays between all pairs of tasks in 'task_intervals'.\n",
"\n",
" Args:\n",
" problem: The protobuf of the model.\n",
" task_intervals: The output of the AnalysePrecedenceGraph().\n",
"\n",
" Returns:\n",
" a list of (task1, task2, min_delay_between_task1_and_task2)\n",
" \"\"\"\n",
" print('Computing the minimum delay between pairs of intervals')\n",
" delays = {}\n",
" if (problem.is_resource_investment or problem.is_consumer_producer or\n",
" problem.is_rcpsp_max or _DELAY_TIME_LIMIT.value <= 0.0):\n",
" return delays, None, False\n",
"\n",
" complete_problem_assignment = None\n",
" num_optimal_delays = 0\n",
" num_delays_not_found = 0\n",
" optimal_found = True\n",
" for start_task, end_task, active_tasks in task_intervals:\n",
" min_delay, feasible_delay, assignment = SolveRcpsp(\n",
" problem, '',\n",
" f'num_search_workers:16,max_time_in_seconds:{_DELAY_TIME_LIMIT.value}',\n",
" active_tasks, start_task, end_task, [], delays)\n",
" if min_delay != -1:\n",
" delays[(start_task, end_task)] = min_delay, feasible_delay\n",
" if start_task == 0 and end_task == len(problem.tasks) - 1:\n",
" complete_problem_assignment = assignment\n",
" if min_delay == feasible_delay:\n",
" num_optimal_delays += 1\n",
" else:\n",
" optimal_found = False\n",
" else:\n",
" num_delays_not_found += 1\n",
" optimal_found = False\n",
"\n",
" print(f' - #optimal delays = {num_optimal_delays}', flush=True)\n",
" if num_delays_not_found:\n",
" print(f' - #not computed delays = {num_delays_not_found}', flush=True)\n",
"\n",
" return delays, complete_problem_assignment, optimal_found\n",
"\n",
"\n",
"def AcceptNewCandidate(problem, after, demand_map, current, candidate):\n",
" \"\"\"Check if candidate is compatible with the tasks in current.\"\"\"\n",
" for c in current:\n",
" if candidate in after[c] or c in after[candidate]:\n",
" return False\n",
"\n",
" all_resources = range(len(problem.resources))\n",
" for res in all_resources:\n",
" resource = problem.resources[res]\n",
" if not resource.renewable:\n",
" continue\n",
" if (sum(demand_map[(t, res)] for t in current) +\n",
" demand_map[(candidate, res)] > resource.max_capacity):\n",
" return False\n",
"\n",
" return True\n",
"\n",
"\n",
"def ComputePreemptiveLowerBound(problem, after, lower_bound):\n",
" \"\"\"Computes a preemptive lower bound for the makespan statically.\n",
"\n",
" For this, it breaks all intervals into a set of intervals of size one.\n",
" Then it will try to assign all of them in a minimum number of configurations.\n",
" This is a standard complete set covering using column generation approach\n",
" where each column is a possible combination of intervals of size one.\n",
"\n",
" Args:\n",
" problem: The protobuf of the model.\n",
" after: a task to list of task dict that contains all tasks after a given\n",
" task.\n",
" lower_bound: A valid lower bound of the problem. It can be 0.\n",
"\n",
" Returns:\n",
" a valid lower bound of the problem.\n",
" \"\"\"\n",
" # Check this is a single mode problem.\n",
" if (problem.is_rcpsp_max or problem.is_resource_investment or\n",
" problem.is_consumer_producer):\n",
" return lower_bound\n",
"\n",
" demand_map = collections.defaultdict(int)\n",
" duration_map = {}\n",
" all_active_tasks = list(range(1, len(problem.tasks) - 1))\n",
" max_duration = 0\n",
" sum_of_demands = 0\n",
"\n",
" for t in all_active_tasks:\n",
" task = problem.tasks[t]\n",
" if len(task.recipes) > 1:\n",
" return 0\n",
" recipe = task.recipes[0]\n",
" duration_map[t] = recipe.duration\n",
" for demand, resource in zip(recipe.demands, recipe.resources):\n",
" demand_map[(t, resource)] = demand\n",
" max_duration = max(max_duration, recipe.duration)\n",
" sum_of_demands += demand\n",
"\n",
" print(f'Compute a bin-packing lower bound with {len(all_active_tasks)}' +\n",
" ' active tasks',\n",
" flush=True)\n",
" all_combinations = []\n",
"\n",
" for t in all_active_tasks:\n",
" new_combinations = [[t]]\n",
"\n",
" for c in all_combinations:\n",
" if AcceptNewCandidate(problem, after, demand_map, c, t):\n",
" new_combinations.append(c + [t])\n",
"\n",
" all_combinations.extend(new_combinations)\n",
"\n",
" print(f' - created {len(all_combinations)} combinations')\n",
" if len(all_combinations) > 5000000:\n",
" return lower_bound # Abort if too large.\n",
"\n",
" # Solve the selection model.\n",
"\n",
" # TODO(user): a few possible improvements:\n",
" # 1/ use \"dominating\" columns, i.e. if you can add a task to a column, then\n",
" # do not use that column.\n",
" # 2/ Merge all task with exactly same demands into one.\n",
" model = cp_model.CpModel()\n",
" model.SetName(f'lower_bound_{problem.name}')\n",
"\n",
" vars_per_task = collections.defaultdict(list)\n",
" all_vars = []\n",
" for c in all_combinations:\n",
" min_duration = max_duration\n",
" for t in c:\n",
" min_duration = min(min_duration, duration_map[t])\n",
" count = model.NewIntVar(0, min_duration, f'count_{t}')\n",
" all_vars.append(count)\n",
" for t in c:\n",
" vars_per_task[t].append(count)\n",
"\n",
" # Each task must be performed.\n",
" for t in all_active_tasks:\n",
" model.Add(sum(vars_per_task[t]) >= duration_map[t])\n",
"\n",
" # Objective\n",
" objective_var = model.NewIntVar(lower_bound, sum_of_demands,\n",
" 'objective_var')\n",
" model.Add(objective_var == sum(all_vars))\n",
"\n",
" model.Minimize(objective_var)\n",
"\n",
" # Solve model.\n",
" solver = cp_model.CpSolver()\n",
" solver.parameters.num_search_workers = 16\n",
" solver.parameters.max_time_in_seconds = _PREEMPTIVE_LB_TIME_LIMIT.value\n",
" status = solver.Solve(model)\n",
" if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:\n",
" status_str = 'optimal' if status == cp_model.OPTIMAL else ''\n",
" lower_bound = max(lower_bound, int(solver.BestObjectiveBound()))\n",
" print(f' - {status_str} static lower bound = {lower_bound}',\n",
" flush=True)\n",
"\n",
" return lower_bound\n",
"\n",
"\n",
"def main(_):\n",
" rcpsp_parser = pywraprcpsp.RcpspParser()\n",
" rcpsp_parser.ParseFile(FLAGS.input)\n",
" SolveRcpsp(rcpsp_parser.Problem(), FLAGS.output_proto, FLAGS.params)\n",
" rcpsp_parser.ParseFile(_INPUT.value)\n",
"\n",
" problem = rcpsp_parser.Problem()\n",
" PrintProblemStatistics(problem)\n",
"\n",
" intervals_of_tasks, after = AnalyseDependencyGraph(problem)\n",
" delays, initial_solution, optimal_found = ComputeDelaysBetweenNodes(\n",
" problem, intervals_of_tasks)\n",
"\n",
" last_task = len(problem.tasks) - 1\n",
" key = (0, last_task)\n",
" lower_bound = delays[key][0] if key in delays else 0\n",
" if not optimal_found and _PREEMPTIVE_LB_TIME_LIMIT.value > 0.0:\n",
" lower_bound = ComputePreemptiveLowerBound(problem, after, lower_bound)\n",
"\n",
" SolveRcpsp(problem=problem,\n",
" proto_file=_OUTPUT_PROTO.value,\n",
" params=_PARAMS.value,\n",
" active_tasks=set(range(1, last_task)),\n",
" source=0,\n",
" sink=last_task,\n",
" intervals_of_tasks=intervals_of_tasks,\n",
" delays=delays,\n",
" in_main_solve=True,\n",
" initial_solution=initial_solution,\n",
" lower_bound=lower_bound)\n",
"\n",
"\n",
"main()\n",

View File

@@ -85,10 +85,11 @@
"from ortools.sat.python import cp_model\n",
"from google.protobuf import text_format\n",
"\n",
"class FLAGS: pass\n",
"_OUTPUT_PROTO = flags.DEFINE_string(\n",
" 'output_proto', '', 'Output file to write the cp_model proto to.')\n",
"_PARAMS = flags.DEFINE_string('params', 'max_time_in_seconds:10.0',\n",
" 'Sat solver parameters.')\n",
"\n",
"FLAGS.output_proto = '' # Output file to write the cp_model proto to.\n",
"FLAGS.params = 'max_time_in_seconds:10.0' # Sat solver parameters.\n",
"\n",
"def negated_bounded_span(works, start, length):\n",
" \"\"\"Filters an isolated sub-sequence of variables assigned to True.\n",
@@ -123,8 +124,8 @@
" soft_max, hard_max, max_cost, prefix):\n",
" \"\"\"Sequence constraint on true variables with soft and hard bounds.\n",
"\n",
" This constraint look at every maximal contiguous sequence of variables\n",
" assigned to true. If forbids sequence of length < hard_min or > hard_max.\n",
" This constraint looks at every maximal contiguous sequence of variables\n",
" assigned to true. It forbids sequence of length < hard_min or > hard_max.\n",
" Then it creates penalty terms if the length is < soft_min or > soft_max.\n",
"\n",
" Args:\n",
@@ -484,7 +485,7 @@
"\n",
"\n",
"def main(_):\n",
" solve_shift_scheduling(FLAGS.params, FLAGS.output_proto)\n",
" solve_shift_scheduling(_PARAMS.value, _OUTPUT_PROTO.value)\n",
"\n",
"\n",
"main()\n",

View File

@@ -89,11 +89,13 @@
"from ortools.linear_solver import pywraplp\n",
"from ortools.sat.python import cp_model\n",
"\n",
"class FLAGS: pass\n",
"_PROBLEM = flags.DEFINE_integer('problem', 2, 'Problem id to solve.')\n",
"_BREAK_SYMMETRIES = flags.DEFINE_boolean(\n",
" 'break_symmetries', True, 'Break symmetries between equivalent orders.')\n",
"_SOLVER = flags.DEFINE_string(\n",
" 'solver', 'mip_column', 'Method used to solve: sat, sat_table, sat_column, '\n",
" 'mip_column.')\n",
"\n",
"FLAGS.problem = 2 # Problem id to solve.\n",
"FLAGS.break_symmetries = True # Break symmetries between equivalent orders.\n",
"FLAGS.solver = 'mip_column' # Method used to solve: sat, sat_table, sat_column, mip_column.\n",
"\n",
"def build_problem(problem_id):\n",
" \"\"\"Build problem data.\"\"\"\n",
@@ -798,15 +800,16 @@
" print('No solution')\n",
"\n",
"\n",
"def main(_=None):\n",
" if FLAGS.solver == 'sat':\n",
" steel_mill_slab(FLAGS.problem, FLAGS.break_symmetries)\n",
" elif FLAGS.solver == 'sat_table':\n",
" steel_mill_slab_with_valid_slabs(FLAGS.problem, FLAGS.break_symmetries)\n",
" elif FLAGS.solver == 'sat_column':\n",
" steel_mill_slab_with_column_generation(FLAGS.problem)\n",
"def main(_):\n",
" if _SOLVER.value == 'sat':\n",
" steel_mill_slab(_PROBLEM.value, _BREAK_SYMMETRIES.value)\n",
" elif _SOLVER.value == 'sat_table':\n",
" steel_mill_slab_with_valid_slabs(_PROBLEM.value,\n",
" _BREAK_SYMMETRIES.value)\n",
" elif _SOLVER.value == 'sat_column':\n",
" steel_mill_slab_with_column_generation(_PROBLEM.value)\n",
" else: # 'mip_column'\n",
" steel_mill_slab_with_mip_column_generation(FLAGS.problem)\n",
" steel_mill_slab_with_mip_column_generation(_PROBLEM.value)\n",
"\n",
"\n",
"main()\n",

View File

@@ -83,10 +83,10 @@
"metadata": {},
"outputs": [],
"source": [
"from ortools.graph.python import linear_sum_assignment\n",
"\n",
"import numpy as np\n",
"\n",
"from ortools.graph.python import linear_sum_assignment\n",
"\n",
"\n",
"def main():\n",
" \"\"\"Linear Sum Assignment example.\"\"\"\n",
@@ -99,8 +99,10 @@
" [45, 110, 95, 115],\n",
" ])\n",
"\n",
" # Let's transform this into 3 parallel vectors (start_nodes, end_nodes, arc_costs)\n",
" end_nodes_unraveled, start_nodes_unraveled = np.meshgrid(np.arange(costs.shape[1]),np.arange(costs.shape[0]))\n",
" # Let's transform this into 3 parallel vectors (start_nodes, end_nodes,\n",
" # arc_costs)\n",
" end_nodes_unraveled, start_nodes_unraveled = np.meshgrid(\n",
" np.arange(costs.shape[1]), np.arange(costs.shape[0]))\n",
" start_nodes = start_nodes_unraveled.ravel()\n",
" end_nodes = end_nodes_unraveled.ravel()\n",
" arc_costs = costs.ravel()\n",

View File

@@ -83,10 +83,10 @@
"metadata": {},
"outputs": [],
"source": [
"from ortools.graph.python import max_flow\n",
"\n",
"import numpy as np\n",
"\n",
"from ortools.graph.python import max_flow\n",
"\n",
"\n",
"def main():\n",
" \"\"\"MaxFlow simple interface example.\"\"\"\n",
@@ -100,9 +100,9 @@
" end_nodes = np.array([1, 2, 3, 2, 4, 3, 4, 2, 4])\n",
" capacities = np.array([20, 30, 10, 40, 30, 10, 20, 5, 20])\n",
"\n",
" # Add arcs in bulk. \n",
" # Add arcs in bulk.\n",
" # note: we could have used add_arc_with_capacity(start, end, capacity)\n",
" smf.add_arcs_with_capacity(start_nodes, end_nodes, capacities)\n",
" all_arcs = smf.add_arcs_with_capacity(start_nodes, end_nodes, capacities)\n",
"\n",
" # Find the maximum flow between node 0 and node 4.\n",
" status = smf.solve(0, 4)\n",
@@ -113,10 +113,10 @@
" exit(1)\n",
" print('Max flow:', smf.optimal_flow())\n",
" print('')\n",
" print(' Arc Flow / Capacity')\n",
" for i in range(smf.num_arcs()):\n",
" print('%1s -> %1s %3s / %3s' %\n",
" (smf.tail(i), smf.head(i), smf.flow(i), smf.capacity(i)))\n",
" print(' Arc Flow / Capacity')\n",
" solution_flows = smf.flows(all_arcs)\n",
" for arc, flow, capacity in zip(all_arcs, solution_flows, capacities):\n",
" print(f'{smf.tail(arc)} / {smf.head(arc)} {flow:3} / {capacity:3}')\n",
" print('Source side min-cut:', smf.get_source_side_min_cut())\n",
" print('Sink side min-cut:', smf.get_sink_side_min_cut())\n",
"\n",

View File

@@ -83,10 +83,10 @@
"metadata": {},
"outputs": [],
"source": [
"from ortools.graph.python import min_cost_flow\n",
"\n",
"import numpy as np\n",
"\n",
"from ortools.graph.python import min_cost_flow\n",
"\n",
"\n",
"def main():\n",
" \"\"\"MinCostFlow simple interface example.\"\"\"\n",
@@ -105,11 +105,11 @@
" supplies = [20, 0, 0, -5, -15]\n",
"\n",
" # Add arcs, capacities and costs in bulk using numpy.\n",
" smcf.add_arcs_with_capacity_and_unit_cost(start_nodes, end_nodes, capacities, unit_costs)\n",
" all_arcs = smcf.add_arcs_with_capacity_and_unit_cost(\n",
" start_nodes, end_nodes, capacities, unit_costs)\n",
"\n",
" # Add node supply.\n",
" for count, supply in enumerate(supplies):\n",
" smcf.set_node_supply(count, supply)\n",
" # Add supply for each nodes.\n",
" smcf.set_nodes_supply(np.arange(0, len(supplies)), supplies)\n",
"\n",
" # Find the min cost flow.\n",
" status = smcf.solve()\n",
@@ -118,14 +118,15 @@
" print('There was an issue with the min cost flow input.')\n",
" print(f'Status: {status}')\n",
" exit(1)\n",
" print('Minimum cost: ', smcf.optimal_cost())\n",
" print(f'Minimum cost: {smcf.optimal_cost()}')\n",
" print('')\n",
" print(' Arc Flow / Capacity Cost')\n",
" for i in range(smcf.num_arcs()):\n",
" cost = smcf.flow(i) * smcf.unit_cost(i)\n",
" print(' Arc Flow / Capacity Cost')\n",
" solution_flows = smcf.flows(all_arcs)\n",
" costs = solution_flows * unit_costs\n",
" for arc, flow, cost in zip(all_arcs, solution_flows, costs):\n",
" print(\n",
" '%1s -> %1s %3s / %3s %3s' %\n",
" (smcf.tail(i), smcf.head(i), smcf.flow(i), smcf.capacity(i), cost))\n",
" f'{smcf.tail(arc):1} -> {smcf.head(arc)} {flow:3} / {smcf.capacity(arc):3} {cost}'\n",
" )\n",
"\n",
"\n",
"main()\n",

View File

@@ -41,10 +41,10 @@
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/model_builder/assignment_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/linear_solver/assignment_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/model_builder/samples/assignment_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/linear_solver/samples/assignment_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
@@ -82,7 +82,7 @@
"metadata": {},
"outputs": [],
"source": [
"from ortools.model_builder.python import model_builder\n",
"from ortools.linear_solver.python import model_builder\n",
"\n",
"\n",
"def main():\n",

View File

@@ -41,10 +41,10 @@
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/model_builder/bin_packing_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/linear_solver/bin_packing_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/model_builder/samples/bin_packing_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/linear_solver/samples/bin_packing_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
@@ -82,7 +82,7 @@
"metadata": {},
"outputs": [],
"source": [
"from ortools.model_builder.python import model_builder\n",
"from ortools.linear_solver.python import model_builder\n",
"\n",
"\n",
"def create_data_model():\n",

View File

@@ -135,7 +135,7 @@
" status = solver.Solve()\n",
"\n",
" if status == pywraplp.Solver.OPTIMAL:\n",
" num_bins = 0.\n",
" num_bins = 0\n",
" for j in data['bins']:\n",
" if y[j].solution_value() == 1:\n",
" bin_items = []\n",
@@ -144,7 +144,7 @@
" if x[i, j].solution_value() > 0:\n",
" bin_items.append(i)\n",
" bin_weight += data['weights'][i]\n",
" if bin_weight > 0:\n",
" if bin_items:\n",
" num_bins += 1\n",
" print('Bin number', j)\n",
" print(' Items packed:', bin_items)\n",

View File

@@ -41,10 +41,10 @@
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/model_builder/simple_lp_program_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/linear_solver/simple_lp_program_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/model_builder/samples/simple_lp_program_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/linear_solver/samples/simple_lp_program_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
@@ -85,7 +85,7 @@
"source": [
"import math\n",
"\n",
"from ortools.model_builder.python import model_builder\n",
"from ortools.linear_solver.python import model_builder\n",
"\n",
"\n",
"def main():\n",

View File

@@ -41,10 +41,10 @@
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/model_builder/simple_mip_program_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/linear_solver/simple_mip_program_mb.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/model_builder/samples/simple_mip_program_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/linear_solver/samples/simple_mip_program_mb.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
@@ -85,7 +85,7 @@
"source": [
"import math\n",
"\n",
"from ortools.model_builder.python import model_builder\n",
"from ortools.linear_solver.python import model_builder\n",
"\n",
"\n",
"def main():\n",

View File

@@ -0,0 +1,186 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2022 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# simple_pdlp_program"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/pdlp/simple_pdlp_program.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/pdlp/samples/simple_pdlp_program.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"!pip install ortools"
]
},
{
"cell_type": "markdown",
"id": "description",
"metadata": {},
"source": [
"Solves a simple LP using PDLP's direct Python API.\n",
"\n",
"Note: The direct API is generally for advanced use cases. It is matrix-based,\n",
"that is, you specify the LP using matrices and vectors instead of algebraic\n",
"expressions. You can also use PDLP via the algebraic pywraplp API (see\n",
"linear_solver/samples/simple_lp_program.py).\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import scipy.sparse\n",
"\n",
"from ortools.pdlp import solve_log_pb2\n",
"from ortools.pdlp import solvers_pb2\n",
"from ortools.pdlp.python import pywrap_pdlp\n",
"from ortools.init import pywrapinit\n",
"\n",
"\n",
"def simple_lp() -> pywrap_pdlp.QuadraticProgram:\n",
" \"\"\"Returns a small LP.\n",
"\n",
" min 5.5 x_0 - 2 x_1 - x_2 + x_3 - 14 s.t.\n",
" 2 x_0 + x_1 + x_2 + 2 x_3 = 12\n",
" x_0 + x_2 <= 7\n",
" 4 x_0 >= -4\n",
" -1 <= 1.5 x_2 - x_3 <= 1\n",
" -infinity <= x_0 <= infinity\n",
" -2 <= x_1 <= infinity\n",
" -infinity <= x_2 <= 6\n",
" 2.5 <= x_3 <= 3.5\n",
" \"\"\"\n",
" lp = pywrap_pdlp.QuadraticProgram()\n",
" lp.objective_offset = -14\n",
" lp.objective_vector = [5.5, -2, -1, 1]\n",
" lp.constraint_lower_bounds = [12, -np.inf, -4, -1]\n",
" lp.constraint_upper_bounds = [12, 7, np.inf, 1]\n",
" lp.variable_lower_bounds = [-np.inf, -2, -np.inf, 2.5]\n",
" lp.variable_upper_bounds = [np.inf, np.inf, 6, 3.5]\n",
" # Most use cases should initialize the sparse constraint matrix without\n",
" # constructing a dense matrix first! We use a np.array here for convenience\n",
" # only.\n",
" constraint_matrix = np.array([[2, 1, 1, 2], [1, 0, 1, 0], [4, 0, 0, 0],\n",
" [0, 0, 1.5, -1]])\n",
" lp.constraint_matrix = scipy.sparse.csc_matrix(constraint_matrix)\n",
" return lp\n",
"\n",
"\n",
"def main() -> None:\n",
" params = solvers_pb2.PrimalDualHybridGradientParams()\n",
" # Below are some common parameters to modify. Here, we just re-assign the\n",
" # defaults.\n",
" optimality_criteria = params.termination_criteria.simple_optimality_criteria\n",
" optimality_criteria.eps_optimal_relative = 1.0e-6\n",
" optimality_criteria.eps_optimal_absolute = 1.0e-6\n",
" params.termination_criteria.time_sec_limit = np.inf\n",
" params.num_threads = 1\n",
" params.verbosity_level = 0\n",
" params.presolve_options.use_glop = False\n",
"\n",
" # Call the main solve function. Note that a quirk of the pywrap11 API forces\n",
" # us to serialize the `params` and deserialize the `solve_log` proto messages.\n",
" result = pywrap_pdlp.primal_dual_hybrid_gradient(simple_lp(),\n",
" params.SerializeToString())\n",
" solve_log = solve_log_pb2.SolveLog.FromString(result.solve_log_str)\n",
"\n",
" if solve_log.termination_reason == solve_log_pb2.TERMINATION_REASON_OPTIMAL:\n",
" print('Solve successful')\n",
" else:\n",
" print(\n",
" 'Solve not successful. Status:',\n",
" solve_log_pb2.TerminationReason.Name(solve_log.termination_reason))\n",
"\n",
" # Solutions vectors are always returned. *However*, their interpretation\n",
" # depends on termination_reason! See primal_dual_hybrid_gradient.h for more\n",
" # details on what the vectors mean if termination_reason is not\n",
" # TERMINATION_REASON_OPTIMAL.\n",
" print('Primal solution:', result.primal_solution)\n",
" print('Dual solution:', result.dual_solution)\n",
" print('Reduced costs:', result.reduced_costs)\n",
"\n",
" solution_type = solve_log.solution_type\n",
" print('Solution type:', solve_log_pb2.PointType.Name(solution_type))\n",
" for ci in solve_log.solution_stats.convergence_information:\n",
" if ci.candidate_type == solution_type:\n",
" print('Primal objective:', ci.primal_objective)\n",
" print('Dual objective:', ci.dual_objective)\n",
"\n",
" print('Iterations:', solve_log.iteration_count)\n",
" print('Solve time (sec):', solve_log.solve_time_sec)\n",
"\n",
"\n",
"pywrapinit.CppBridge.InitLogging('simple_pdlp_program.py')\n",
"cpp_flags = pywrapinit.CppFlags()\n",
"cpp_flags.logtostderr = True\n",
"cpp_flags.log_prefix = False\n",
"pywrapinit.CppBridge.SetFlags(cpp_flags)\n",
"main()\n",
"\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -0,0 +1,131 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "google",
"metadata": {},
"source": [
"##### Copyright 2022 Google LLC."
]
},
{
"cell_type": "markdown",
"id": "apache",
"metadata": {},
"source": [
"Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"you may not use this file except in compliance with the License.\n",
"You may obtain a copy of the License at\n",
"\n",
" http://www.apache.org/licenses/LICENSE-2.0\n",
"\n",
"Unless required by applicable law or agreed to in writing, software\n",
"distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"See the License for the specific language governing permissions and\n",
"limitations under the License.\n"
]
},
{
"cell_type": "markdown",
"id": "basename",
"metadata": {},
"source": [
"# non_linear_sat"
]
},
{
"cell_type": "markdown",
"id": "link",
"metadata": {},
"source": [
"<table align=\"left\">\n",
"<td>\n",
"<a href=\"https://colab.research.google.com/github/google/or-tools/blob/main/examples/notebook/sat/non_linear_sat.ipynb\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/colab_32px.png\"/>Run in Google Colab</a>\n",
"</td>\n",
"<td>\n",
"<a href=\"https://github.com/google/or-tools/blob/main/ortools/sat/samples/non_linear_sat.py\"><img src=\"https://raw.githubusercontent.com/google/or-tools/main/tools/github_32px.png\"/>View source on GitHub</a>\n",
"</td>\n",
"</table>"
]
},
{
"cell_type": "markdown",
"id": "doc",
"metadata": {},
"source": [
"First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "install",
"metadata": {},
"outputs": [],
"source": [
"!pip install ortools"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "code",
"metadata": {},
"outputs": [],
"source": [
"#!/usr/bin/env python3\n",
"# Copyright 2010-2022 Google LLC\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"# Finds a rectangle with maximum available area for given perimeter\n",
"# using AddMultiplicationEquality\n",
"\n",
"from ortools.sat.python import cp_model\n",
"\n",
"\n",
"def NonLinearSat():\n",
" perimeter = 20\n",
"\n",
" model = cp_model.CpModel()\n",
"\n",
" x = model.NewIntVar(0, perimeter, \"x\")\n",
" y = model.NewIntVar(0, perimeter, \"y\")\n",
" model.Add(2 * (x + y) == perimeter)\n",
"\n",
" area = model.NewIntVar(0, perimeter * perimeter, \"s\")\n",
" model.AddMultiplicationEquality(area, x, y)\n",
"\n",
" model.Maximize(area)\n",
"\n",
" solver = cp_model.CpSolver()\n",
"\n",
" status = solver.Solve(model)\n",
"\n",
" if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:\n",
" print('x = %i' % solver.Value(x))\n",
" print('y = %i' % solver.Value(y))\n",
" print('s = %i' % solver.Value(area))\n",
" else:\n",
" print('No solution found.')\n",
"\n",
"\n",
"NonLinearSat()\n",
"\n"
]
}
],
"metadata": {},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@@ -127,12 +127,12 @@
" else:\n",
" max_shifts_per_nurse = min_shifts_per_nurse + 1\n",
" for n in all_nurses:\n",
" num_shifts_worked = []\n",
" shifts_worked = []\n",
" for d in all_days:\n",
" for s in all_shifts:\n",
" num_shifts_worked.append(shifts[(n, d, s)])\n",
" model.Add(min_shifts_per_nurse <= sum(num_shifts_worked))\n",
" model.Add(sum(num_shifts_worked) <= max_shifts_per_nurse)\n",
" shifts_worked.append(shifts[(n, d, s)])\n",
" model.Add(min_shifts_per_nurse <= sum(shifts_worked))\n",
" model.Add(sum(shifts_worked) <= max_shifts_per_nurse)\n",
"\n",
" # Creates the solver and solve.\n",
" solver = cp_model.CpSolver()\n",