diff --git a/.github/skills/cuopt-lp-milp/resources/server_examples.md b/.github/skills/cuopt-lp-milp/resources/server_examples.md index 521d8a6ead..b8d5ddfb43 100644 --- a/.github/skills/cuopt-lp-milp/resources/server_examples.md +++ b/.github/skills/cuopt-lp-milp/resources/server_examples.md @@ -203,6 +203,6 @@ For more complete examples, read these files: | Basic MILP (curl) | `docs/cuopt/source/cuopt-server/examples/milp/examples/basic_milp_example.sh` | MILP shell script | | Incumbent Callback | `docs/cuopt/source/cuopt-server/examples/milp/examples/incumbent_callback_example.py` | MIP progress tracking | | Abort Job | `docs/cuopt/source/cuopt-server/examples/milp/examples/abort_job_example.py` | Canceling requests | -| Batch Mode | `docs/cuopt/source/cuopt-server/examples/lp/examples/batch_mode_example.sh` | Multiple problems | +| Multiple LPs | `docs/cuopt/source/cuopt-server/examples/lp/examples/batch_mode_example.sh` | Multiple problems (sequential; batch deprecated) | These examples are tested by CI (`ci/test_doc_examples.sh`) and represent canonical usage. diff --git a/docs/cuopt/source/cuopt-server/examples/lp-examples.rst b/docs/cuopt/source/cuopt-server/examples/lp-examples.rst index 52d401281c..a5a3e6e3fd 100644 --- a/docs/cuopt/source/cuopt-server/examples/lp-examples.rst +++ b/docs/cuopt/source/cuopt-server/examples/lp-examples.rst @@ -2,7 +2,7 @@ LP Python Examples =============================== -The following example showcases how to use the ``CuOptServiceSelfHostClient`` to solve a simple LP problem in normal mode and batch mode (where multiple problems are solved at once). +The following example showcases how to use the ``CuOptServiceSelfHostClient`` to solve a simple LP problem in normal mode and with multiple problems (solved sequentially; batch mode is deprecated). The OpenAPI specification for the server is available in :doc:`open-api spec <../../open-api>`. 
The example data is structured as per the OpenAPI specification for the server, please refer :doc:`LPData under "POST /cuopt/request" <../../open-api>` under schema section. LP and MILP share same spec. @@ -15,10 +15,10 @@ If you want to run server locally, please run the following command in a termina export port=5000 python -m cuopt_server.cuopt_service --ip $ip --port $port -.. _generic-example-with-normal-and-batch-mode: +.. _generic-example-with-normal-and-multiple-lps: -Genric Example With Normal Mode and Batch Mode ------------------------------------------------- +Generic Example With Normal Mode and Multiple LPs (Batch Deprecated) +--------------------------------------------------------------------- :download:`basic_lp_example.py ` @@ -402,7 +402,10 @@ In case the user needs to update solver settings through CLI, the option ``-ss`` export port=5000 cuopt_sh data.json -t LP -i $ip -p $port -ss '{"tolerances": {"optimality": 0.0001}, "time_limit": 5}' -In the case of batch mode, you can send a bunch of ``mps`` files at once, and acquire results. The batch mode works only for ``mps`` in the case of CLI: +In the case of batch mode, you can send a bunch of ``mps`` files at once, and acquire results. The batch mode works only for ``mps`` in the case of CLI. + +.. note:: + LP batch mode is deprecated. Multiple problems are now solved sequentially. .. note:: Batch mode is not available for MILP problems. diff --git a/docs/cuopt/source/cuopt-server/examples/lp/examples/batch_mode_example.sh b/docs/cuopt/source/cuopt-server/examples/lp/examples/batch_mode_example.sh index c3b884dc69..d30a378c59 100755 --- a/docs/cuopt/source/cuopt-server/examples/lp/examples/batch_mode_example.sh +++ b/docs/cuopt/source/cuopt-server/examples/lp/examples/batch_mode_example.sh @@ -4,10 +4,11 @@ # # LP Batch Mode CLI Example # -# This example demonstrates how to solve multiple LP problems in batch mode -# using MPS files with the cuopt_sh CLI tool. 
+# This example demonstrates how to solve multiple LP problems using MPS files +# with the cuopt_sh CLI tool. Multiple problems are solved sequentially. # -# Note: Batch mode works only with MPS files in CLI and is not available for MILP. +# Note: LP batch mode is deprecated. Multiple problems are now solved +# sequentially rather than in parallel. # # Requirements: # - cuOpt server running on localhost:5000 @@ -52,4 +53,4 @@ echo "=== Solving Multiple MPS Files in Batch Mode ===" cuopt_sh "$mps_file" "$mps_file" "$mps_file" -t LP -i $ip -p $port -ss '{"tolerances": {"optimality": 0.0001}, "time_limit": 5}' echo "" -echo "Note: Batch mode is only available for LP with MPS files, not for MILP." +echo "Note: Multiple LPs are solved sequentially (batch mode is deprecated)." diff --git a/docs/cuopt/source/faq.rst b/docs/cuopt/source/faq.rst index 0c3a0e219f..35c0f22e89 100644 --- a/docs/cuopt/source/faq.rst +++ b/docs/cuopt/source/faq.rst @@ -329,7 +329,9 @@ Linear Programming FAQs .. dropdown:: How small and how many problems can I give when using the batch mode? - The batch mode allows solving many LPs in parallel to try to fully utilize the GPU when LP problems are too small. Using H100 SXM, the problem should be of at least 1K elements, and giving more than 100 LPs will usually not increase performance. + LP batch mode is deprecated. Multiple problems are now solved sequentially. + For parallelism, wrap individual ``Solve`` calls in your own concurrency + (for example with ``concurrent.futures``). .. dropdown:: Can the solver run on dense problems? @@ -349,7 +351,8 @@ Linear Programming FAQs - Hardware: If using self-hosted, you should use a recent server-grade GPU. We recommend H100 SXM (not the PCIE version). - Tolerance: The set tolerance usually has a massive impact on performance. Try the lowest possible value using ``set_optimality_tolerance`` until you have reached your lowest possible acceptable accuracy. 
- PDLP Solver mode: PDLP solver mode will change the way PDLP internally optimizes the problem. The mode choice can drastically impact how fast a specific problem will be solved. You should test the different modes to see which one fits your problem best. - - Batch mode: In case you know upfront that you need to solve multiple LP problems, instead of solving them sequentially, you should use the batch mode which can solve multiple LPs in parallel. + - Multiple LPs: LP batch mode is deprecated. Solve multiple problems with + sequential ``Solve`` calls, or implement your own parallelism. - Presolve: Presolve can reduce problem size and improve solve time. .. dropdown:: What solver mode should I choose? diff --git a/docs/cuopt/source/lp-qp-features.rst b/docs/cuopt/source/lp-qp-features.rst index 4bd178ed53..81038dd4ca 100644 --- a/docs/cuopt/source/lp-qp-features.rst +++ b/docs/cuopt/source/lp-qp-features.rst @@ -131,7 +131,7 @@ Logging Callback in the Service In the cuOpt service API, the ``log_file`` value in ``solver_configs`` is ignored. -If however you set the ``solver_logs`` flag on the ``/cuopt/request`` REST API call, users can fetch the log file content from the webserver at ``/cuopt/logs/{id}``. Using the logging callback feature through the cuOpt client is shown in :ref:`Examples ` on the self-hosted page. +If however you set the ``solver_logs`` flag on the ``/cuopt/request`` REST API call, users can fetch the log file content from the webserver at ``/cuopt/logs/{id}``. Using the logging callback feature through the cuOpt client is shown in :ref:`Examples ` on the self-hosted page. Infeasibility Detection @@ -155,7 +155,7 @@ The user may specify a time limit to the solver. By default the solver runs unti Batch Mode ---------- -Users can submit a set of problems which will be solved in a batch. Problems will be solved at the same time in parallel to fully utilize the GPU. Checkout :ref:`self-hosted client ` example in thin client. 
+Users can submit a set of problems which will be solved in a batch. Problems will be solved at the same time in parallel to fully utilize the GPU. Checkout :ref:`self-hosted client ` example in thin client. Multi-GPU Mode -------------- diff --git a/python/cuopt/cuopt/linear_programming/solver/solver.py b/python/cuopt/cuopt/linear_programming/solver/solver.py index e80ad3b6f4..1474d724aa 100644 --- a/python/cuopt/cuopt/linear_programming/solver/solver.py +++ b/python/cuopt/cuopt/linear_programming/solver/solver.py @@ -3,6 +3,7 @@ import os import time +import warnings from cuopt.linear_programming.solver import solver_wrapper from cuopt.linear_programming.solver_settings import SolverSettings @@ -111,6 +112,13 @@ def BatchSolve(data_model_list, solver_settings=None): Solve the list of Linear Programs passed as input and returns the solutions and total solve time. + .. deprecated:: + LP BatchSolve is deprecated and will be removed in a future release. + It runs concurrent LPs in multiple C++ threads, which can be done + independently in user code. Use sequential :func:`Solve` calls instead, + e.g. ``[Solve(dm, solver_settings) for dm in data_model_list]``, or + implement your own parallelism (e.g. ``concurrent.futures``). + Data Model objects can be construed through setters (see linear_programming.DataModel class) or through a MPS file (see cuopt_mps_parser.ParseMps function) @@ -179,6 +187,13 @@ def BatchSolve(data_model_list, solver_settings=None): >>> # Print the value of one specific variable >>> print(solution.get_vars()["var_name"]) """ + warnings.warn( + "LP BatchSolve is deprecated and will be removed in a future release. " + "Use sequential Solve() calls or implement your own parallelism " + "(e.g. 
concurrent.futures).", + DeprecationWarning, + stacklevel=2, + ) if solver_settings is None: solver_settings = SolverSettings() diff --git a/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py b/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py index 9f94916ff0..86a2bbec40 100644 --- a/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py +++ b/python/cuopt/cuopt/tests/linear_programming/test_lp_solver.py @@ -477,11 +477,13 @@ def test_parser_and_batch_solver(): settings.set_parameter(CUOPT_METHOD, SolverMethod.PDLP) settings.set_optimality_tolerance(1e-4) - # Call BatchSolve + # Call BatchSolve (deprecated; use sequential Solve instead) + # Builds that include the deprecation emit a DeprecationWarning here; + # it is deliberately not asserted by this test batch_solution, solve_time = solver.BatchSolve(data_model_list, settings) # Call Solve on each individual data model object - individual_solutions = [] * nb_solves + individual_solutions = [] for i in range(nb_solves): individual_solution = solver.Solve( cuopt_mps_parser.ParseMps(file_path), settings ) @@ -494,6 +496,16 @@ assert ( batch_solution[i].get_termination_status() == individual_solutions[i].get_termination_status() ) + assert batch_solution[i].get_primal_objective() == pytest.approx( + individual_solutions[i].get_primal_objective(), rel=1e-6, abs=1e-8 + ) + assert np.array( + batch_solution[i].get_primal_solution() + ) == pytest.approx( + np.array(individual_solutions[i].get_primal_solution()), + rel=1e-5, + abs=1e-7, + ) def test_warm_start(): @@ -570,7 +582,7 @@ def test_batch_solver_warm_start(): settings.set_pdlp_warm_start_data(solution.get_pdlp_warm_start_data()) - # Should raise an exception + # Should raise an exception (BatchSolve does not support warmstart) with pytest.raises(Exception): solver.BatchSolve(data_model_list, settings) diff --git a/python/cuopt_self_hosted/README.md b/python/cuopt_self_hosted/README.md index 
f15f466936..131cd01934 100644 --- a/python/cuopt_self_hosted/README.md +++ b/python/cuopt_self_hosted/README.md @@ -30,7 +30,7 @@ Check the help with 'cuopt_sh -h' for more detailed information. data: cuOpt problem data file or a request id to repoll. If the -f option is used, this indicates the path of a file accessible to the server. -id: space separated list of reqIds to use as initial solutions for VRP problems. The list is terminated by the next option flag or the end of line. - -wid: reqId of a solution to use as a warmstart for a single LP problem. Not enabled for batch LP problems. + -wid: reqId of a solution to use as a warmstart for a single LP problem. Not enabled when multiple LP problems are passed. -ca: caches a problem on the server so that it may be run multiple times by reqId. Problem is not solved, only cached. -f: Indicates that the DATA argument is the relative path of a cuOpt data file under the server's data directory. -d: Deletes a cached problem or aborts a running or queued solution. diff --git a/python/cuopt_self_hosted/cuopt_sh_client/cuopt_self_host_client.py b/python/cuopt_self_hosted/cuopt_sh_client/cuopt_self_host_client.py index 066e81b026..d1851dd873 100644 --- a/python/cuopt_self_hosted/cuopt_sh_client/cuopt_self_host_client.py +++ b/python/cuopt_self_hosted/cuopt_sh_client/cuopt_self_host_client.py @@ -721,8 +721,6 @@ def get_LP_solve( Parameters ---------- cuopt_data_models : - Note - Batch mode is only supported in LP and not in MILP - File path to mps or json/dict/DataModel returned by cuopt_mps_parser/list[mps file paths]/list[dict]/list[DataModel]. @@ -730,9 +728,8 @@ def get_LP_solve( /DataModel returned by cuopt_mps_parser/ path to json file/ dictionary. - For batch problem, input should be either a list of paths to mps - files/ a list of DataModel returned by cuopt_mps_parser/ a - list of dictionaries. 
+ For multiple problems, a list of paths/dicts/DataModels may be + passed; they are solved sequentially (LP batch mode is deprecated). To use a cached cuopt problem data, input should be a uuid identifying the reqId of the cached data. diff --git a/python/cuopt_self_hosted/cuopt_sh_client/cuopt_sh.py b/python/cuopt_self_hosted/cuopt_sh_client/cuopt_sh.py index 2278731d3b..5c2db4dc11 100755 --- a/python/cuopt_self_hosted/cuopt_sh_client/cuopt_sh.py +++ b/python/cuopt_self_hosted/cuopt_sh_client/cuopt_sh.py @@ -231,6 +231,14 @@ def read_input_data(i_file): elif args.type == "LP": if args.init_ids: raise Exception("Initial ids are not supported for LP") + if ( + isinstance(cuopt_problem_data, list) + and len(cuopt_problem_data) > 1 + and args.warmstart_id + ): + raise Exception( + "Warmstart id is only supported for a single LP problem" + ) def log_callback(name): def print_log(log): @@ -351,9 +359,8 @@ def main(): " " "For LP: " "A single problem file in mps/json format or file_name." - "Batch mode is supported in case of mps files only for LP and" - "not for MILP, where a list of mps" - "files can be shared to be solved in parallel.", + "Multiple mps files may be passed for LP; they are solved " + "sequentially (batch mode is deprecated).", ) parser.add_argument( "-id", @@ -373,7 +380,7 @@ def main(): default=None, help="reqId of a solution to use as a warmstart data for a " "single LP problem. This allows to restart PDLP with a " - "previous solution context. Not enabled for Batch LP problem", + "previous solution context. 
Not enabled when multiple LP problems are passed.", ) parser.add_argument( "-ca", diff --git a/python/cuopt_server/cuopt_server/utils/linear_programming/solver.py b/python/cuopt_server/cuopt_server/utils/linear_programming/solver.py index 6eeaafbde5..be43ec1e57 100644 --- a/python/cuopt_server/cuopt_server/utils/linear_programming/solver.py +++ b/python/cuopt_server/cuopt_server/utils/linear_programming/solver.py @@ -337,20 +337,29 @@ def create_solution(sol): sol = None total_solve_time = None if type(LP_data) is list: + if len(LP_data) == 0: + raise HTTPException( + status_code=400, detail="LP_data list cannot be empty" + ) is_batch = True - data_model_list = [] - warnings = [] - for i_data in LP_data: - i_warnings, data_model = create_data_model(i_data) - data_model_list.append(data_model) - warnings.extend(i_warnings) + warnings = [ + "LP batch mode is deprecated. Multiple problems are now solved " + "sequentially. Implement your own parallelism if needed." + ] + sol = [] + total_solve_time = 0.0 cswarnings, solver_settings = create_solver( LP_data[0], warmstart_data ) warnings.extend(cswarnings) - sol, total_solve_time = linear_programming.BatchSolve( - data_model_list, solver_settings - ) + for i_data in LP_data: + i_warnings, data_model = create_data_model(i_data) + warnings.extend(i_warnings) + i_sol = linear_programming.Solve( + data_model, solver_settings=solver_settings + ) + total_solve_time += i_sol.get_solve_time() + sol.append(i_sol) else: warnings, data_model = create_data_model(LP_data) cswarnings, solver_settings = create_solver( @@ -382,7 +391,7 @@ def create_solution(sol): if i_sol.get_error_status() != ErrorStatus.Success: res.append( { - "status": i_sol.get_error_status(), + "status": i_sol.get_error_status().name, "solution": i_sol.get_error_message(), } ) diff --git a/python/cuopt_server/cuopt_server/webserver.py b/python/cuopt_server/cuopt_server/webserver.py index 4fc3d608fe..b5f8518c8f 100644 --- 
a/python/cuopt_server/cuopt_server/webserver.py +++ b/python/cuopt_server/cuopt_server/webserver.py @@ -931,7 +931,7 @@ async def postrequest( default=None, description="If set, the warmstart data in solution identified by id " "will be used by the solver as warmstart data for this request. " - "Enabled for single LP problem, not enabled for Batch LP", + "Enabled for single LP problem. Batch LP is deprecated.", ), validation_only: Optional[bool] = Query( default=False,