Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
258 changes: 232 additions & 26 deletions cpp/src/branch_and_bound/branch_and_bound.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,29 @@ i_t fractional_variables(const simplex_solver_settings_t<i_t, f_t>& settings,
return fractional.size();
}

template <typename i_t, typename f_t>
i_t prune_fixed_fractional_variables(const std::vector<f_t>& lower_bounds,
const std::vector<f_t>& upper_bounds,
const simplex_solver_settings_t<i_t, f_t>& settings,
std::vector<i_t>& fractional)
{
std::vector<i_t> new_fractional;
new_fractional.reserve(fractional.size());

i_t num_fixed = 0;
for (i_t k = 0; k < (i_t)fractional.size(); k++) {
const i_t j = fractional[k];
if (std::abs(upper_bounds[j] - lower_bounds[j]) < settings.fixed_tol) {
num_fixed++;
} else {
new_fractional.push_back(j);
}
}

fractional = std::move(new_fractional);
return num_fixed;
}

template <typename i_t, typename f_t>
void full_variable_types(const user_problem_t<i_t, f_t>& original_problem,
const lp_problem_t<i_t, f_t>& original_lp,
Expand Down Expand Up @@ -2363,6 +2386,7 @@ mip_status_t branch_and_bound_t<i_t, f_t>::solve(mip_solution_t<i_t, f_t>& solut
root_objective_,
root_vstatus_,
edge_norms_,
upper_bound_.load(),
pc_);
}

Expand All @@ -2372,46 +2396,228 @@ mip_status_t branch_and_bound_t<i_t, f_t>::solve(mip_solution_t<i_t, f_t>& solut
return solver_status_;
}

// Exploit infeasible/fathomed branches from strong branching for bounds tightening.
// If branching down on x_j is infeasible, we can tighten lb[j] = ceil(x_j*).
// If branching up on x_j is infeasible, we can tighten ub[j] = floor(x_j*).
// With an incumbent, branches whose objective exceeds the cutoff yield the same deductions.
{
const f_t current_upper = upper_bound_.load();
i_t num_tightened = 0;
i_t num_infeasible = 0;
i_t num_cutoff = 0;
for (i_t k = 0; k < (i_t)fractional.size(); k++) {
const i_t j = fractional[k];
const f_t sb_down = pc_.strong_branch_down[k];
const f_t sb_up = pc_.strong_branch_up[k];
bool down_infeasible = std::isinf(sb_down);
bool up_infeasible = std::isinf(sb_up);
bool down_cutoff = false;
bool up_cutoff = false;

if (!down_infeasible && std::isfinite(sb_down) && std::isfinite(current_upper)) {
down_cutoff = (sb_down + root_objective_ > current_upper + settings_.dual_tol);
down_infeasible = down_cutoff;
}
if (!up_infeasible && std::isfinite(sb_up) && std::isfinite(current_upper)) {
up_cutoff = (sb_up + root_objective_ > current_upper + settings_.dual_tol);
up_infeasible = up_cutoff;
}

if (down_infeasible && up_infeasible) {
bool truly_infeasible = std::isinf(sb_down) && std::isinf(sb_up);
if (truly_infeasible) {
settings_.log.printf("Strong branching: both branches infeasible for variable %d\n", j);
return mip_status_t::INFEASIBLE;
}
// Might happen if the incumbent is already the optimal
settings_.log.printf("Strong branching: both branches fathomed for variable %d\n", j);
bool has_incumbent = false;
mutex_upper_.lock();
has_incumbent = incumbent_.has_incumbent;
mutex_upper_.unlock();
assert(has_incumbent);
solver_status_ = mip_status_t::OPTIMAL;
set_final_solution(solution, upper_bound_.load());
return solver_status_;
}
if (down_infeasible) {
mutex_original_lp_.lock();
f_t new_lb = std::ceil(root_relax_soln_.x[j]);
if (new_lb > original_lp_.lower[j]) {
settings_.log.debug("SB tighten var %d: lb %e -> %e (%s)",
j,
original_lp_.lower[j],
new_lb,
down_cutoff ? "cutoff" : "infeasible");
original_lp_.lower[j] = new_lb;
num_tightened++;
if (down_cutoff) {
num_cutoff++;
} else {
num_infeasible++;
}
}
mutex_original_lp_.unlock();
}
if (up_infeasible) {
mutex_original_lp_.lock();
f_t new_ub = std::floor(root_relax_soln_.x[j]);
if (new_ub < original_lp_.upper[j]) {
settings_.log.debug("SB tighten var %d: ub %e -> %e (%s)",
j,
original_lp_.upper[j],
new_ub,
up_cutoff ? "cutoff" : "infeasible");
original_lp_.upper[j] = new_ub;
num_tightened++;
if (up_cutoff) {
num_cutoff++;
} else {
num_infeasible++;
}
}
mutex_original_lp_.unlock();
}
}
if (num_tightened > 0) {
settings_.log.printf(
"Strong branching bounds tightening: %d tightened (%d infeasible, %d cutoff)\n",
num_tightened,
num_infeasible,
num_cutoff);

std::vector<bool> bounds_changed(original_lp_.num_cols, true);
std::vector<char> row_sense;
std::vector<f_t> new_lower;
std::vector<f_t> new_upper;
mutex_original_lp_.lock();
new_lower = original_lp_.lower;
new_upper = original_lp_.upper;
mutex_original_lp_.unlock();
bounds_strengthening_t<i_t, f_t> sb_presolve(original_lp_, Arow_, row_sense, var_types_);
bool feasible =
sb_presolve.bounds_strengthening(settings_, bounds_changed, new_lower, new_upper);
i_t num_fixed = 0;
if (feasible) {
num_fixed = prune_fixed_fractional_variables(new_lower, new_upper, settings_, fractional);
}
mutex_original_lp_.lock();
original_lp_.lower = new_lower;
original_lp_.upper = new_upper;
mutex_original_lp_.unlock();
if (!feasible) {
settings_.log.printf("Strong branching bounds propagation detected infeasibility\n");
return mip_status_t::INFEASIBLE;
}
if (num_fixed > 0) {
settings_.log.printf(
"Strong branching bounds tightening: %d variables fixed (%d from propagation)\n",
num_fixed,
num_fixed - num_tightened);
Comment on lines +2513 to +2516
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

num_fixed - num_tightened can go negative in the log output.

Line 2516 mixes different counters (num_fixed = fixed fractionals; num_tightened = bound tighten operations), so the “from propagation” number can become negative and misleading.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@cpp/src/branch_and_bound/branch_and_bound.cpp` around lines 2513 - 2516, The
log prints "from propagation" as num_fixed - num_tightened which can be negative
because num_fixed counts fixed fractionals and num_tightened counts tighten
operations; fix by computing an explicit non-negative propagated count before
logging (e.g., int num_from_propagation = num_fixed - num_tightened; if
(num_from_propagation < 0) num_from_propagation = 0) and use that variable in
the settings_.log.printf call (reference settings_.log.printf, num_fixed,
num_tightened) so the log never shows a negative "from propagation" value.

num_fractional = fractional.size();
}

// If no fractionals remain after the fixings - perform a resolve
// to get fractionals to branch on, or return optimality if the root relaxation is integer
if (num_fractional == 0) {
lp_settings.concurrent_halt = NULL;
i_t iter = 0;
bool initialize_basis = false;
dual::status_t lp_status = dual_phase2_with_advanced_basis(2,
0,
initialize_basis,
exploration_stats_.start_time,
original_lp_,
lp_settings,
root_vstatus_,
basis_update,
basic_list,
nonbasic_list,
root_relax_soln_,
iter,
edge_norms_);
exploration_stats_.total_lp_iters += iter;
root_objective_ = compute_objective(original_lp_, root_relax_soln_.x);
if (lp_status == dual::status_t::OPTIMAL) {
fractional.clear();
num_fractional =
fractional_variables(settings_, root_relax_soln_.x, var_types_, fractional);
if (num_fractional == 0) {
set_solution_at_root(solution, cut_info);
return mip_status_t::OPTIMAL;
}
} else if (lp_status == dual::status_t::TIME_LIMIT) {
solver_status_ = mip_status_t::TIME_LIMIT;
set_final_solution(solution, root_objective_);
return solver_status_;
} else {
settings_.log.printf("LP re-solve after SB tightening returned status %d\n", lp_status);
return mip_status_t::NUMERICAL;
}
Comment on lines +2526 to +2556
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Don’t classify non-numerical LP statuses as NUMERICAL in re-solve paths.

At Line 2554-2556 and Line 2617-2619, any status other than OPTIMAL/TIME_LIMIT is treated as numerical. WORK_LIMIT and ITERATION_LIMIT are valid outcomes and should be propagated explicitly (or handled with a fallback solve), not reported as numerical failure.

Suggested status handling adjustment
-        } else {
-          settings_.log.printf("LP re-solve after SB tightening returned status %d\n", lp_status);
-          return mip_status_t::NUMERICAL;
-        }
+        } else if (lp_status == dual::status_t::WORK_LIMIT) {
+          solver_status_ = mip_status_t::WORK_LIMIT;
+          set_final_solution(solution, root_objective_);
+          return solver_status_;
+        } else if (lp_status == dual::status_t::ITERATION_LIMIT) {
+          // Preserve non-numerical termination semantics (or run a fallback full solve).
+          solver_status_ = mip_status_t::WORK_LIMIT;
+          set_final_solution(solution, root_objective_);
+          return solver_status_;
+        } else {
+          settings_.log.printf("LP re-solve after SB tightening returned status %d\n", lp_status);
+          return mip_status_t::NUMERICAL;
+        }
-          } else {
-            settings_.log.printf("LP re-solve after RC tightening returned status %d\n", lp_status);
-            return mip_status_t::NUMERICAL;
-          }
+          } else if (lp_status == dual::status_t::WORK_LIMIT) {
+            solver_status_ = mip_status_t::WORK_LIMIT;
+            set_final_solution(solution, root_objective_);
+            return solver_status_;
+          } else if (lp_status == dual::status_t::ITERATION_LIMIT) {
+            solver_status_ = mip_status_t::WORK_LIMIT;
+            set_final_solution(solution, root_objective_);
+            return solver_status_;
+          } else {
+            settings_.log.printf("LP re-solve after RC tightening returned status %d\n", lp_status);
+            return mip_status_t::NUMERICAL;
+          }

Based on learnings: “In cuOpt, the dual simplex method is dual-feasible throughout execution… when status is ITERATION_LIMIT, the current objective is still a valid lower bound … safe to use in strong branching for bound tightening, fixings, and cutoff logic.”

Also applies to: 2589-2619

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@cpp/src/branch_and_bound/branch_and_bound.cpp` around lines 2526 - 2556, The
current handling of dual::status_t lp_status (from
dual_phase2_with_advanced_basis) treats any non-OPTIMAL/TIME_LIMIT result as
NUMERICAL; change this to explicitly handle WORK_LIMIT and ITERATION_LIMIT (and
any other non-fatal statuses) instead of mapping them to
mip_status_t::NUMERICAL. Locate the lp_status checks around the root solve
(variable lp_status, compute_objective, root_relax_soln_) and the later re-solve
site, and: 1) add explicit branches for dual::status_t::WORK_LIMIT and
dual::status_t::ITERATION_LIMIT that propagate a corresponding solver_status_
(or return a distinct mip_status_t like WORK_LIMIT/ITERATION_LIMIT) OR use the
current objective (root_objective_) as a valid bound for branching/cutoff logic
before continuing; 2) only treat truly numerical failures as
mip_status_t::NUMERICAL. Ensure set_final_solution/set_solution_at_root behavior
is updated to match the new status paths.

}
}
}

if (settings_.reduced_cost_strengthening >= 2 && upper_bound_.load() < last_upper_bound) {
std::vector<f_t> lower_bounds;
std::vector<f_t> upper_bounds;
i_t num_fixed = find_reduced_cost_fixings(upper_bound_.load(), lower_bounds, upper_bounds);
if (num_fixed > 0) {
std::vector<bool> bounds_changed(original_lp_.num_cols, true);
std::vector<char> row_sense;

std::vector<f_t> new_lower = lower_bounds;
std::vector<f_t> new_upper = upper_bounds;
bounds_strengthening_t<i_t, f_t> node_presolve(original_lp_, Arow_, row_sense, var_types_);

mutex_original_lp_.lock();
original_lp_.lower = lower_bounds;
original_lp_.upper = upper_bounds;
bool feasible = node_presolve.bounds_strengthening(
settings_, bounds_changed, original_lp_.lower, original_lp_.upper);
mutex_original_lp_.unlock();
bool feasible =
node_presolve.bounds_strengthening(settings_, bounds_changed, new_lower, new_upper);
if (!feasible) {
settings_.log.printf("Bound strengthening failed\n");
return mip_status_t::NUMERICAL; // We had a feasible integer solution, but bound
// strengthening thinks we are infeasible.
}
// Go through and check the fractional variables and remove any that are now fixed to their
// bounds
std::vector<i_t> to_remove(fractional.size(), 0);
i_t num_to_remove = 0;
for (i_t k = 0; k < fractional.size(); k++) {
const i_t j = fractional[k];
if (std::abs(original_lp_.upper[j] - original_lp_.lower[j]) < settings_.fixed_tol) {
to_remove[k] = 1;
num_to_remove++;
}
}
if (num_to_remove > 0) {
std::vector<i_t> new_fractional;
new_fractional.reserve(fractional.size() - num_to_remove);
for (i_t k = 0; k < fractional.size(); k++) {
if (!to_remove[k]) { new_fractional.push_back(fractional[k]); }
}
fractional = new_fractional;
i_t num_fixed = prune_fixed_fractional_variables(new_lower, new_upper, settings_, fractional);
mutex_original_lp_.lock();
original_lp_.lower = new_lower;
original_lp_.upper = new_upper;
mutex_original_lp_.unlock();
if (num_fixed > 0) {
num_fractional = fractional.size();
if (num_fractional == 0) {
lp_settings.concurrent_halt = NULL;
i_t iter = 0;
bool initialize_basis = false;
dual::status_t lp_status = dual_phase2_with_advanced_basis(2,
0,
initialize_basis,
exploration_stats_.start_time,
original_lp_,
lp_settings,
root_vstatus_,
basis_update,
basic_list,
nonbasic_list,
root_relax_soln_,
iter,
edge_norms_);
exploration_stats_.total_lp_iters += iter;
root_objective_ = compute_objective(original_lp_, root_relax_soln_.x);
if (lp_status == dual::status_t::OPTIMAL) {
fractional.clear();
num_fractional =
fractional_variables(settings_, root_relax_soln_.x, var_types_, fractional);
if (num_fractional == 0) {
set_solution_at_root(solution, cut_info);
return mip_status_t::OPTIMAL;
}
} else if (lp_status == dual::status_t::TIME_LIMIT) {
solver_status_ = mip_status_t::TIME_LIMIT;
set_final_solution(solution, root_objective_);
return solver_status_;
} else {
settings_.log.printf("LP re-solve after RC tightening returned status %d\n", lp_status);
return mip_status_t::NUMERICAL;
}
}
}
}
}
Expand Down
8 changes: 7 additions & 1 deletion cpp/src/branch_and_bound/pseudo_costs.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ void strong_branch_helper(i_t start,
const std::vector<f_t>& root_soln,
const std::vector<variable_status_t>& root_vstatus,
const std::vector<f_t>& edge_norms,
f_t upper_bound,
pseudo_costs_t<i_t, f_t>& pc)
{
raft::common::nvtx::range scope("BB::strong_branch_helper");
Expand Down Expand Up @@ -62,6 +63,7 @@ void strong_branch_helper(i_t start,
if (elapsed_time > settings.time_limit) { break; }
child_settings.time_limit = std::max(0.0, settings.time_limit - elapsed_time);
child_settings.iteration_limit = 200;
child_settings.cut_off = upper_bound + settings.dual_tol;
lp_solution_t<i_t, f_t> solution(original_lp.num_rows, original_lp.num_cols);
i_t iter = 0;
std::vector<variable_status_t> vstatus = root_vstatus;
Expand All @@ -80,7 +82,8 @@ void strong_branch_helper(i_t start,
if (status == dual::status_t::DUAL_UNBOUNDED) {
// LP was infeasible
obj = std::numeric_limits<f_t>::infinity();
} else if (status == dual::status_t::OPTIMAL || status == dual::status_t::ITERATION_LIMIT) {
} else if (status == dual::status_t::OPTIMAL || status == dual::status_t::ITERATION_LIMIT ||
status == dual::status_t::CUTOFF) {
obj = compute_objective(child_problem, solution.x);
Comment on lines +85 to 87
Copy link

@coderabbitai coderabbitai bot Mar 3, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

Do not use ITERATION_LIMIT strong-branch results as proof bounds.

Line 85 currently materializes obj for ITERATION_LIMIT, and those deltas are later used for root fixings/cutoff deductions. An iteration-limited solve is not a proven LP bound, so this can incorrectly tighten bounds or prematurely fathom.

🔧 Suggested safe handling
-      } else if (status == dual::status_t::OPTIMAL || status == dual::status_t::ITERATION_LIMIT ||
-                 status == dual::status_t::CUTOFF) {
+      } else if (status == dual::status_t::OPTIMAL || status == dual::status_t::CUTOFF) {
         obj = compute_objective(child_problem, solution.x);
+      } else if (status == dual::status_t::ITERATION_LIMIT) {
+        // Not a proven bound; keep it out of fixing logic.
+        obj = std::numeric_limits<f_t>::quiet_NaN();
       } else {

As per coding guidelines, "Validate algorithm correctness in optimization logic: simplex pivots, branch-and-bound decisions, routing heuristics, and constraint/objective handling must produce correct results."

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@cpp/src/branch_and_bound/pseudo_costs.cpp` around lines 85 - 87, The code
currently treats dual::status_t::ITERATION_LIMIT as a proven bound and calls
compute_objective(child_problem, solution.x); remove ITERATION_LIMIT from that
condition so obj is computed only for proven statuses (dual::status_t::OPTIMAL
and dual::status_t::CUTOFF) and ensure any logic that consumes obj (root fixings
/ cutoff deductions) is not executed when status ==
dual::status_t::ITERATION_LIMIT; update the condition around compute_objective
in pseudo_costs.cpp (the block handling child_problem, solution.x and status) to
skip objective materialization and subsequent bound/fathom logic for
ITERATION_LIMIT.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Dual simplex is dual-safe: the bound is always valid even if the termination is early; it will just be looser than the true bound.

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@aliceb-nv, thank you for the clarification! You're absolutely right—I missed that crucial property of the dual simplex method. Since dual feasibility is maintained throughout the algorithm, an early termination due to ITERATION_LIMIT still provides a valid (though potentially looser) lower bound on the optimal objective. This makes it safe to use for bound tightening and fixing logic in the strong branching context.

I appreciate you explaining this domain-specific behavior. The code is correct as written.


✏️ Learnings added
Learnt from: aliceb-nv
Repo: NVIDIA/cuopt PR: 922
File: cpp/src/branch_and_bound/pseudo_costs.cpp:85-87
Timestamp: 2026-03-03T15:03:14.218Z
Learning: In cuOpt's dual simplex solver (cpp/src/dual_simplex/ and cpp/src/branch_and_bound/ files), the dual simplex method is "dual-safe": it maintains dual feasibility throughout execution. Therefore, when the solver terminates early with status ITERATION_LIMIT, the current objective value is still a valid lower bound (for minimization), just looser than the optimal bound. This makes ITERATION_LIMIT results safe to use in strong branching for bound tightening, fixings, and cutoff logic.

Note: Learnings are effective only in the context of similar code segments. To apply general coding guidelines, please configure review instructions. You can manage existing learnings in the app.


🧠 Learnings used
Learnt from: chris-maes
Repo: NVIDIA/cuopt PR: 814
File: cpp/src/dual_simplex/cuts.cpp:403-408
Timestamp: 2026-01-30T22:11:38.999Z
Learning: In the cuOpt dual simplex knapsack cut generation (cpp/src/dual_simplex/cuts.cpp), the greedy_knapsack_problem function intentionally allows division by zero in the ratio calculation `ratios[i] = values[i] / weights[i]`. When weights[i] = 0 and values[i] != 0, the division produces infinity (under IEEE 754), which correctly prioritizes zero-weight items with value in the greedy selection.

Learnt from: chris-maes
Repo: NVIDIA/cuopt PR: 599
File: cpp/src/dual_simplex/cuts.cpp:79-99
Timestamp: 2026-01-22T22:55:38.995Z
Learning: In the cuOPT dual simplex cut pool (cpp/src/dual_simplex/cuts.cpp), cuts with zero norm are prevented from entering the cut pool by validation logic elsewhere in the codebase. However, near-zero norms should still be guarded against in functions like cut_orthogonality to ensure numerical stability.

Learnt from: aliceb-nv
Repo: NVIDIA/cuopt PR: 899
File: cpp/src/mip_heuristics/solve.cu:169-193
Timestamp: 2026-02-24T14:37:15.024Z
Learning: In cuOpt's MIP solver (cpp/src/mip_heuristics/solve.cu and related files), the objective_scaling_factor sign encodes the user problem's objective sense: if >= 0, the user problem is minimization; if < 0, the user problem is maximization (which is internally converted to minimization by negating the objective). Therefore, checking the sign of objective_scaling_factor is a valid way to determine the problem direction and set appropriate "no bound" values for callbacks.

Learnt from: chris-maes
Repo: NVIDIA/cuopt PR: 814
File: cpp/src/dual_simplex/cuts.cpp:1278-1278
Timestamp: 2026-01-30T23:34:26.318Z
Learning: In the cuOpt dual simplex solver (cpp/src/dual_simplex/cuts.cpp and related files), slack variable coefficients in the constraint matrix are always exactly 1.0 or -1.0 (not floating-point approximations), so exact equality checks like `assert(std::abs(lp.A.x[col_start]) == 1.0)` are appropriate and should not be flagged as requiring epsilon tolerances.

Learnt from: aliceb-nv
Repo: NVIDIA/cuopt PR: 899
File: cpp/src/branch_and_bound/branch_and_bound.hpp:111-117
Timestamp: 2026-02-23T18:58:17.440Z
Learning: In cpp/src/branch_and_bound/branch_and_bound.hpp, the initial_cutoff_ member is only written via set_initial_cutoff() before branch_and_bound_t::solve() is invoked, and all B&B worker threads that read it via get_cutoff() are spawned inside solve(). This sequential ordering guarantees no concurrent read/write, so initial_cutoff_ does not require atomic synchronization.

Learnt from: chris-maes
Repo: NVIDIA/cuopt PR: 602
File: cpp/src/linear_programming/solve.cu:732-742
Timestamp: 2025-12-04T20:09:09.264Z
Learning: In cpp/src/linear_programming/solve.cu, the barrier solver does not currently return INFEASIBLE or UNBOUNDED status. It only returns OPTIMAL, TIME_LIMIT, NUMERICAL_ISSUES, or CONCURRENT_LIMIT.

Learnt from: chris-maes
Repo: NVIDIA/cuopt PR: 814
File: cpp/src/dual_simplex/branch_and_bound.hpp:154-159
Timestamp: 2026-01-29T04:25:05.434Z
Learning: In cpp/src/dual_simplex/branch_and_bound.hpp and branch_and_bound.cpp, the mutex_original_lp_ protects original_lp_ specifically during the root cut pass phase. The public API set_new_solution() can be called from external threads during this phase while the main thread is adding cuts/slacks to original_lp_. Functions like best_first_thread, exploration_ramp_up, and plunge_from execute after the cut passes complete (after the OpenMP parallel barrier) when original_lp_ is fixed, so they don't need mutex protection for their reads.

Learnt from: CR
Repo: NVIDIA/cuopt PR: 0
File: .github/.coderabbit_review_guide.md:0-0
Timestamp: 2025-11-25T10:20:49.822Z
Learning: Applies to **/*.{cu,cuh,cpp,hpp,h} : Validate algorithm correctness in optimization logic: simplex pivots, branch-and-bound decisions, routing heuristics, and constraint/objective handling must produce correct results

Learnt from: aliceb-nv
Repo: NVIDIA/cuopt PR: 901
File: cpp/src/mip_heuristics/presolve/single_lock_dual_aggregation.hpp:42-53
Timestamp: 2026-02-23T20:28:13.155Z
Learning: In Papilo-based presolvers (cpp/src/mip_heuristics/presolve/*.cpp), Papilo standardizes integer variable bounds during early presolve by rounding fractional bounds (ceiling for lower, floor for upper). By the time custom presolvers execute, all integer and implied-integer variables have exact integer bounds, so exact equality comparisons like `lower_bounds[col] == 0.0 && upper_bounds[col] == 1.0` are correct and don't require epsilon tolerances.
<!-- </add_learning>

Learnt from: CR
Repo: NVIDIA/cuopt PR: 0
File: .github/.coderabbit_review_guide.md:0-0
Timestamp: 2025-11-25T10:20:49.822Z
Learning: Applies to **/*.{cu,cuh,cpp,hpp,h} : Validate correct initialization of variable bounds, constraint coefficients, and algorithm state before solving; ensure reset when transitioning between algorithm phases (presolve, simplex, diving, crossover)

Learnt from: aliceb-nv
Repo: NVIDIA/cuopt PR: 798
File: cpp/src/dual_simplex/phase2.cpp:2658-2684
Timestamp: 2026-02-06T17:38:38.088Z
Learning: The work-unit recording code in cpp/src/dual_simplex/phase2.cpp using the 1e8 divisor (around lines 2658-2684) is a temporary placeholder for release. It will be replaced by a more involved predictor system that was elided from PR `#798`.

Learnt from: CR
Repo: NVIDIA/cuopt PR: 0
File: .github/.coderabbit_review_guide.md:0-0
Timestamp: 2025-11-25T10:20:49.822Z
Learning: Applies to **/*.{cu,cuh,cpp,hpp,h} : Identify assertions with overly strict numerical tolerances that fail on legitimate degenerate/edge cases (near-zero pivots, singular matrices, empty problems)

Learnt from: chris-maes
Repo: NVIDIA/cuopt PR: 856
File: cpp/src/dual_simplex/basis_updates.cpp:2215-2219
Timestamp: 2026-02-13T04:47:36.658Z
Learning: In cpp/src/dual_simplex/basis_updates.cpp, the `update()` method (both dense and sparse vector overloads) requires that `etilde` be non-empty as a precondition. An empty `etilde` would make the update operation ill-defined, so no defensive checks for `etilde.i.size() == 0` are needed when computing work estimates involving `std::log2(etilde.i.size())`.

} else {
settings.log.debug("Thread id %2d remaining %d variable %d branch %d status %d\n",
Expand Down Expand Up @@ -307,6 +310,7 @@ void strong_branching(const user_problem_t<i_t, f_t>& original_problem,
f_t root_obj,
const std::vector<variable_status_t>& root_vstatus,
const std::vector<f_t>& edge_norms,
f_t upper_bound,
pseudo_costs_t<i_t, f_t>& pc)
{
pc.resize(original_lp.num_cols);
Expand Down Expand Up @@ -432,6 +436,7 @@ void strong_branching(const user_problem_t<i_t, f_t>& original_problem,
root_soln,
root_vstatus,
edge_norms,
upper_bound,
pc);
}
}
Expand Down Expand Up @@ -786,6 +791,7 @@ template void strong_branching<int, double>(const user_problem_t<int, double>& o
double root_obj,
const std::vector<variable_status_t>& root_vstatus,
const std::vector<double>& edge_norms,
double upper_bound,
pseudo_costs_t<int, double>& pc);

#endif
Expand Down
1 change: 1 addition & 0 deletions cpp/src/branch_and_bound/pseudo_costs.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -527,6 +527,7 @@ void strong_branching(const user_problem_t<i_t, f_t>& original_problem,
f_t root_obj,
const std::vector<variable_status_t>& root_vstatus,
const std::vector<f_t>& edge_norms,
f_t upper_bound,
pseudo_costs_t<i_t, f_t>& pc);

} // namespace cuopt::linear_programming::dual_simplex
4 changes: 3 additions & 1 deletion cpp/src/mip_heuristics/solver.cu
Original file line number Diff line number Diff line change
Expand Up @@ -218,8 +218,10 @@ solution_t<i_t, f_t> mip_solver_t<i_t, f_t>::run_solver()
branch_and_bound_settings.knapsack_cuts = context.settings.knapsack_cuts;
branch_and_bound_settings.strong_chvatal_gomory_cuts =
context.settings.strong_chvatal_gomory_cuts;
// default is reduced cost strengthening ON
branch_and_bound_settings.reduced_cost_strengthening =
context.settings.reduced_cost_strengthening;
context.settings.reduced_cost_strengthening < 0 ? 2
: context.settings.reduced_cost_strengthening;
branch_and_bound_settings.cut_change_threshold = context.settings.cut_change_threshold;
branch_and_bound_settings.cut_min_orthogonality = context.settings.cut_min_orthogonality;
branch_and_bound_settings.mip_batch_pdlp_strong_branching =
Expand Down
Loading