diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6aaa77f..aae738c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -25,13 +25,6 @@ jobs:
           version: ${{ matrix.version }}
           arch: ${{ matrix.arch }}
       - uses: julia-actions/cache@v1
-      - name: MOI
-        shell: julia --project=@. {0}
-        run: |
-          using Pkg
-          Pkg.add([
-              PackageSpec(name="MathOptInterface", rev="bl/arraydiff"),
-          ])
       - uses: julia-actions/julia-buildpkg@v1
       - uses: julia-actions/julia-runtest@v1
         with:
diff --git a/Project.toml b/Project.toml
index 014259e..e45e9b1 100644
--- a/Project.toml
+++ b/Project.toml
@@ -9,6 +9,7 @@ ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
 NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
+OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 
 [compat]
 DataStructures = "0.18, 0.19"
diff --git a/src/ArrayDiff.jl b/src/ArrayDiff.jl
index 1f08c1e..faf460f 100644
--- a/src/ArrayDiff.jl
+++ b/src/ArrayDiff.jl
@@ -10,25 +10,15 @@ import ForwardDiff
 import MathOptInterface as MOI
 const Nonlinear = MOI.Nonlinear
 import SparseArrays
+import OrderedCollections: OrderedDict
 
 """
     Mode() <: AbstractAutomaticDifferentiation
 
 Fork of `MOI.Nonlinear.SparseReverseMode` to add array support.
 """
 struct Mode <: MOI.Nonlinear.AbstractAutomaticDifferentiation end
 
-function MOI.Nonlinear.Evaluator(
-    model::MOI.Nonlinear.Model,
-    ::Mode,
-    ordered_variables::Vector{MOI.VariableIndex},
-)
-    return MOI.Nonlinear.Evaluator(
-        model,
-        NLPEvaluator(model, ordered_variables),
-    )
-end
-
 # Override basic math functions to return NaN instead of throwing errors.
 # This is what NLP solvers expect, and sometimes the results aren't needed
 # anyway, because the code may compute derivatives wrt constants.
@@ -57,5 +47,17 @@ include("utils.jl")
 include("reverse_mode.jl")
 include("forward_over_reverse.jl")
 include("mathoptinterface_api.jl")
+include("operators.jl")
+include("model.jl")
+include("parse.jl")
+include("evaluator.jl")
+
+function Evaluator(
+    model::ArrayDiff.Model,
+    ::Mode,
+    ordered_variables::Vector{MOI.VariableIndex},
+)
+    return Evaluator(model, NLPEvaluator(model, ordered_variables))
+end
 
 end # module
diff --git a/src/evaluator.jl b/src/evaluator.jl
new file mode 100644
index 0000000..35b30ab
--- /dev/null
+++ b/src/evaluator.jl
@@ -0,0 +1,156 @@
+# Largely inspired by MathOptInterface/src/Nonlinear/evaluator.jl
+# Most functions have been copy-pasted and slightly modified to adapt to small changes in OperatorRegistry and Model.
+
+function MOI.initialize(evaluator::Evaluator, features::Vector{Symbol})
+    start_time = time()
+    empty!(evaluator.ordered_constraints)
+    evaluator.eval_objective_timer = 0.0
+    evaluator.eval_objective_gradient_timer = 0.0
+    evaluator.eval_constraint_timer = 0.0
+    evaluator.eval_constraint_gradient_timer = 0.0
+    evaluator.eval_constraint_jacobian_timer = 0.0
+    evaluator.eval_hessian_objective_timer = 0.0
+    evaluator.eval_hessian_constraint_timer = 0.0
+    evaluator.eval_hessian_lagrangian_timer = 0.0
+    append!(evaluator.ordered_constraints, keys(evaluator.model.constraints))
+    # Every backend supports :ExprGraph, so don't forward it.
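+    # (If the backend is `nothing`, the only feature that can be satisfied is
+    # :ExprGraph, so anything left after the `filter!` triggers the error below.)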
+ filter!(f -> f != :ExprGraph, features) + if evaluator.backend !== nothing + MOI.initialize(evaluator.backend, features) + elseif !isempty(features) + @assert evaluator.backend === nothing # ==> ExprGraphOnly used + error( + "Unable to initialize `Nonlinear.Evaluator` because the " * + "following features are not supported: $features", + ) + end + evaluator.initialize_timer = time() - start_time + return +end + +function MOI.eval_objective(evaluator::Evaluator, x) + start = time() + obj = MOI.eval_objective(evaluator.backend, x) + evaluator.eval_objective_timer += time() - start + return obj +end + +function MOI.eval_objective_gradient(evaluator::Evaluator, g, x) + start = time() + MOI.eval_objective_gradient(evaluator.backend, g, x) + evaluator.eval_objective_gradient_timer += time() - start + return +end + +function MOI.eval_constraint_gradient(evaluator::Evaluator, ∇g, x, i) + start = time() + MOI.eval_constraint_gradient(evaluator.backend, ∇g, x, i) + evaluator.eval_constraint_gradient_timer += time() - start + return +end + +function MOI.constraint_gradient_structure(evaluator::Evaluator, i) + return MOI.constraint_gradient_structure(evaluator.backend, i) +end + +function MOI.eval_constraint(evaluator::Evaluator, g, x) + start = time() + MOI.eval_constraint(evaluator.backend, g, x) + evaluator.eval_constraint_timer += time() - start + return +end + +function MOI.jacobian_structure(evaluator::Evaluator) + return MOI.jacobian_structure(evaluator.backend) +end + +function MOI.eval_constraint_jacobian(evaluator::Evaluator, J, x) + start = time() + MOI.eval_constraint_jacobian(evaluator.backend, J, x) + evaluator.eval_constraint_jacobian_timer += time() - start + return +end + +function MOI.hessian_objective_structure(evaluator::Evaluator) + return MOI.hessian_objective_structure(evaluator.backend) +end + +function MOI.hessian_constraint_structure(evaluator::Evaluator, i) + return MOI.hessian_constraint_structure(evaluator.backend, i) +end + +function MOI.hessian_lagrangian_structure(evaluator::Evaluator) + return MOI.hessian_lagrangian_structure(evaluator.backend) +end + +function MOI.eval_hessian_objective(evaluator::Evaluator, H, x) + start = time() + MOI.eval_hessian_objective(evaluator.backend, H, x) + evaluator.eval_hessian_objective_timer += time() - start + return +end + +function MOI.eval_hessian_constraint(evaluator::Evaluator, H, x, i) + start = time() + MOI.eval_hessian_constraint(evaluator.backend, H, x, i) + evaluator.eval_hessian_constraint_timer += time() - start + return +end + +function MOI.eval_hessian_lagrangian(evaluator::Evaluator, H, x, σ, μ) + start = time() + MOI.eval_hessian_lagrangian(evaluator.backend, H, x, σ, μ) + evaluator.eval_hessian_lagrangian_timer += time() - start + return +end + +function MOI.eval_constraint_jacobian_product(evaluator::Evaluator, y, x, w) + start = time() + MOI.eval_constraint_jacobian_product(evaluator.backend, y, x, w) + evaluator.eval_constraint_jacobian_timer += time() - start + return +end + +function MOI.eval_constraint_jacobian_transpose_product( + evaluator::Evaluator, + y, + x, + w, +) + start = time() + MOI.eval_constraint_jacobian_transpose_product(evaluator.backend, y, x, w) + evaluator.eval_constraint_jacobian_timer += time() - start + return +end + +function MOI.eval_hessian_lagrangian_product( + evaluator::Evaluator, + H, + x, + v, + σ, + μ, +) + start = time() + MOI.eval_hessian_lagrangian_product(evaluator.backend, H, x, v, σ, μ) + evaluator.eval_hessian_lagrangian_timer += time() - start + return +end + +function 
eval_univariate_hessian( + registry::OperatorRegistry, + id::Integer, + x::T, +) where {T} + if id <= registry.univariate_user_operator_start + ret = Nonlinear._eval_univariate_2nd_deriv(id, x) + if ret === nothing + op = registry.univariate_operators[id] + error("Hessian is not defined for operator $op") + end + return ret::T + end + offset = id - registry.univariate_user_operator_start + operator = registry.registered_univariate_operators[offset] + return eval_univariate_hessian(operator, x) +end diff --git a/src/forward_over_reverse.jl b/src/forward_over_reverse.jl index ea4c7e2..f777865 100644 --- a/src/forward_over_reverse.jl +++ b/src/forward_over_reverse.jl @@ -324,7 +324,7 @@ function _forward_eval_ϵ( d.user_output_buffer, n_children, ) - has_hessian = Nonlinear.eval_multivariate_hessian( + has_hessian = eval_multivariate_hessian( d.data.operators, d.data.operators.multivariate_operators[node.index], H, @@ -351,7 +351,7 @@ function _forward_eval_ϵ( end elseif node.type == Nonlinear.NODE_CALL_UNIVARIATE @inbounds child_idx = children_arr[ex.adj.colptr[k]] - f′′ = Nonlinear.eval_univariate_hessian( + f′′ = eval_univariate_hessian( d.data.operators, node.index, ex.forward_storage[child_idx], diff --git a/src/model.jl b/src/model.jl new file mode 100644 index 0000000..c1f9480 --- /dev/null +++ b/src/model.jl @@ -0,0 +1,85 @@ +# Largely inspired by MathOptInterface/src/Nonlinear/model.jl +# Most functions have been copy-pasted and slightly modified to adapt to small changes in OperatorRegistry and Model. + +function set_objective(model::Model, obj) + model.objective = parse_expression(model, obj) + return +end + +function add_constraint( + model::Model, + func, + set::Union{ + MOI.GreaterThan{Float64}, + MOI.LessThan{Float64}, + MOI.Interval{Float64}, + MOI.EqualTo{Float64}, + }, +) + f = parse_expression(model, func) + model.last_constraint_index += 1 + index = MOI.Nonlinear.ConstraintIndex(model.last_constraint_index) + model.constraints[index] = MOI.Nonlinear.Constraint(f, set) + return index +end + +function add_parameter(model::Model, value::Float64) + push!(model.parameters, value) + return MOI.Nonlinear.ParameterIndex(length(model.parameters)) +end + +function add_expression(model::Model, expr) + push!(model.expressions, parse_expression(model, expr)) + return Nonlinear.ExpressionIndex(length(model.expressions)) +end + +function Base.getindex(model::Model, index::Nonlinear.ExpressionIndex) + return model.expressions[index.value] +end + +function register_operator(model::Model, op::Symbol, nargs::Int, f::Function...) + return register_operator(model.operators, op, nargs, f...) +end + +function register_operator( + registry::OperatorRegistry, + op::Symbol, + nargs::Int, + f::Function..., +) + if nargs == 1 + if haskey(registry.univariate_operator_to_id, op) + error("Operator $op is already registered.") + elseif haskey(registry.multivariate_operator_to_id, op) + error("Operator $op is already registered.") + end + operator = Nonlinear._UnivariateOperator(op, f...) + push!(registry.univariate_operators, op) + push!(registry.registered_univariate_operators, operator) + registry.univariate_operator_to_id[op] = + length(registry.univariate_operators) + else + if haskey(registry.multivariate_operator_to_id, op) + error("Operator $op is already registered.") + elseif haskey(registry.univariate_operator_to_id, op) + error("Operator $op is already registered.") + end + operator = Nonlinear._MultivariateOperator{nargs}(op, f...) 
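+        # The operator's id is its index in `multivariate_operators`; the
+        # default operators fill the vector first, so user-defined ids start
+        # after `multivariate_user_operator_start`, mirroring the layout of
+        # `MOI.Nonlinear.OperatorRegistry`.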
+ push!(registry.multivariate_operators, op) + push!(registry.registered_multivariate_operators, operator) + registry.multivariate_operator_to_id[op] = + length(registry.multivariate_operators) + end + return +end + +function features_available(evaluator::Evaluator) + features = Symbol[] + if evaluator.backend !== nothing + append!(features, MOI.features_available(evaluator.backend)) + end + if !(:ExprGraph in features) + push!(features, :ExprGraph) + end + return features +end diff --git a/src/operators.jl b/src/operators.jl new file mode 100644 index 0000000..f293928 --- /dev/null +++ b/src/operators.jl @@ -0,0 +1,276 @@ +# Largely inspired by MathOptInterface/src/Nonlinear/operators.jl +# Most functions have been copy-pasted and slightly modified to adapt to small changes in OperatorRegistry and Model. + +const DEFAULT_MULTIVARIATE_OPERATORS = [ + :+, + :-, + :*, + :^, + :/, + :ifelse, + :atan, + :min, + :max, + :vect, + :dot, + :hcat, + :vcat, + :norm, + :sum, + :row, +] + +function eval_logic_function( + ::OperatorRegistry, + op::Symbol, + lhs::T, + rhs::T, +)::Bool where {T} + if op == :&& + return lhs && rhs + else + @assert op == :|| + return lhs || rhs + end +end + +function eval_multivariate_function( + registry::OperatorRegistry, + op::Symbol, + x::AbstractVector{T}, +) where {T} + if op == :+ + return sum(x; init = zero(T)) + elseif op == :- + @assert length(x) == 2 + return x[1] - x[2] + elseif op == :* + return prod(x; init = one(T)) + elseif op == :^ + @assert length(x) == 2 + # Use _nan_pow here to avoid throwing an error in common situations like + # (-1.0)^1.5. + return _nan_pow(x[1], x[2]) + elseif op == :/ + @assert length(x) == 2 + return x[1] / x[2] + elseif op == :ifelse + @assert length(x) == 3 + return ifelse(Bool(x[1]), x[2], x[3]) + elseif op == :atan + @assert length(x) == 2 + return atan(x[1], x[2]) + elseif op == :min + return minimum(x) + elseif op == :max + return maximum(x) + elseif op == :vect + return x + end + id = registry.multivariate_operator_to_id[op] + offset = id - registry.multivariate_user_operator_start + operator = registry.registered_multivariate_operators[offset] + @assert length(x) == operator.N + ret = operator.f(x) + MOI.Nonlinear.check_return_type(T, ret) + return ret::T +end + +function eval_multivariate_hessian( + registry::OperatorRegistry, + op::Symbol, + H, + x::AbstractVector{T}, +) where {T} + if op in (:+, :-, :ifelse) + return false + end + if op == :* + # f(x) = *(x[i] for i in 1:N) + # + # ∇fᵢ(x) = *(x[j] for j in 1:N if i != j) + # + # ∇fᵢⱼ(x) = *(x[k] for k in 1:N if i != k & j != k) + N = length(x) + if N == 1 + # Hessian is zero + elseif N == 2 + H[2, 1] = one(T) + else + for i in 1:N, j in (i+1):N + H[j, i] = + prod(x[k] for k in 1:N if k != i && k != j; init = one(T)) + end + end + elseif op == :^ + # f(x) = x[1]^x[2] + # + # ∇f(x) = x[2]*x[1]^(x[2]-1) + # x[1]^x[2]*log(x[1]) + # + # ∇²f(x) = x[2]*(x[2]-1)*x[1]^(x[2]-2) + # x[1]^(x[2]-1)*(x[2]*log(x[1])+1) x[1]^x[2]*log(x[1])^2 + ln = x[1] > 0 ? 
log(x[1]) : NaN + if x[2] == one(T) + H[2, 1] = _nan_to_zero(ln + one(T)) + H[2, 2] = _nan_to_zero(x[1] * ln^2) + elseif x[2] == T(2) + H[1, 1] = T(2) + H[2, 1] = _nan_to_zero(x[1] * (T(2) * ln + one(T))) + H[2, 2] = _nan_to_zero(ln^2 * x[1]^2) + else + H[1, 1] = _nan_to_zero(x[2] * (x[2] - 1) * _nan_pow(x[1], x[2] - 2)) + H[2, 1] = _nan_to_zero(_nan_pow(x[1], x[2] - 1) * (x[2] * ln + 1)) + H[2, 2] = _nan_to_zero(ln^2 * _nan_pow(x[1], x[2])) + end + elseif op == :/ + # f(x) = x[1]/x[2] + # + # ∇f(x) = 1/x[2] + # -x[1]/x[2]^2 + # + # ∇²(x) = 0.0 + # -1/x[2]^2 2x[1]/x[2]^3 + d = 1 / x[2]^2 + H[2, 1] = -d + H[2, 2] = 2 * x[1] * d / x[2] + elseif op == :atan + # f(x) = atan(y, x) + # + # ∇f(x) = +x/(x^2+y^2) + # -y/(x^2+y^2) + # + # ∇²(x) = -(2xy)/(x^2+y^2)^2 + # (y^2-x^2)/(x^2+y^2)^2 (2xy)/(x^2+y^2)^2 + base = (x[1]^2 + x[2]^2)^2 + H[1, 1] = -2 * x[2] * x[1] / base + H[2, 1] = (x[1]^2 - x[2]^2) / base + H[2, 2] = 2 * x[2] * x[1] / base + elseif op == :min + _, i = findmin(x) + H[i, i] = one(T) + elseif op == :max + _, i = findmax(x) + H[i, i] = one(T) + else + id = registry.multivariate_operator_to_id[op] + offset = id - registry.multivariate_user_operator_start + operator = registry.registered_multivariate_operators[offset] + if operator.∇²f === nothing + error("Hessian is not defined for operator $op") + end + @assert length(x) == operator.N + operator.∇²f(H, x) + end + return true +end + +function eval_univariate_function_and_gradient( + registry::OperatorRegistry, + id::Integer, + x::T, +) where {T} + if id <= registry.univariate_user_operator_start + return Nonlinear._eval_univariate(id, x)::Tuple{T,T} + end + offset = id - registry.univariate_user_operator_start + operator = registry.registered_univariate_operators[offset] + return Nonlinear.eval_univariate_function_and_gradient(operator, x) +end + +function eval_multivariate_gradient( + registry::OperatorRegistry, + op::Symbol, + g::AbstractVector{T}, + x::AbstractVector{T}, +) where {T} + @assert length(g) == length(x) + if op == :+ + fill!(g, one(T)) + elseif op == :- + g[1] = one(T) + g[2] = -one(T) + elseif op == :* + # Special case performance optimizations for common cases. + if length(x) == 1 + g[1] = one(T) + elseif length(x) == 2 + g[1] = x[2] + g[2] = x[1] + else + total = prod(x) + if iszero(total) + for i in eachindex(x) + g[i] = prod(x[j] for j in eachindex(x) if i != j) + end + else + for i in eachindex(x) + g[i] = total / x[i] + end + end + end + elseif op == :^ + @assert length(x) == 2 + if x[2] == one(T) + g[1] = one(T) + elseif x[2] == T(2) + g[1] = T(2) * x[1] + else + g[1] = x[2] * _nan_pow(x[1], x[2] - one(T)) + end + if x[1] > zero(T) + g[2] = _nan_pow(x[1], x[2]) * log(x[1]) + else + g[2] = T(NaN) + end + elseif op == :/ + @assert length(x) == 2 + g[1] = one(T) / x[2] + g[2] = -x[1] / x[2]^2 + elseif op == :ifelse + @assert length(x) == 3 + g[1] = zero(T) # It doesn't matter what this is. 
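+        # Only the active branch carries a derivative: g[2] is 1 when the
+        # condition held (x[1] == 1) and g[3] is 1 when it did not.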
+ g[2] = x[1] == one(T) + g[3] = x[1] == zero(T) + elseif op == :atan + @assert length(x) == 2 + base = x[1]^2 + x[2]^2 + g[1] = x[2] / base + g[2] = -x[1] / base + elseif op == :min + fill!(g, zero(T)) + _, i = findmin(x) + g[i] = one(T) + elseif op == :max + fill!(g, zero(T)) + _, i = findmax(x) + g[i] = one(T) + else + id = registry.multivariate_operator_to_id[op] + offset = id - registry.multivariate_user_operator_start + operator = registry.registered_multivariate_operators[offset] + @assert length(x) == operator.N + operator.∇f(g, x) + end + return +end + +function eval_comparison_function( + ::OperatorRegistry, + op::Symbol, + lhs::T, + rhs::T, +)::Bool where {T} + if op == :<= + return lhs <= rhs + elseif op == :>= + return lhs >= rhs + elseif op == :(==) + return lhs == rhs + elseif op == :< + return lhs < rhs + else + @assert op == :> + return lhs > rhs + end +end diff --git a/src/parse.jl b/src/parse.jl new file mode 100644 index 0000000..1b4db48 --- /dev/null +++ b/src/parse.jl @@ -0,0 +1,336 @@ +# Largely inspired by MathOptInterface/src/Nonlinear/parse.jl +# Most functions have been copy-pasted and slightly modified to adapt to small changes in OperatorRegistry and Model. + +function _parse_multivariate_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + @assert Meta.isexpr(x, :call) + id = get(data.operators.multivariate_operator_to_id, x.args[1], nothing) + if id === nothing + if haskey(data.operators.univariate_operator_to_id, x.args[1]) + # It may also be a unary variate operator with splatting. + _parse_univariate_expression(stack, data, expr, x, parent_index) + elseif x.args[1] in data.operators.comparison_operators + # Or it may be a binary (in)equality operator. 
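+            # for example, :(x <= 1) parses as Expr(:call, :<=, :x, 1).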
+ _parse_inequality_expression(stack, data, expr, x, parent_index) + else + throw(MOI.UnsupportedNonlinearOperator(x.args[1])) + end + return + end + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_CALL_MULTIVARIATE, + id, + parent_index, + ), + ) + for i in length(x.args):-1:2 + push!(stack, (length(expr.nodes), x.args[i])) + end + return +end + +function parse_expression( + ::Model, + expr::MOI.Nonlinear.Expression, + x::MOI.VariableIndex, + parent_index::Int, +) + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_MOI_VARIABLE, + x.value, + parent_index, + ), + ) + return +end + +function parse_expression(data::Model, input) + expr = MOI.Nonlinear.Expression() + parse_expression(data, expr, input, -1) + return expr +end + +function parse_expression( + ::Model, + expr::MOI.Nonlinear.Expression, + x::Real, + parent_index::Int, +) + push!(expr.values, convert(Float64, x)::Float64) + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_VALUE, + length(expr.values), + parent_index, + ), + ) + return +end + +function parse_expression( + ::Model, + expr::MOI.Nonlinear.Expression, + x::MOI.Nonlinear.ParameterIndex, + parent_index::Int, +) + push!( + expr.nodes, + MOI.Nonlinear.Node(MOI.Nonlinear.NODE_PARAMETER, x.value, parent_index), + ) + return +end + +function parse_expression( + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + stack = Tuple{Int,Any}[] + push!(stack, (parent_index, x)) + while !isempty(stack) + parent, item = pop!(stack) + if item isa Expr + _parse_expression(stack, data, expr, item, parent) + else + parse_expression(data, expr, item, parent) + end + end + return +end + +function parse_expression( + ::Model, + expr::Nonlinear.Expression, + x::Nonlinear.ExpressionIndex, + parent_index::Int, +) + push!( + expr.nodes, + Nonlinear.Node(Nonlinear.NODE_SUBEXPRESSION, x.value, parent_index), + ) + return +end + +function _parse_univariate_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + @assert Meta.isexpr(x, :call, 2) + id = get(data.operators.univariate_operator_to_id, x.args[1], nothing) + if id === nothing + # It may also be a multivariate operator like * with one argument. + if haskey(data.operators.multivariate_operator_to_id, x.args[1]) + _parse_multivariate_expression(stack, data, expr, x, parent_index) + return + end + throw(MOI.UnsupportedNonlinearOperator(x.args[1])) + end + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_CALL_UNIVARIATE, + id, + parent_index, + ), + ) + push!(stack, (length(expr.nodes), x.args[2])) + return +end + +function _parse_logic_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + id = data.operators.logic_operator_to_id[x.head] + push!( + expr.nodes, + MOI.Nonlinear.Node(MOI.Nonlinear.NODE_LOGIC, id, parent_index), + ) + parent_var = length(expr.nodes) + push!(stack, (parent_var, x.args[2])) + push!(stack, (parent_var, x.args[1])) + return +end + +function _parse_expression(stack, data, expr, x, parent_index) + if Meta.isexpr(x, :call) + if length(x.args) == 2 && !Meta.isexpr(x.args[2], :...) + _parse_univariate_expression(stack, data, expr, x, parent_index) + else + # The call is either n-ary, or it is a splat, in which case we + # cannot tell just yet whether the expression is unary or nary. + # Punt to multivariate and try to recover later. 
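+            # For example, :(+(x...)) has only two `args` at parse time, but
+            # the splat may expand to any arity at evaluation, so it cannot
+            # yet be classified as univariate.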
+ _parse_multivariate_expression(stack, data, expr, x, parent_index) + end + elseif Meta.isexpr(x, :comparison) + _parse_comparison_expression(stack, data, expr, x, parent_index) + elseif Meta.isexpr(x, :...) + MOI.Nonlinear._parse_splat_expression( + stack, + data, + expr, + x, + parent_index, + ) + elseif Meta.isexpr(x, :&&) || Meta.isexpr(x, :||) + _parse_logic_expression(stack, data, expr, x, parent_index) + elseif Meta.isexpr(x, :vect) + _parse_vect_expression(stack, data, expr, x, parent_index) + elseif Meta.isexpr(x, :hcat) + _parse_hcat_expression(stack, data, expr, x, parent_index) + elseif Meta.isexpr(x, :vcat) + _parse_vcat_expression(stack, data, expr, x, parent_index) + elseif Meta.isexpr(x, :row) + _parse_row_expression(stack, data, expr, x, parent_index) + else + error("Unsupported expression: $x") + end +end + +function _parse_comparison_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + for k in 2:2:(length(x.args)-1) + @assert x.args[k] == x.args[2] # don't handle a <= b >= c + end + operator_id = data.operators.comparison_operator_to_id[x.args[2]] + push!( + expr.nodes, + Nonlinear.Node(Nonlinear.NODE_COMPARISON, operator_id, parent_index), + ) + for i in length(x.args):-2:1 + push!(stack, (length(expr.nodes), x.args[i])) + end + return +end + +function _parse_vect_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + @assert Meta.isexpr(x, :vect) + id = get(data.operators.multivariate_operator_to_id, :vect, nothing) + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_CALL_MULTIVARIATE, + id, + parent_index, + ), + ) + for i in length(x.args):-1:1 + push!(stack, (length(expr.nodes), x.args[i])) + end + return +end + +function _parse_row_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + @assert Meta.isexpr(x, :row) + id = get(data.operators.multivariate_operator_to_id, :row, nothing) + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_CALL_MULTIVARIATE, + id, + parent_index, + ), + ) + for i in length(x.args):-1:1 + push!(stack, (length(expr.nodes), x.args[i])) + end + return +end + +function _parse_hcat_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + @assert Meta.isexpr(x, :hcat) + id = get(data.operators.multivariate_operator_to_id, :hcat, nothing) + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_CALL_MULTIVARIATE, + id, + parent_index, + ), + ) + for i in length(x.args):-1:1 + push!(stack, (length(expr.nodes), x.args[i])) + end + return +end + +function _parse_vcat_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::MOI.Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + @assert Meta.isexpr(x, :vcat) + id = get(data.operators.multivariate_operator_to_id, :vcat, nothing) + push!( + expr.nodes, + MOI.Nonlinear.Node( + MOI.Nonlinear.NODE_CALL_MULTIVARIATE, + id, + parent_index, + ), + ) + for i in length(x.args):-1:1 + push!(stack, (length(expr.nodes), x.args[i])) + end + return +end + +function _parse_inequality_expression( + stack::Vector{Tuple{Int,Any}}, + data::Model, + expr::Nonlinear.Expression, + x::Expr, + parent_index::Int, +) + operator_id = data.operators.comparison_operator_to_id[x.args[1]] + push!( + expr.nodes, + Nonlinear.Node(Nonlinear.NODE_COMPARISON, operator_id, 
parent_index), + ) + for i in length(x.args):-1:2 + push!(stack, (length(expr.nodes), x.args[i])) + end + return +end diff --git a/src/reverse_mode.jl b/src/reverse_mode.jl index 505421a..9bfba6e 100644 --- a/src/reverse_mode.jl +++ b/src/reverse_mode.jl @@ -29,6 +29,10 @@ single pass through the tree by iterating forwards through the vector of stored nodes. """ function _reverse_mode(d::NLPEvaluator, x) + # Because the operators are checked with `Int` and not `Symbol` + # if we get a model that didn't add our new operators but had user-defined + # operators, we will think that these are one of our new operators + @assert :vect in d.data.operators.multivariate_operators if d.last_x == x # Fail fast if the primal solution has not changed since last call. return @@ -357,12 +361,12 @@ function _forward_eval( f_input[r] = f.forward_storage[children_arr[i]] ∇f[r] = 0.0 end - f.forward_storage[k] = Nonlinear.eval_multivariate_function( + f.forward_storage[k] = eval_multivariate_function( operators, operators.multivariate_operators[node.index], f_input, ) - Nonlinear.eval_multivariate_gradient( + eval_multivariate_gradient( operators, operators.multivariate_operators[node.index], ∇f, @@ -387,7 +391,7 @@ function _forward_eval( @j f.forward_storage[k] = -val end else - ret_f, ret_f′ = Nonlinear.eval_univariate_function_and_gradient( + ret_f, ret_f′ = eval_univariate_function_and_gradient( operators, node.index, f.forward_storage[child_idx], @@ -402,7 +406,7 @@ function _forward_eval( for r in 2:length(children_idx) lhs = children_arr[children_idx[r-1]] rhs = children_arr[children_idx[r]] - result &= Nonlinear.eval_comparison_function( + result &= eval_comparison_function( operators, operators.comparison_operators[node.index], f.forward_storage[lhs], @@ -416,7 +420,7 @@ function _forward_eval( children_idx = SparseArrays.nzrange(f.adj, k) lhs = children_arr[children_idx[1]] rhs = children_arr[children_idx[2]] - f.forward_storage[k] = Nonlinear.eval_logic_function( + f.forward_storage[k] = eval_logic_function( operators, operators.logic_operators[node.index], f.forward_storage[lhs] == 1, @@ -452,9 +456,8 @@ function _reverse_eval(f::_SubexpressionStorage) node = f.nodes[k] children_indices = SparseArrays.nzrange(f.adj, k) if node.type == MOI.Nonlinear.NODE_CALL_MULTIVARIATE - if node.index in - eachindex(MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS) - op = MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS[node.index] + if node.index in eachindex(DEFAULT_MULTIVARIATE_OPERATORS) + op = DEFAULT_MULTIVARIATE_OPERATORS[node.index] if op == :* if f.sizes.ndims[k] != 0 # Node `k` is not scalar, so we do matrix multiplication diff --git a/src/sizes.jl b/src/sizes.jl index 067736b..819cf5b 100644 --- a/src/sizes.jl +++ b/src/sizes.jl @@ -163,14 +163,11 @@ function _infer_sizes( children_indices = SparseArrays.nzrange(adj, k) N = length(children_indices) if node.type == Nonlinear.NODE_CALL_MULTIVARIATE - if !( - node.index in - eachindex(MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS) - ) + if !(node.index in eachindex(DEFAULT_MULTIVARIATE_OPERATORS)) # TODO user-defined operators continue end - op = MOI.Nonlinear.DEFAULT_MULTIVARIATE_OPERATORS[node.index] + op = DEFAULT_MULTIVARIATE_OPERATORS[node.index] if op == :vect _assert_scalar_children( sizes, diff --git a/src/types.jl b/src/types.jl index 4f1f19a..9f5bd2c 100644 --- a/src/types.jl +++ b/src/types.jl @@ -133,6 +133,162 @@ struct _FunctionStorage end end +struct OperatorRegistry + # NODE_CALL_UNIVARIATE + univariate_operators::Vector{Symbol} + 
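# Maps an operator symbol to its id, i.e. its index in `univariate_operators`.
+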
univariate_operator_to_id::Dict{Symbol,Int} + univariate_user_operator_start::Int + registered_univariate_operators::Vector{MOI.Nonlinear._UnivariateOperator} + # NODE_CALL_MULTIVARIATE + multivariate_operators::Vector{Symbol} + multivariate_operator_to_id::Dict{Symbol,Int} + multivariate_user_operator_start::Int + registered_multivariate_operators::Vector{ + MOI.Nonlinear._MultivariateOperator, + } + # NODE_LOGIC + logic_operators::Vector{Symbol} + logic_operator_to_id::Dict{Symbol,Int} + # NODE_COMPARISON + comparison_operators::Vector{Symbol} + comparison_operator_to_id::Dict{Symbol,Int} + function OperatorRegistry() + univariate_operators = copy(MOI.Nonlinear.DEFAULT_UNIVARIATE_OPERATORS) + multivariate_operators = copy(DEFAULT_MULTIVARIATE_OPERATORS) + logic_operators = [:&&, :||] + comparison_operators = [:<=, :(==), :>=, :<, :>] + return new( + # NODE_CALL_UNIVARIATE + univariate_operators, + Dict{Symbol,Int}( + op => i for (i, op) in enumerate(univariate_operators) + ), + length(univariate_operators), + MOI.Nonlinear._UnivariateOperator[], + # NODE_CALL + multivariate_operators, + Dict{Symbol,Int}( + op => i for (i, op) in enumerate(multivariate_operators) + ), + length(multivariate_operators), + MOI.Nonlinear._MultivariateOperator[], + # NODE_LOGIC + logic_operators, + Dict{Symbol,Int}(op => i for (i, op) in enumerate(logic_operators)), + # NODE_COMPARISON + comparison_operators, + Dict{Symbol,Int}( + op => i for (i, op) in enumerate(comparison_operators) + ), + ) + end +end + +""" + Model() + +The core datastructure for representing a nonlinear optimization problem. + +It has the following fields: + * `objective::Union{Nothing,Expression}` : holds the nonlinear objective + function, if one exists, otherwise `nothing`. + * `expressions::Vector{Expression}` : a vector of expressions in the model. + * `constraints::OrderedDict{ConstraintIndex,Constraint}` : a map from + [`ConstraintIndex`](@ref) to the corresponding [`Constraint`](@ref). An + `OrderedDict` is used instead of a `Vector` to support constraint deletion. + * `parameters::Vector{Float64}` : holds the current values of the parameters. + * `operators::OperatorRegistry` : stores the operators used in the model. +""" +mutable struct Model + objective::Union{Nothing,MOI.Nonlinear.Expression} + expressions::Vector{MOI.Nonlinear.Expression} + constraints::OrderedDict{ + MOI.Nonlinear.ConstraintIndex, + MOI.Nonlinear.Constraint, + } + parameters::Vector{Float64} + operators::OperatorRegistry + # This is a private field, used only to increment the ConstraintIndex. + last_constraint_index::Int64 + function Model() + model = new( + nothing, + MOI.Nonlinear.Expression[], + OrderedDict{ + MOI.Nonlinear.ConstraintIndex, + MOI.Nonlinear.Constraint, + }(), + Float64[], + OperatorRegistry(), + 0, + ) + return model + end +end + +mutable struct Evaluator{B} <: MOI.AbstractNLPEvaluator + # The internal datastructure. + model::Model + # The abstract-differentiation backend + backend::B + # ordered_constraints is needed because `OrderedDict` doesn't support + # looking up a key by the linear index. + ordered_constraints::Vector{MOI.Nonlinear.ConstraintIndex} + # Storage for the NLPBlockDual, so that we can query the dual of individual + # constraints without needing to query the full vector each time. 
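+    # This mirrors the field of the same name in `MOI.Nonlinear.Evaluator`.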
+ constraint_dual::Vector{Float64} + # Timers + initialize_timer::Float64 + eval_objective_timer::Float64 + eval_constraint_timer::Float64 + eval_objective_gradient_timer::Float64 + eval_constraint_gradient_timer::Float64 + eval_constraint_jacobian_timer::Float64 + eval_hessian_objective_timer::Float64 + eval_hessian_constraint_timer::Float64 + eval_hessian_lagrangian_timer::Float64 + + function Evaluator( + model::Model, + backend::B = nothing, + ) where {B<:Union{Nothing,MOI.AbstractNLPEvaluator}} + return new{B}( + model, + backend, + MOI.ConstraintIndex[], + Float64[], + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + 0.0, + ) + end +end + +""" + MOI.NLPBlockData(evaluator::Evaluator) + +Create an [`MOI.NLPBlockData`](@ref) object from an [`Evaluator`](@ref) +object. +""" +function MOI.NLPBlockData(evaluator::Evaluator) + return MOI.NLPBlockData( + [_bound(c.set) for (_, c) in evaluator.model.constraints], + evaluator, + evaluator.model.objective !== nothing, + ) +end + +_bound(s::MOI.LessThan) = MOI.NLPBoundsPair(-Inf, s.upper) +_bound(s::MOI.GreaterThan) = MOI.NLPBoundsPair(s.lower, Inf) +_bound(s::MOI.EqualTo) = MOI.NLPBoundsPair(s.value, s.value) +_bound(s::MOI.Interval) = MOI.NLPBoundsPair(s.lower, s.upper) + """ NLPEvaluator( model::Nonlinear.Model, @@ -146,7 +302,7 @@ interface. Before using, you must initialize the evaluator using `MOI.initialize`. """ mutable struct NLPEvaluator <: MOI.AbstractNLPEvaluator - data::Nonlinear.Model + data::Model ordered_variables::Vector{MOI.VariableIndex} objective::Union{Nothing,_FunctionStorage} @@ -182,7 +338,7 @@ mutable struct NLPEvaluator <: MOI.AbstractNLPEvaluator max_chunk::Int # chunk size for which we've allocated storage function NLPEvaluator( - data::Nonlinear.Model, + data::Model, ordered_variables::Vector{MOI.VariableIndex}, ) return new(data, ordered_variables) diff --git a/test/ArrayDiff.jl b/test/ArrayDiff.jl index 4b90f3b..1af6554 100644 --- a/test/ArrayDiff.jl +++ b/test/ArrayDiff.jl @@ -22,10 +22,10 @@ function runtests() end function test_objective_dot_univariate() - model = Nonlinear.Model() + model = ArrayDiff.Model() x = MOI.VariableIndex(1) - Nonlinear.set_objective(model, :(dot([$x], [$x]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + ArrayDiff.set_objective(model, :(dot([$x], [$x]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) MOI.initialize(evaluator, [:Grad, :Hess]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 1, 0, 1, 0] @@ -41,10 +41,10 @@ function test_objective_dot_univariate() end function test_objective_dot_univariate_and_scalar_mult() - model = Nonlinear.Model() + model = ArrayDiff.Model() x = MOI.VariableIndex(1) - Nonlinear.set_objective(model, :(2*(dot([$x], [$x])))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + ArrayDiff.set_objective(model, :(2*(dot([$x], [$x])))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 0, 0, 1, 0, 1, 0] @@ -60,14 +60,14 @@ function test_objective_dot_univariate_and_scalar_mult() end function test_objective_dot_bivariate() - model = Nonlinear.Model() + model = ArrayDiff.Model() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - Nonlinear.set_objective( + ArrayDiff.set_objective( model, :(dot([$x, $y] - [1, 2], -[1, 2] + [$x, $y])), ) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, 
y]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0] @@ -84,13 +84,13 @@ function test_objective_dot_bivariate() end function test_objective_hcat_scalars() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective(model, :(dot([$x1 $x3], [$x2 $x4]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + ArrayDiff.set_objective(model, :(dot([$x1 $x3], [$x2 $x4]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 0, 0, 2, 0, 0] @@ -109,16 +109,16 @@ function test_objective_hcat_scalars() end function test_objective_hcat_vectors() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective( + ArrayDiff.set_objective( model, :(dot(hcat([$x1], [$x3]), hcat([$x2], [$x4]))), ) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 1, 0, 1, 0, 2, 1, 0, 1, 0] @@ -137,11 +137,11 @@ function test_objective_hcat_vectors() end function test_objective_dot_bivariate_on_rows() - model = Nonlinear.Model() + model = ArrayDiff.Model() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - Nonlinear.set_objective(model, :(dot([$x $y] - [1 2], -[1 2] + [$x $y]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + ArrayDiff.set_objective(model, :(dot([$x $y] - [1 2], -[1 2] + [$x $y]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 2, 0, 0, 2, 0, 0, 2, 2, 2, 0, 0, 2, 0, 0] @@ -159,10 +159,10 @@ function test_objective_dot_bivariate_on_rows() end function test_objective_norm_univariate() - model = Nonlinear.Model() + model = ArrayDiff.Model() x = MOI.VariableIndex(1) - Nonlinear.set_objective(model, :(norm([$x]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + ArrayDiff.set_objective(model, :(norm([$x]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 1, 0] @@ -178,11 +178,11 @@ function test_objective_norm_univariate() end function test_objective_norm_bivariate() - model = Nonlinear.Model() + model = ArrayDiff.Model() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - Nonlinear.set_objective(model, :(norm([$x, $y]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + ArrayDiff.set_objective(model, :(norm([$x, $y]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 1, 0, 0] @@ -203,11 +203,11 @@ function test_objective_norm_bivariate() end function test_objective_norm_of_row_vector() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) - Nonlinear.set_objective(model, :(norm([$x1 $x2]))) - evaluator = 
Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2]) + ArrayDiff.set_objective(model, :(norm([$x1 $x2]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 0, 0] @@ -224,13 +224,13 @@ function test_objective_norm_of_row_vector() end function test_objective_norm_of_vcat_vector() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective(model, :(norm(vcat($x1, $x3)))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + ArrayDiff.set_objective(model, :(norm(vcat($x1, $x3)))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 0, 0] @@ -249,13 +249,13 @@ function test_objective_norm_of_vcat_vector() end function test_objective_norm_of_vcat_matrix() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective(model, :(norm(vcat([$x1 $x3], [$x2 $x4])))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + ArrayDiff.set_objective(model, :(norm(vcat([$x1 $x3], [$x2 $x4])))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 2, 0, 0, 2, 0, 0] @@ -279,11 +279,11 @@ function test_objective_norm_of_vcat_matrix() end function test_objective_norm_of_row() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) - Nonlinear.set_objective(model, :(norm(row($x1, $x2)))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2]) + ArrayDiff.set_objective(model, :(norm(row($x1, $x2)))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 0, 0] @@ -300,13 +300,13 @@ function test_objective_norm_of_row() end function test_objective_norm_of_matrix() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective(model, :(norm([$x1 $x2; $x3 $x4]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + ArrayDiff.set_objective(model, :(norm([$x1 $x2; $x3 $x4]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 2, 0, 0, 2, 0, 0] @@ -330,13 +330,13 @@ function test_objective_norm_of_matrix() end function test_objective_norm_of_matrix_with_sum() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective(model, :(norm([$x1 $x2; $x3 $x4] - [1 1; 1 1]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + ArrayDiff.set_objective(model, :(norm([$x1 $x2; $x3 $x4] - [1 1; 1 1]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = 
evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 2, 2, 0, 0, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0] @@ -357,13 +357,13 @@ function test_objective_norm_of_matrix_with_sum() end function test_objective_norm_of_product_of_matrices() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective(model, :(norm([$x1 $x2; $x3 $x4] * [1 0; 0 1]))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + ArrayDiff.set_objective(model, :(norm([$x1 $x2; $x3 $x4] * [1 0; 0 1]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 2, 2, 0, 0, 2, 0, 0, 2, 2, 0, 0, 2, 0, 0] @@ -389,16 +389,16 @@ function test_objective_norm_of_product_of_matrices() end function test_objective_norm_of_product_of_matrices_with_sum() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective( + ArrayDiff.set_objective( model, :(norm(([$x1 $x2; $x3 $x4] + [1 1; 1 1]) * [1 0; 0 1])), ) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [ @@ -499,13 +499,13 @@ function test_objective_norm_of_product_of_matrices_with_sum() end function test_objective_norm_of_mtx_vector_product() - model = Nonlinear.Model() + model = ArrayDiff.Model() x1 = MOI.VariableIndex(1) x2 = MOI.VariableIndex(2) x3 = MOI.VariableIndex(3) x4 = MOI.VariableIndex(4) - Nonlinear.set_objective(model, :(norm(([$x1 $x2; $x3 $x4] * [1; 1])))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) + ArrayDiff.set_objective(model, :(norm([$x1 $x2; $x3 $x4] * [1; 1]))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x1, x2, x3, x4]) MOI.initialize(evaluator, [:Grad]) sizes = evaluator.backend.objective.expr.sizes @test sizes.ndims == [0, 2, 2, 2, 0, 0, 2, 0, 0, 2, 0, 0] diff --git a/test/Project.toml b/test/Project.toml index 7ed96af..d66f113 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -4,3 +4,4 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" \ No newline at end of file diff --git a/test/ReverseAD.jl b/test/ReverseAD.jl index 552ff3f..eb204fe 100644 --- a/test/ReverseAD.jl +++ b/test/ReverseAD.jl @@ -29,9 +29,9 @@ end function test_objective_quadratic_univariate() x = MOI.VariableIndex(1) - model = Nonlinear.Model() - Nonlinear.set_objective(model, :($x^2 + 1)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + model = ArrayDiff.Model() + ArrayDiff.set_objective(model, :($x^2 + 1)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) @test MOI.eval_objective(evaluator, [1.2]) == 1.2^2 + 1 g = [NaN] @@ -59,10 +59,10 @@ end function test_objective_and_constraints_quadratic_univariate() x = MOI.VariableIndex(1) - model = Nonlinear.Model() - Nonlinear.set_objective(model, :($x^2 + 1)) - 
Nonlinear.add_constraint(model, :($x^2), MOI.LessThan(2.0)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + model = ArrayDiff.Model() + ArrayDiff.set_objective(model, :($x^2 + 1)) + ArrayDiff.add_constraint(model, :($x^2), MOI.LessThan(2.0)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) @test MOI.eval_objective(evaluator, [1.2]) == 1.2^2 + 1 g = [NaN] @@ -96,9 +96,9 @@ end function test_objective_quadratic_multivariate() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - model = Nonlinear.Model() - Nonlinear.set_objective(model, :($x^2 + $x * $y + $y^2)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + model = ArrayDiff.Model() + ArrayDiff.set_objective(model, :($x^2 + $x * $y + $y^2)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) @test MOI.eval_objective(evaluator, [1.2, 2.3]) == 1.2^2 + 1.2 * 2.3 + 2.3^2 g = [NaN, NaN] @@ -130,12 +130,12 @@ end function test_objective_quadratic_multivariate_subexpressions() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - model = Nonlinear.Model() - ex = Nonlinear.add_expression(model, :($x^2)) - ey = Nonlinear.add_expression(model, :($y^2)) - exy = Nonlinear.add_expression(model, :($ex + $x * $y)) - Nonlinear.set_objective(model, :($exy + $ey)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + model = ArrayDiff.Model() + ex = ArrayDiff.add_expression(model, :($x^2)) + ey = ArrayDiff.add_expression(model, :($y^2)) + exy = ArrayDiff.add_expression(model, :($ex + $x * $y)) + ArrayDiff.set_objective(model, :($exy + $ey)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) val = [1.2, 2.3] @test MOI.eval_objective(evaluator, val) == 1.2^2 + 1.2 * 2.3 + 2.3^2 @@ -175,9 +175,9 @@ end function test_objective_ifelse_comparison() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - model = Nonlinear.Model() - Nonlinear.set_objective(model, :(ifelse(1 <= $x <= 2, $x^2, $y^2))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + model = ArrayDiff.Model() + ArrayDiff.set_objective(model, :(ifelse(1 <= $x <= 2, $x^2, $y^2))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) @test MOI.eval_objective(evaluator, [1.2, 2.3]) == 1.2^2 @test MOI.eval_objective(evaluator, [2.2, 2.3]) == 2.3^2 @@ -192,9 +192,9 @@ end function test_objective_ifelse_logic() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - model = Nonlinear.Model() - Nonlinear.set_objective(model, :(ifelse(1 <= $x && $x <= 2, $x^2, $y^2))) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + model = ArrayDiff.Model() + ArrayDiff.set_objective(model, :(ifelse(1 <= $x && $x <= 2, $x^2, $y^2))) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) @test MOI.eval_objective(evaluator, [1.2, 2.3]) == 1.2^2 @test MOI.eval_objective(evaluator, [2.2, 2.3]) == 2.3^2 @@ -208,10 +208,10 @@ end function test_objective_parameter() x = MOI.VariableIndex(1) - model = Nonlinear.Model() - p = Nonlinear.add_parameter(model, 1.2) - Nonlinear.set_objective(model, :($p * $x)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + model = ArrayDiff.Model() + p = ArrayDiff.add_parameter(model, 1.2) + ArrayDiff.set_objective(model, :($p * $x)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) 
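+    # Per the MOI contract, `MOI.initialize` must be called before any query;
+    # requesting :Grad, :Jac, and :Hess exercises all three derivative paths.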
MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) @test MOI.eval_objective(evaluator, [1.3]) == 1.2 * 1.3 g = [NaN] @@ -221,13 +221,13 @@ function test_objective_parameter() end function test_objective_subexpression() - model = Nonlinear.Model() + model = ArrayDiff.Model() x = MOI.VariableIndex(1) input = :($x^2 + 1) - expr = Nonlinear.add_expression(model, input) - expr_2 = Nonlinear.add_expression(model, :($expr^2)) - Nonlinear.set_objective(model, :($expr_2^2)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + expr = ArrayDiff.add_expression(model, input) + expr_2 = ArrayDiff.add_expression(model, :($expr^2)) + ArrayDiff.set_objective(model, :($expr_2^2)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) MOI.initialize(evaluator, [:Grad]) @test MOI.eval_objective(evaluator, [1.3]) == ((1.3^2 + 1)^2)^2 g = [NaN] @@ -238,9 +238,9 @@ end function test_constraint_quadratic_univariate() x = MOI.VariableIndex(1) - model = Nonlinear.Model() - Nonlinear.add_constraint(model, :($x^2), MOI.LessThan(2.0)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x]) + model = ArrayDiff.Model() + ArrayDiff.add_constraint(model, :($x^2), MOI.LessThan(2.0)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) g = [NaN] x_val = [1.2] @@ -264,9 +264,9 @@ end function test_constraint_quadratic_multivariate() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - model = Nonlinear.Model() - Nonlinear.add_constraint(model, :($x^2 + $x * $y + $y^2), MOI.LessThan(2.0)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + model = ArrayDiff.Model() + ArrayDiff.add_constraint(model, :($x^2 + $x * $y + $y^2), MOI.LessThan(2.0)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) g = [NaN] x_val = [1.2, 2.3] @@ -287,12 +287,12 @@ end function test_constraint_quadratic_multivariate_subexpressions() x = MOI.VariableIndex(1) y = MOI.VariableIndex(2) - model = Nonlinear.Model() - ex = Nonlinear.add_expression(model, :($x^2)) - ey = Nonlinear.add_expression(model, :($y^2)) - exy = Nonlinear.add_expression(model, :($ex + $x * $y)) - Nonlinear.add_constraint(model, :($exy + $ey), MOI.LessThan(2.0)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y]) + model = ArrayDiff.Model() + ex = ArrayDiff.add_expression(model, :($x^2)) + ey = ArrayDiff.add_expression(model, :($y^2)) + exy = ArrayDiff.add_expression(model, :($ex + $x * $y)) + ArrayDiff.add_constraint(model, :($exy + $ey), MOI.LessThan(2.0)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y]) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) g = [NaN] x_val = [1.2, 2.3] @@ -336,11 +336,11 @@ function test_hessian_sparsity_registered_function() H[2, 2] = 2 return end - model = Nonlinear.Model() - Nonlinear.register_operator(model, :f, 2, f, ∇f, ∇²f) - Nonlinear.set_objective(model, :(f($x, $z) + $y^2)) - evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y, z]) - @test :Hess in MOI.features_available(evaluator) + model = ArrayDiff.Model() + ArrayDiff.register_operator(model, :f, 2, f, ∇f, ∇²f) + ArrayDiff.set_objective(model, :(f($x, $z) + $y^2)) + evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y, z]) + @test :Hess in ArrayDiff.features_available(evaluator) MOI.initialize(evaluator, [:Grad, :Jac, :Hess]) @test MOI.hessian_lagrangian_structure(evaluator) == [(1, 1), (2, 2), (3, 3), (3, 1)] @@ -366,11 +366,11 @@ function 
test_hessian_sparsity_registered_rosenbrock()
         H[2, 2] = 200.0
         return
     end
-    model = Nonlinear.Model()
-    Nonlinear.register_operator(model, :rosenbrock, 2, f, ∇f, ∇²f)
-    Nonlinear.set_objective(model, :(rosenbrock($x, $y)))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y])
-    @test :Hess in MOI.features_available(evaluator)
+    model = ArrayDiff.Model()
+    ArrayDiff.register_operator(model, :rosenbrock, 2, f, ∇f, ∇²f)
+    ArrayDiff.set_objective(model, :(rosenbrock($x, $y)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y])
+    @test :Hess in ArrayDiff.features_available(evaluator)
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     @test MOI.hessian_lagrangian_structure(evaluator) ==
           [(1, 1), (2, 2), (2, 1)]
@@ -396,10 +396,10 @@ function test_hessian_registered_error()
         H[2, 2] = 200.0
         return
     end
-    model = Nonlinear.Model()
-    Nonlinear.register_operator(model, :rosenbrock, 2, f, ∇f, ∇²f)
-    Nonlinear.set_objective(model, :(rosenbrock($x, $y)))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y])
+    model = ArrayDiff.Model()
+    ArrayDiff.register_operator(model, :rosenbrock, 2, f, ∇f, ∇²f)
+    ArrayDiff.set_objective(model, :(rosenbrock($x, $y)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     H = fill(NaN, 3)
     @test_throws(
@@ -494,9 +494,9 @@ end
 function test_derivatives()
     a = MOI.VariableIndex(1)
     b = MOI.VariableIndex(2)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(sin($a^2) + cos($b * 4) / 5 - 2.0))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [a, b])
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(sin($a^2) + cos($b * 4) / 5 - 2.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [a, b])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     x = [2.0, 3.0]
     @test MOI.eval_objective(evaluator, x) ==
@@ -516,13 +516,13 @@ function test_derivatives()
 end

 function test_NLPBlockData()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex(1)
-    Nonlinear.add_constraint(model, :($x - 1), MOI.LessThan(0.0))
-    Nonlinear.add_constraint(model, :($x - 2), MOI.GreaterThan(0.0))
-    Nonlinear.add_constraint(model, :($x - 3), MOI.EqualTo(0.0))
-    Nonlinear.add_constraint(model, :($x), MOI.Interval(4.0, 5.0))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    ArrayDiff.add_constraint(model, :($x - 1), MOI.LessThan(0.0))
+    ArrayDiff.add_constraint(model, :($x - 2), MOI.GreaterThan(0.0))
+    ArrayDiff.add_constraint(model, :($x - 3), MOI.EqualTo(0.0))
+    ArrayDiff.add_constraint(model, :($x), MOI.Interval(4.0, 5.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     block = MOI.NLPBlockData(evaluator)
     @test block.constraint_bounds == [
         MOI.NLPBoundsPair(-Inf, 0.0),
@@ -540,8 +540,8 @@ function test_linearity()
     z = MOI.VariableIndex(3)
     variables = Dict(x => 1, y => 2, z => 3)
     function _test_linearity(input, test_value, IJ = [], indices = [])
-        model = Nonlinear.Model()
-        ex = Nonlinear.add_expression(model, input)
+        model = ArrayDiff.Model()
+        ex = ArrayDiff.add_expression(model, input)
         expr = model[ex]
         adj = Nonlinear.adjacency_matrix(expr.nodes)
         nodes = ArrayDiff._replace_moi_variables(expr.nodes, variables)
@@ -631,10 +631,10 @@ end
 function test_linearity_no_hess()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    ex = Nonlinear.add_expression(model, :($x + 1))
-    Nonlinear.set_objective(model, ex)
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    model = ArrayDiff.Model()
+    ex = ArrayDiff.add_expression(model, :($x + 1))
+    ArrayDiff.set_objective(model, ex)
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad, :Jac])
     # We initialized without the need for the hessian so
     # the linearity shouldn't be computed.
@@ -647,9 +647,9 @@ function test_dual_forward()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
     function _test_dual_forward(input, x_input, test_value)
-        model = Nonlinear.Model()
-        Nonlinear.set_objective(model, input)
-        evaluator = Nonlinear.Evaluator(
+        model = ArrayDiff.Model()
+        ArrayDiff.set_objective(model, input)
+        evaluator = ArrayDiff.Evaluator(
             model,
             ArrayDiff.Mode(),
             MOI.VariableIndex[x, y],
@@ -687,18 +687,18 @@ function test_gradient_registered_function()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
     z = MOI.VariableIndex(3)
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     f(x, y) = (1 / 3) * y^3 - 2x^2
     function ∇f(g, x, y)
         g[1] = -4x
         g[2] = y^2
         return
     end
-    Nonlinear.register_operator(model, :Φ, 2, f, ∇f)
-    Nonlinear.register_operator(model, :c, 1, cos, x -> -sin(x), x -> -cos(x))
-    Nonlinear.set_objective(model, :(Φ($y, $x - 1) * c($z)))
+    ArrayDiff.register_operator(model, :Φ, 2, f, ∇f)
+    ArrayDiff.register_operator(model, :c, 1, cos, x -> -sin(x), x -> -cos(x))
+    ArrayDiff.set_objective(model, :(Φ($y, $x - 1) * c($z)))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y, z])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y, z])
     MOI.initialize(evaluator, [:Grad])
     ∇f = fill(NaN, 3)
     x_input = [2.0, 3.0, 4.0]
@@ -714,13 +714,13 @@ end
 function test_gradient_jump_855()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(
         model,
         :(ifelse($x <= 3.0, ($x - 2.0)^2, 2 * log($x - 2.0) + 1.0)),
     )
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
     MOI.initialize(evaluator, [:Grad])
     ∇f = fill(NaN, 1)
     MOI.eval_objective_gradient(evaluator, ∇f, [-1.0])
@@ -732,10 +732,10 @@ end
 function test_gradient_abs()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(abs($x)))
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(abs($x)))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
     MOI.initialize(evaluator, [:Grad])
     ∇f = fill(NaN, 1)
     MOI.eval_objective_gradient(evaluator, ∇f, [2.0])
@@ -748,10 +748,10 @@ end
 function test_gradient_trig()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(sin($x^2) + cos($y * 4) / 5 - 2.0))
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(sin($x^2) + cos($y * 4) / 5 - 2.0))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y])
     MOI.initialize(evaluator, [:Grad])
     ∇f = fill(NaN, 2)
     MOI.eval_objective_gradient(evaluator, ∇f, [2.0, 3.0])
@@ -761,10 +761,10 @@ end
 function test_gradient_logical()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :($x > 0.5 && $x < 0.9))
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :($x > 0.5 && $x < 0.9))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
     MOI.initialize(evaluator, [:Grad])
     @test MOI.eval_objective(evaluator, [1.5]) == 0.0
     ∇f = fill(NaN, 1)
@@ -775,10 +775,10 @@ end
 function test_gradient_ifelse()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(ifelse($x >= 0.5 || $x < 0.1, $x, 5)))
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(ifelse($x >= 0.5 || $x < 0.1, $x, 5)))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
     MOI.initialize(evaluator, [:Grad])
     @test MOI.eval_objective(evaluator, [1.5]) == 1.5
     ∇f = fill(NaN, 1)
@@ -795,10 +795,10 @@ end
 function test_gradient_sqrt_nan()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(sqrt($x)))
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(sqrt($x)))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
     MOI.initialize(evaluator, [:Grad])
     @test isnan(MOI.eval_objective(evaluator, [-1.5]))
     ∇f = fill(Inf, 1)
@@ -811,10 +811,10 @@ function test_gradient_variable_power()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
     z = MOI.VariableIndex(3)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :((1 / $x)^$y - $z))
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :((1 / $x)^$y - $z))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y, z])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y, z])
     MOI.initialize(evaluator, [:Grad])
     x_input = [2.5, 3.5, 1.0]
     @test MOI.eval_objective(evaluator, x_input) == (1 / 2.5)^3.5 - 1.0
@@ -830,11 +830,11 @@ end
 function test_single_parameter()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    p = Nonlinear.add_parameter(model, 105.2)
-    Nonlinear.set_objective(model, :($p))
+    model = ArrayDiff.Model()
+    p = ArrayDiff.add_parameter(model, 105.2)
+    ArrayDiff.set_objective(model, :($p))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x])
     MOI.initialize(evaluator, [:Grad])
     @test MOI.eval_objective(evaluator, [-0.1]) == 105.2
     return
@@ -843,12 +843,12 @@ end
 function test_gradient_nested_subexpressions()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
-    model = Nonlinear.Model()
-    ex1 = Nonlinear.add_expression(model, :(sin($x^2) + cos($y * 4) / 5 - 2.0))
-    ex2 = Nonlinear.add_expression(model, :($ex1))
-    Nonlinear.set_objective(model, ex2)
+    model = ArrayDiff.Model()
+    ex1 = ArrayDiff.add_expression(model, :(sin($x^2) + cos($y * 4) / 5 - 2.0))
+    ex2 = ArrayDiff.add_expression(model, :($ex1))
+    ArrayDiff.set_objective(model, ex2)
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y])
     MOI.initialize(evaluator, [:Grad])
     ∇f = fill(NaN, 2)
     MOI.eval_objective_gradient(evaluator, ∇f, [2.0, 3.0])
@@ -859,10 +859,10 @@ end
 function test_gradient_view()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(($x - 1)^2 + 4 * ($y - $x^2)^2))
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(($x - 1)^2 + 4 * ($y - $x^2)^2))
     evaluator =
-        Nonlinear.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y])
+        ArrayDiff.Evaluator(model, ArrayDiff.Mode(), MOI.VariableIndex[x, y])
     MOI.initialize(evaluator, [:Grad])
     X_input = [0.0; 1.0; 2.0; 3.0]
     for idx in [1:2, 3:4, [1, 3], 4:-2:2]
@@ -902,10 +902,10 @@ function test_odd_chunks_Hessian_products()
 end

 function _test_odd_chunks_Hessian_products(N)
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex.(1:N)
-    Nonlinear.set_objective(model, Expr(:call, :*, x...))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), x)
+    ArrayDiff.set_objective(model, Expr(:call, :*, x...))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), x)
     MOI.initialize(evaluator, [:Hess])
     hessian_sparsity = MOI.hessian_lagrangian_structure(evaluator)
     V = zeros(length(hessian_sparsity))
@@ -929,13 +929,13 @@ function _dense_jacobian(jacobian_sparsity, V, m, n)
 end

 function test_jacobians_and_jacvec()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex.(1:3)
     a, b, c = x
-    Nonlinear.set_objective(model, :($a * $b + $c^2))
-    Nonlinear.add_constraint(model, :($c * $b), MOI.LessThan(1.0))
-    Nonlinear.add_constraint(model, :($a^2 / 2), MOI.LessThan(1.0))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), x)
+    ArrayDiff.set_objective(model, :($a * $b + $c^2))
+    ArrayDiff.add_constraint(model, :($c * $b), MOI.LessThan(1.0))
+    ArrayDiff.add_constraint(model, :($a^2 / 2), MOI.LessThan(1.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), x)
     MOI.initialize(evaluator, [:Jac, :JacVec])
     values = [1.0, 2.0, 3.0] # For a, b, c.
     jacobian_sparsity = MOI.jacobian_structure(evaluator)
@@ -960,14 +960,14 @@ function test_jacobians_and_jacvec()
 end

 function test_jacobians_and_jacvec_with_subexpressions()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex.(1:3)
     a, b, c = x
-    bc = Nonlinear.add_expression(model, :($b * $c))
-    Nonlinear.set_objective(model, :($a * $b + $c^2))
-    Nonlinear.add_constraint(model, :($bc), MOI.LessThan(1.0))
-    Nonlinear.add_constraint(model, :($a^2 / 2), MOI.LessThan(1.0))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), x)
+    bc = ArrayDiff.add_expression(model, :($b * $c))
+    ArrayDiff.set_objective(model, :($a * $b + $c^2))
+    ArrayDiff.add_constraint(model, :($bc), MOI.LessThan(1.0))
+    ArrayDiff.add_constraint(model, :($a^2 / 2), MOI.LessThan(1.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), x)
     MOI.initialize(evaluator, [:Jac, :JacVec])
     values = [1.0, 2.0, 3.0] # For a, b, c.
     jacobian_sparsity = MOI.jacobian_structure(evaluator)
@@ -993,9 +993,9 @@ end
 function test_pow_complex_result()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(ifelse($x > 0, $x^1.5, -(-$x)^1.5)))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(ifelse($x > 0, $x^1.5, -(-$x)^1.5)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     x = [-2.0]
     @test MOI.eval_objective(evaluator, x) ≈ -(2^1.5)
@@ -1012,10 +1012,10 @@ end
 function test_constraint_gradient()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
-    model = Nonlinear.Model()
-    Nonlinear.add_constraint(model, :($x^2 + $x * $y + $y^2), MOI.LessThan(2.0))
-    Nonlinear.add_constraint(model, :(cos($y)), MOI.LessThan(2.0))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y])
+    model = ArrayDiff.Model()
+    ArrayDiff.add_constraint(model, :($x^2 + $x * $y + $y^2), MOI.LessThan(2.0))
+    ArrayDiff.add_constraint(model, :(cos($y)), MOI.LessThan(2.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y])
     MOI.initialize(evaluator, [:Grad, :Jac])
     @test MOI.constraint_gradient_structure(evaluator, 1) == [1, 2]
     @test MOI.constraint_gradient_structure(evaluator, 2) == [2]
@@ -1032,9 +1032,9 @@ end
 function test_hessian_length()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(log($x)))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(log($x)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Hess])
     H = Float64[]
     got, want = 0, 1
@@ -1050,9 +1050,9 @@ end
 function test_jacobian_length()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.add_constraint(model, :(sin($x)), MOI.LessThan(0.5))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    model = ArrayDiff.Model()
+    ArrayDiff.add_constraint(model, :(sin($x)), MOI.LessThan(0.5))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Jac])
     J = Float64[]
     @test_throws BoundsError MOI.eval_constraint_jacobian(evaluator, J, [1.0])
@@ -1061,10 +1061,10 @@ end
 function test_timers()
     x = MOI.VariableIndex(1)
-    model = Nonlinear.Model()
-    Nonlinear.set_objective(model, :(log($x)))
-    Nonlinear.add_constraint(model, :(sin($x)), MOI.LessThan(0.5))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(log($x)))
+    ArrayDiff.add_constraint(model, :(sin($x)), MOI.LessThan(0.5))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     y = [1.2]
     g = [NaN]
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
@@ -1101,10 +1101,10 @@ function test_timers()
 end

 function test_varying_length_x()
-    model = MOI.Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex(1)
-    MOI.Nonlinear.set_objective(model, :(sin($x)))
-    evaluator = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    ArrayDiff.set_objective(model, :(sin($x)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, Symbol[:Jac])
     ∇f = [NaN]
     MOI.eval_objective_gradient(evaluator, ∇f, [1.0, 2.0])
@@ -1116,12 +1116,12 @@ end
 function test_univariate_operator_with_no_second_order()
     f(x::Float64) = x^2
     df(x::Float64) = 2 * x
-    model = MOI.Nonlinear.Model()
-    MOI.Nonlinear.register_operator(model, :op_f, 1, f, df)
+    model = ArrayDiff.Model()
+    ArrayDiff.register_operator(model, :op_f, 1, f, df)
     x = MOI.VariableIndex(1)
-    MOI.Nonlinear.add_constraint(model, :(op_f($x)), MOI.LessThan(2.0))
-    evaluator = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
-    @test !(:Hess in MOI.features_available(evaluator))
+    ArrayDiff.add_constraint(model, :(op_f($x)), MOI.LessThan(2.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
+    @test !(:Hess in ArrayDiff.features_available(evaluator))
     MOI.initialize(evaluator, [:Grad, :Jac])
     J = zeros(length(MOI.jacobian_structure(evaluator)))
     MOI.eval_constraint_jacobian(evaluator, J, [2.0])
@@ -1130,9 +1130,9 @@ function test_univariate_operator_with_no_second_order()
 end

 function test_no_objective()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex(1)
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad])
     @test_throws(
         ErrorException("No nonlinear objective."),
@@ -1147,10 +1147,10 @@ end
 function test_x_power_1()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex(1)
-    MOI.Nonlinear.set_objective(model, :($x^1))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    ArrayDiff.set_objective(model, :($x^1))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad, :Hess])
     @test MOI.eval_objective(evaluator, [2.0]) ≈ 2.0
     H = [NaN]
@@ -1160,11 +1160,11 @@ end
 function test_variable_first_node_in_tape()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex(1)
-    expr = MOI.Nonlinear.add_expression(model, :($x))
-    MOI.Nonlinear.set_objective(model, :(sin($expr)))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    expr = ArrayDiff.add_expression(model, :($x))
+    ArrayDiff.set_objective(model, :(sin($expr)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     H = [NaN]
     MOI.eval_hessian_lagrangian(evaluator, H, [2.0], 1.5, [])
@@ -1173,12 +1173,12 @@ end
 function test_subexpression_first_node_in_tape()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex(1)
-    expr = MOI.Nonlinear.add_expression(model, :($x))
-    expr2 = MOI.Nonlinear.add_expression(model, :($expr))
-    MOI.Nonlinear.set_objective(model, :(sin($expr2)))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    expr = ArrayDiff.add_expression(model, :($x))
+    expr2 = ArrayDiff.add_expression(model, :($expr))
+    ArrayDiff.set_objective(model, :(sin($expr2)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     H = [NaN]
     MOI.eval_hessian_lagrangian(evaluator, H, [2.0], 1.5, [])
@@ -1187,11 +1187,11 @@ end
 function test_parameter_in_hessian()
-    model = Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex(1)
-    p = MOI.Nonlinear.add_parameter(model, 3.0)
-    MOI.Nonlinear.set_objective(model, :(sin($x + $p)))
-    evaluator = Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    p = ArrayDiff.add_parameter(model, 3.0)
+    ArrayDiff.set_objective(model, :(sin($x + $p)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     H = [NaN]
     MOI.eval_hessian_lagrangian(evaluator, H, [2.0], 1.5, [])
@@ -1213,9 +1213,9 @@ end
 function test_classify_linearity_ifelse()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
-    model = MOI.Nonlinear.Model()
-    MOI.Nonlinear.set_objective(model, :(ifelse($y, $x, 1)))
-    evaluator = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y])
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :(ifelse($y, $x, 1)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     @test MOI.eval_objective(evaluator, [1.2, 1.0]) == 1.2
     @test MOI.eval_objective(evaluator, [1.2, 0.0]) == 1.0
@@ -1226,9 +1226,9 @@ end
 function test_classify_linearity_logic()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
-    model = MOI.Nonlinear.Model()
-    MOI.Nonlinear.set_objective(model, :($x && $y))
-    evaluator = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y])
+    model = ArrayDiff.Model()
+    ArrayDiff.set_objective(model, :($x && $y))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     @test MOI.eval_objective(evaluator, [1.0, 1.0]) == 1.0
     @test MOI.eval_objective(evaluator, [0.0, 1.0]) == 0.0
@@ -1241,11 +1241,11 @@ end
 function test_hessian_sparsity_with_subexpressions()
     x = MOI.VariableIndex(1)
     y = MOI.VariableIndex(2)
-    model = MOI.Nonlinear.Model()
-    expr = MOI.Nonlinear.add_expression(model, :($x * $y))
-    expr2 = MOI.Nonlinear.add_expression(model, :($expr))
-    MOI.Nonlinear.set_objective(model, :(sin($expr2)))
-    evaluator = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x, y])
+    model = ArrayDiff.Model()
+    expr = ArrayDiff.add_expression(model, :($x * $y))
+    expr2 = ArrayDiff.add_expression(model, :($expr))
+    ArrayDiff.set_objective(model, :(sin($expr2)))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x, y])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     MOI.hessian_lagrangian_structure(evaluator)
     return
@@ -1253,14 +1253,14 @@ end
 function test_toposort_subexpressions()
     x = MOI.VariableIndex(1)
-    model = MOI.Nonlinear.Model()
-    a = MOI.Nonlinear.add_expression(model, :($x))
-    b = MOI.Nonlinear.add_expression(model, :($x))
-    c = MOI.Nonlinear.add_expression(model, :($a + $b))
-    d = MOI.Nonlinear.add_expression(model, :($c + $b))
-    MOI.Nonlinear.add_constraint(model, :($d), MOI.LessThan(1.0))
-    MOI.Nonlinear.add_constraint(model, :($c), MOI.LessThan(1.0))
-    evaluator = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), [x])
+    model = ArrayDiff.Model()
+    a = ArrayDiff.add_expression(model, :($x))
+    b = ArrayDiff.add_expression(model, :($x))
+    c = ArrayDiff.add_expression(model, :($a + $b))
+    d = ArrayDiff.add_expression(model, :($c + $b))
+    ArrayDiff.add_constraint(model, :($d), MOI.LessThan(1.0))
+    ArrayDiff.add_constraint(model, :($c), MOI.LessThan(1.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), [x])
     MOI.initialize(evaluator, [:Grad, :Jac, :Hess])
     g = [NaN, NaN]
     MOI.eval_constraint(evaluator, g, [2.0])
@@ -1269,20 +1269,20 @@ function test_toposort_subexpressions()
 end

 function test_eval_user_defined_operator_ForwardDiff_gradient!()
-    model = MOI.Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex.(1:4)
-    p = MOI.Nonlinear.add_parameter(model, 2.0)
-    ex = MOI.Nonlinear.add_expression(model, :($p * $(x[1])))
+    p = ArrayDiff.add_parameter(model, 2.0)
+    ex = ArrayDiff.add_expression(model, :($p * $(x[1])))
     ψ(x) = sin(x)
     t(x, y) = x + 3y
-    MOI.Nonlinear.register_operator(model, :ψ, 1, ψ)
-    MOI.Nonlinear.register_operator(model, :t, 2, t)
-    MOI.Nonlinear.add_constraint(
+    ArrayDiff.register_operator(model, :ψ, 1, ψ)
+    ArrayDiff.register_operator(model, :t, 2, t)
+    ArrayDiff.add_constraint(
         model,
         :($ex^3 + sin($(x[2])) / ψ($(x[2])) + t($(x[3]), $(x[4]))),
         MOI.LessThan(0.0),
     )
-    d = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), x)
+    d = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), x)
     MOI.initialize(d, [:Jac])
     X = [1.1, 1.2, 1.3, 1.4]
     g = [NaN]
@@ -1296,10 +1296,10 @@ function test_eval_user_defined_operator_ForwardDiff_gradient!()
 end

 function test_eval_user_defined_operator_type_mismatch()
-    model = MOI.Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex.(1:4)
-    p = MOI.Nonlinear.add_parameter(model, 2.0)
-    ex = MOI.Nonlinear.add_expression(model, :($p * $(x[1])))
+    p = ArrayDiff.add_parameter(model, 2.0)
+    ex = ArrayDiff.add_expression(model, :($p * $(x[1])))
     ψ(x) = sin(x)
     t(x, y) = x + 3y
     function ∇t(ret, x, y)
@@ -1307,14 +1307,14 @@ function test_eval_user_defined_operator_type_mismatch()
         ret[1] = 1
         ret[2] = 3 // 1 # These are intentionally the wrong type
         return
     end
-    MOI.Nonlinear.register_operator(model, :ψ, 1, ψ, cos)
-    MOI.Nonlinear.register_operator(model, :t, 2, t, ∇t)
-    MOI.Nonlinear.add_constraint(
+    ArrayDiff.register_operator(model, :ψ, 1, ψ, cos)
+    ArrayDiff.register_operator(model, :t, 2, t, ∇t)
+    ArrayDiff.add_constraint(
         model,
         :($ex^3 + sin($(x[2])) / ψ($(x[2])) + t($(x[3]), $(x[4]))),
         MOI.LessThan(0.0),
     )
-    d = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), x)
+    d = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), x)
     MOI.initialize(d, [:Jac])
     X = [1.1, 1.2, 1.3, 1.4]
     g = [NaN]
@@ -1342,15 +1342,15 @@ function test_generate_hessian_slice_inner()
 end

 function test_hessian_reinterpret_unsafe()
-    model = MOI.Nonlinear.Model()
+    model = ArrayDiff.Model()
     x = MOI.VariableIndex.(1:5)
-    MOI.Nonlinear.add_constraint(
+    ArrayDiff.add_constraint(
         model,
         :(($(x[1]) + $(x[2])) * $(x[3])),
         MOI.EqualTo(0.0),
     )
-    MOI.Nonlinear.add_constraint(model, :($(x[4]) * $(x[5])), MOI.EqualTo(1.0))
-    evaluator = MOI.Nonlinear.Evaluator(model, ArrayDiff.Mode(), x)
+    ArrayDiff.add_constraint(model, :($(x[4]) * $(x[5])), MOI.EqualTo(1.0))
+    evaluator = ArrayDiff.Evaluator(model, ArrayDiff.Mode(), x)
     MOI.initialize(evaluator, [:Hess])
     H_s = MOI.hessian_lagrangian_structure(evaluator)
     H = zeros(length(H_s))