problem
Closed this issue · 3 comments
changlinsen commented
What should I do when fx, gx and hx come from one calculation
mzaffalon commented
You can use fx and gx in the computation of the objective function directly, see https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/. As far as I understand, support for the Hessian is only available through the preconditioner, but I have never used it, so I don't know what to recommend.
odow commented
What should I do when fx, gx and hx come from one calculation
Use a closure or some sort of cache.
using NLopt
# Evaluate the objective, its gradient, both inequality constraints, and
# their Jacobian in one pass. Fills ∇f, g, and ∇g in place (column j of
# ∇g is the gradient of constraint j) and returns the objective value.
function f_and_g(x::Vector, ∇f::Vector, g::Vector, ∇g::Matrix)
    # Objective: min sqrt(x[2])
    ∇f[1] = 0.0
    ∇f[2] = 0.5 / sqrt(x[2])
    # Constraint 1: g_1 = (2 * x[1])^3 - x[2]
    u = 2 * x[1]
    g[1] = u^3 - x[2]
    ∇g[1, 1] = 3 * 2 * u^2
    ∇g[2, 1] = -1.0
    # Constraint 2: g_2 = (-1 * x[1] + 1)^3 - x[2]
    v = -1 * x[1] + 1
    g[2] = v^3 - x[2]
    ∇g[1, 2] = 3 * -1 * v^2
    ∇g[2, 2] = -1.0
    return sqrt(x[2])
end
# Shared storage for the combined objective/constraint evaluation.
# `last_x` records the point of the most recent evaluation; it starts as
# NaNs so the first comparison against any real `x` fails and forces an
# initial evaluation (`NaN != NaN`).
mutable struct Cache
    last_x::Vector{Float64}   # point of last evaluation (n)
    fx::Float64               # cached objective value
    ∇f::Vector{Float64}       # cached objective gradient (n)
    g::Vector{Float64}        # cached constraint values (m)
    ∇g::Matrix{Float64}       # cached constraint Jacobian (n × m)
    # n = number of variables, m = number of constraints.
    Cache(n::Int, m::Int) = new(fill(NaN, n), 0.0, zeros(n), zeros(m), zeros(n, m))
end
"""
    update_cache(cache::Cache, x::Vector)

Re-evaluate `f_and_g` at `x` and store the results in `cache`, but only
when `x` differs from the last evaluated point (`cache.last_x`).

`cache.last_x` is initialized to NaNs and `NaN != NaN`, so the first
call always evaluates.
"""
function update_cache(cache::Cache, x::Vector)
    if x != cache.last_x
        cache.fx = f_and_g(x, cache.∇f, cache.g, cache.∇g)
        # copyto! writes x into the existing buffer; the original
        # `cache.last_x .= copy(x)` allocated a needless temporary copy.
        copyto!(cache.last_x, x)
    end
    return
end
"""
    myfunc(cache::Cache, x::Vector, grad::Vector)

NLopt objective callback: refresh `cache` at `x`, fill `grad` with the
cached gradient when NLopt requests one (nonempty `grad`), and return
the cached objective value.
"""
function myfunc(cache::Cache, x::Vector, grad::Vector)
    update_cache(cache, x)
    if !isempty(grad)
        grad .= cache.∇f
    end
    return cache.fx
end
"""
    myconstraint(cache::Cache, x::Vector, grad::Vector, i::Int)

NLopt callback for the `i`-th inequality constraint: refresh `cache` at
`x`, fill `grad` with column `i` of the cached Jacobian when NLopt
requests one (nonempty `grad`), and return the cached value `g[i]`.
"""
function myconstraint(cache::Cache, x::Vector, grad::Vector, i::Int)
    update_cache(cache, x)
    if length(grad) > 0
        # @view avoids allocating a copy of the Jacobian column on
        # every callback invocation (plain `cache.∇g[:, i]` copies).
        grad .= @view cache.∇g[:, i]
    end
    return cache.g[i]
end
# Solve the NLopt tutorial problem, routing the objective and both
# constraints through one shared Cache so f, ∇f, g, and ∇g are computed
# at most once per point.
opt = Opt(:LD_MMA, 2)
cache = Cache(2, 2)
opt.lower_bounds = [-Inf, 0.0]
opt.xtol_rel = 1e-4
opt.min_objective = (x, grad) -> myfunc(cache, x, grad)
# Julia `for` loops create a fresh binding of `i` each iteration, so
# each closure captures its own constraint index.
for i in 1:2
    inequality_constraint!(opt, (x, g) -> myconstraint(cache, x, g, i), 1e-8)
end
(minf, minx, ret) = optimize(opt, [1.234, 5.678])
odow commented
Closing as stale, and because it doesn't require any changes to NLopt.jl.