diff --git a/Project.toml b/Project.toml index 4313520e..894d4335 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "SummationByPartsOperators" uuid = "9f78cca6-572e-554e-b819-917d2f1cf240" author = ["Hendrik Ranocha"] -version = "0.5.63" +version = "0.5.64" [deps] ArgCheck = "dce04be8-c92d-5529-be00-80e4d2c0e197" diff --git a/ext/SummationByPartsOperatorsOptimForwardDiffExt.jl b/ext/SummationByPartsOperatorsOptimForwardDiffExt.jl index b4801d41..ca9a04b9 100644 --- a/ext/SummationByPartsOperatorsOptimForwardDiffExt.jl +++ b/ext/SummationByPartsOperatorsOptimForwardDiffExt.jl @@ -10,13 +10,14 @@ using SparseArrays: spzeros function SummationByPartsOperators.function_space_operator(basis_functions, nodes::Vector{T}, source::SourceOfCoefficients; derivative_order = 1, accuracy_order = 0, - options = Options(g_tol = 1e-14, iterations = 10000)) where {T, SourceOfCoefficients} + opt_alg = LBFGS(), options = Options(g_tol = 1e-14, iterations = 10000), + verbose = false) where {T, SourceOfCoefficients} if derivative_order != 1 throw(ArgumentError("Derivative order $derivative_order not implemented.")) end sort!(nodes) - weights, D = construct_function_space_operator(basis_functions, nodes, source; options = options) + weights, D = construct_function_space_operator(basis_functions, nodes, source; opt_alg = opt_alg, options = options, verbose = verbose) return MatrixDerivativeOperator(first(nodes), last(nodes), nodes, weights, D, accuracy_order, source) end @@ -98,7 +99,8 @@ end function construct_function_space_operator(basis_functions, nodes, ::GlaubitzNordströmÖffner2023; - options = Options(g_tol = 1e-14, iterations = 10000)) + opt_alg = LBFGS(), options = Options(g_tol = 1e-14, iterations = 10000), + verbose = false) K = length(basis_functions) N = length(nodes) L = div(N * (N - 1), 2) @@ -127,7 +129,8 @@ function construct_function_space_operator(basis_functions, nodes, x0 = zeros(L + N) fg!(F, G, x) = optimization_function_and_grad!(F, G, x, p) - 
result = optimize(Optim.only_fg!(fg!), x0, LBFGS(), options) + result = optimize(Optim.only_fg!(fg!), x0, opt_alg, options) + verbose && display(result) x = minimizer(result) sigma = x[1:L] diff --git a/src/function_space_operators.jl b/src/function_space_operators.jl index 081cd167..e450d412 100644 --- a/src/function_space_operators.jl +++ b/src/function_space_operators.jl @@ -37,7 +37,8 @@ end """ function_space_operator(basis_functions, nodes, source; derivative_order = 1, accuracy_order = 0, - options = Optim.Options(g_tol = 1e-14, iterations = 10000)) + opt_alg = Optim.LBFGS(), options = Optim.Options(g_tol = 1e-14, iterations = 10000), + verbose = false) Construct an operator that represents a first-derivative operator in a function space spanned by the `basis_functions`, which is an iterable of functions. The operator is constructed on the @@ -45,8 +46,9 @@ interval `[x_min, x_max]` with the nodes `nodes`, where `x_min` is taken as the `nodes` and `x_max` the maximal value. Note that the `nodes` will be sorted internally. The `accuracy_order` is the order of the accuracy of the operator, which can optionally be passed, but does not have any effect on the operator. The operator is constructed solving an optimization -problem with Optim.jl. You can specify the options for the optimization problem with the `options` -argument, see also the [documentation of Optim.jl](https://julianlsolvers.github.io/Optim.jl/stable/user/config/). +problem with Optim.jl. You can specify the optimization algorithm and options for the optimization problem +with the keyword arguments `opt_alg` and `options`, respectively; see also the +[documentation of Optim.jl](https://julianlsolvers.github.io/Optim.jl/stable/user/config/). The operator that is returned follows the general interface. Currently, it is wrapped in a [`MatrixDerivativeOperator`](@ref), but this might change in the future.