added geometric tensor and general p-spin Ham
RaimelMedina committed May 17, 2023
1 parent 1632ec1 commit e43c061
Showing 7 changed files with 254 additions and 160 deletions.
2 changes: 1 addition & 1 deletion Manifest.toml
@@ -2,7 +2,7 @@

julia_version = "1.8.5"
manifest_format = "2.0"
project_hash = "d124a3d707e483ceeafc10bdf792cab086baf113"
project_hash = "c603a3dfe990a8fcf543b485a9847c4780380b99"

[[deps.ANSIColoredPrinters]]
git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c"
1 change: 1 addition & 0 deletions Project.toml
@@ -19,6 +19,7 @@ LoopVectorization = "bdcacae8-1622-11e9-2a5c-532679323890"
Optim = "429524aa-4258-5aef-a3af-852621145aeb"
ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
SimpleWeightedGraphs = "47aef6b3-ad0c-573a-a1e2-d07658019622"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
ThreadsX = "ac1d9e8a-700a-412c-b207-f0111f4b6c0d"
Yao = "5872b779-8223-5990-8dd0-5abbb0748c8c"
18 changes: 10 additions & 8 deletions src/QAOALandscapes.jl
@@ -1,17 +1,22 @@
module QAOALandscapes

using SparseArrays
const OperatorType{T} = Union{SparseMatrixCSC{T,Int64}, Vector{T}}
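For reference, a minimal sketch (not part of this commit) of the two storage formats admitted by the new `OperatorType` alias: a diagonal Hamiltonian kept as the vector of its diagonal entries, or a general operator kept as a sparse matrix.

```julia
# Sketch only: both representations satisfy OperatorType{Float64}.
using SparseArrays

Hdiag   = ones(Float64, 8)        # diagonal Hamiltonian stored as its diagonal
Hsparse = spdiagm(0 => Hdiag)     # the same operator as a SparseMatrixCSC{Float64,Int64}

Hdiag   isa OperatorType{Float64}   # true
Hsparse isa OperatorType{Float64}   # true
```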

# Functions related to an arbitrary QAOA
export QAOA, HxDiag, HxDiagSymmetric, HzzDiag, HzzDiagSymmetric, gradCostFunction, hessianCostFunction, getQAOAState, elementHessianCostFunction, optimizeParameters
export QAOA, HxDiag, HxDiagSymmetric, HzzDiag, HzzDiagSymmetric, generalClassicalHamiltonian, getQAOAState, gradCostFunction, hessianCostFunction, geometricTensor
export elementHessianCostFunction, optimizeParameters

export toFundamentalRegion!
export getStateJacobian, quantumFisherInfoMatrix

export getInitialParameter
# Functions related to different initialization strategies
# Interp
export interpInitialization, rollDownInterp, interpOptimize
# Fourier
export toFourierParams, fromFourierParams, fourierInitialization, fourierJacobian, rollDownFourier, fourierOptimize
# Transition states
export transitionState, permuteHessian, getNegativeHessianEigval, getNegativeHessianEigvec, rollDownTS, greedyOptimize, greedySelect
export transitionState, permuteHessian, getNegativeHessianEigval, getNegativeHessianEigvec, rollDownfromTS, rollDownTS, greedyOptimize, greedySelect

# Some useful Functions
export spinChain
@@ -20,14 +25,10 @@ export gradStdTest, selectSmoothParameter, whichTSType, _onehot
# Benchmark with respect to the hard Harvard instance
export harvardGraph

# Optimization settings
export OptimizationSettings

using DiffResults

using Graphs
using FiniteDiff
using ForwardDiff
using Flux
using SimpleWeightedGraphs
using Optim
using LineSearches
@@ -38,6 +39,7 @@ using Statistics
using LoopVectorization

include("qaoa.jl")
include("hamiltonians.jl")
include("hessian_tools.jl")
include("transition_states.jl")
include("greedy_ts.jl")
74 changes: 72 additions & 2 deletions src/gradient_adjoint.jl
@@ -1,11 +1,16 @@
@doc raw"""
gradCostFunction(qaoa::QAOA, params::Vector{T}) where T<: Real
Algorithm to compute the gradient of the QAOA cost function using adjoint (a reverse-mode) differentiation.
Compute the gradient of the QAOA cost function using adjoint (reverse-mode) differentiation. We implement the algorithm
proposed in [*this reference*](https://arxiv.org/abs/2009.02823); see also [arXiv:2011.02991](https://arxiv.org/pdf/2011.02991.pdf).
"""
function gradCostFunction(qaoa::QAOA, params::Vector{T}) where T<: Real
λ = getQAOAState(qaoa, params)
ϕ = copy(λ)
λ .= qaoa.HC .* λ
if typeof(qaoa.hamiltonian) <: Vector
λ .= qaoa.hamiltonian .* λ
else
λ .= qaoa.hamiltonian * λ
end
μ = similar(λ)
#costFun = dot(ϕ, λ) |> real
gradResult = zeros(T, length(params))
@@ -33,6 +38,18 @@ function applyQAOALayerAdjoint!(qaoa::QAOA, params::Vector{T}, pos::Int, state::
end
end

function applyQAOALayer!(qaoa::QAOA, params::Vector{T}, pos::Int, state::Vector{Complex{T}}) where T<: Real
if isodd(pos)
# γ-type parameter
state .= exp.(-im * params[pos] * qaoa.HC) .* state
else
# β-type parameter
QAOALandscapes.fwht!(state, qaoa.N)
state .= exp.(-im * params[pos] * qaoa.HB) .* state
QAOALandscapes.ifwht!(state, qaoa.N)
end
end
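A minimal usage sketch (not part of this commit) of how the full QAOA state could be assembled by composing `applyQAOALayer!` over all parameters. The helper name `buildStateByLayers` is hypothetical, and the sketch assumes the ansatz starts from the uniform superposition and that `qaoa.HC` is stored as a dense diagonal vector.

```julia
# Sketch only: build |Γ⟩ = U(β_p)U(γ_p)⋯U(β_1)U(γ_1)|+⟩ layer by layer.
# Assumes the circuit acts on the uniform superposition over length(qaoa.HC) basis states.
function buildStateByLayers(qaoa::QAOA, params::Vector{T}) where T <: Real
    dim   = length(qaoa.HC)
    state = fill(Complex{T}(1 / sqrt(dim)), dim)   # uniform superposition
    for pos in eachindex(params)                   # odd pos: γ layer, even pos: β layer
        applyQAOALayer!(qaoa, params, pos, state)
    end
    return state
end
```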

function applyQAOALayerDerivative!(qaoa::QAOA, params::Vector{T}, pos::Int, state::Vector{Complex{T}}) where T<: Real
if isodd(pos)
# γ-type parameter
@@ -46,3 +63,56 @@ function applyQAOALayerDerivative!(qaoa::QAOA, params::Vector{T}, pos::Int, stat
QAOALandscapes.ifwht!(state, qaoa.N)
end
end

@doc raw"""
geometricTensor(qaoa::QAOA, params::Vector{T}, ψ0::AbstractVector{Complex{T}}) where T<: Real
Compute the quantum geometric tensor of the QAOA variational state using adjoint (reverse-mode) differentiation. We implement the algorithm
proposed in [*this reference*](https://arxiv.org/pdf/2011.02991.pdf).
"""
function geometricTensor(qaoa::QAOA, params::Vector{T}, ψ0::AbstractVector{Complex{T}}) where T<: Real
T_vec = zeros(Complex{T}, length(params))
L_mat = zeros(Complex{T}, length(params), length(params))
G_mat = zeros(Complex{T}, length(params), length(params))

χ = copy(ψ0)
applyQAOALayer!(qaoa, params, 1, χ)

ψ = copy(χ)
λ = similar(ψ)
μ = similar(ψ)

ϕ = copy(ψ0)
applyQAOALayerDerivative!(qaoa, params, 1, ϕ)

T_vec[1] = dot(χ, ϕ)
L_mat[1, 1] = dot(ϕ, ϕ)

for j ∈ 2:length(params)
λ .= copy(ψ)
ϕ .= copy(ψ)
applyQAOALayerDerivative!(qaoa, params, j, ϕ)

L_mat[j, j] = dot(ϕ, ϕ)
for i ∈ j-1:-1:1
applyQAOALayerAdjoint!(qaoa, params, i+1, ϕ)
applyQAOALayerAdjoint!(qaoa, params, i, λ)
μ .= copy(λ)
applyQAOALayerDerivative!(qaoa, params, i, μ)
L_mat[i,j] = dot(μ, ϕ)
end
T_vec[j] = dot(χ, ϕ)
applyQAOALayer!(qaoa, params, j, ψ)
end

for j ∈ eachindex(params)
for i ∈ eachindex(params)
if i ≤ j
G_mat[i,j] = L_mat[i,j] - T_vec[i]' * T_vec[j]
else
G_mat[i,j] = L_mat[j,i]' - T_vec[i]' * T_vec[j]
end
end
end

return G_mat
end
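A hedged usage sketch (not part of this commit): it assumes an already constructed `QAOA` instance `qaoa` and takes the initial state `ψ0` to be the uniform superposition. For a pure parametrized state, the quantum Fisher information matrix is, up to convention, four times the real part of the geometric tensor.

```julia
# Sketch only: `qaoa` is assumed to exist; ψ0 is the uniform superposition.
p  = 2
Γ  = 0.1 .* randn(2p)                              # interleaved (γ, β) parameters
ψ0 = fill(ComplexF64(1 / sqrt(length(qaoa.HC))), length(qaoa.HC))

∇E = gradCostFunction(qaoa, Γ)        # adjoint-mode gradient, Vector{Float64} of length 2p
G  = geometricTensor(qaoa, Γ, ψ0)     # complex 2p × 2p matrix
F  = 4 .* real.(G)                    # quantum Fisher information matrix (up to convention)
```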
8 changes: 4 additions & 4 deletions src/greedy_ts.jl
@@ -1,5 +1,5 @@
function greedySelect(qaoa::QAOA, Γmin::Vector{Float64}; ϵ=0.001, method=Optim.BFGS(linesearch = Optim.BackTracking(order=3)), threaded=false, chooseSmooth=false)
paramResult, energyResult = rollDownTS(qaoa, Γmin; ϵ=ϵ, method=method, threaded=threaded, chooseSmooth=chooseSmooth)
function greedySelect(qaoa::QAOA, Γmin::Vector{Float64}; ϵ=0.001, method=Optim.BFGS(linesearch = Optim.BackTracking(order=3)), threaded=false)
paramResult, energyResult = rollDownTS(qaoa, Γmin; ϵ=ϵ, method=method, threaded=threaded)
# get key of minimum energy #
valMinimum, keyMinimum = findmin(energyResult);
# determine which point to take #
@@ -18,7 +18,7 @@ function greedyOptimize(qaoa::QAOA, Γ0::Vector{Float64}, pmax::Int, igamma::Int
println(" p=$(p) | $(round(listMinima[p][1], digits = 7)) | $(norm(gradCostFunction(qaoa, listMinima[p][2])))")

for t p+1:pmax
dataGreedy = rollDownTS(qaoa, listMinima[t-1][end], igamma; ϵ=ϵ, method=method, tsType=tsType)
dataGreedy = rollDownfromTS(qaoa, listMinima[t-1][end], igamma; ϵ=ϵ, method=method, tsType=tsType)
if chooseSmooth
idxSmooth, paramSmooth = selectSmoothParameter(dataGreedy[1], dataGreedy[2])
Eopt = dataGreedy[3][idxSmooth]
@@ -44,7 +44,7 @@ function greedyOptimize(qaoa::QAOA, Γ0::Vector{Float64}, pmax::Int; ϵ=0.001, m
println(" p=$(p) | $(round(listMinima[p][1], digits = 7)) | $(norm(gradCostFunction(qaoa, listMinima[p][2])))")

for t p+1:pmax
Eopt, Γopt = greedySelect(qaoa, listMinima[t-1][end]; ϵ=ϵ, method=method, threaded=threaded, chooseSmooth=chooseSmooth)
Eopt, Γopt = greedySelect(qaoa, listMinima[t-1][end]; ϵ=ϵ, method=method, threaded=threaded)
listMinima[t] = (Eopt, Γopt)
println(" p=$(t) | $(round(Eopt, digits = 7)) | $(norm(gradCostFunction(qaoa, Γopt)))")
end
144 changes: 144 additions & 0 deletions src/hamiltonians.jl
@@ -0,0 +1,144 @@
# First, the Hamiltonians corresponding to the original QAOA proposal
# We restrict ourselves to the +1 parity sector of the Hilbert space
@doc raw"""
HxDiagSymmetric(g::T) where T<: AbstractGraph
Construct the mixing Hamiltonian in the positive (+1) parity sector of the Hilbert space. This means that if the system
size is N, then `HxDiagSymmetric` returns a vector of size ``2^{N-1}``. This construction only makes sense if the cost/problem
Hamiltonian ``H_C`` is invariant under the action of the parity operator, that is,
```math
[H_C, \prod_{i=1}^N \sigma^x_i] = 0
```
"""
function HxDiagSymmetric(g::T) where T<: AbstractGraph
N = nv(g)
Hx_vec = zeros(ComplexF64, 2^(N-1))
count = 0
for j ∈ 0:2^(N-1)-1
if parity_of_integer(j)==0
count += 1
for i ∈ 0:N-1
Hx_vec[count] += ComplexF64(-2 * ((j >> i) & 1) + 1)
end
Hx_vec[2^(N-1) - count + 1] = - Hx_vec[count]
end
end
return Hx_vec
end

@doc raw"""
HzzDiagSymmetric(g::T) where T <: AbstractGraph
HzzDiagSymmetric(edge::T) where T <: AbstractEdge
Construct the cost Hamiltonian in the positive (+1) parity sector of the Hilbert space. This means that if the system
size is N, then `HzzDiagSymmetric` returns a vector of size ``2^{N-1}``. This construction only makes sense if the cost/problem
Hamiltonian ``H_C`` is invariant under the action of the parity operator, that is,
```math
[H_C, \prod_{i=1}^N \sigma^x_i] = 0
```
Similarly, if the input is an `edge` then it returns the corresponding ``ZZ`` operator.
"""
function HzzDiagSymmetric(g::T) where T <: AbstractGraph
N = nv(g)
matZZ = zeros(ComplexF64, 2^(N-1));
for edge ∈ edges(g)
for j ∈ 0:2^(N-1)-1
matZZ[j+1] += ComplexF64(-2 * (((j >> (edge.src -1)) & 1) ⊻ ((j >> (edge.dst -1)) & 1)) + 1) * getWeight(edge)
end
end
return matZZ
end

function HzzDiagSymmetric(edge::T) where T <: AbstractEdge
N = nv(g)
matZZ = zeros(ComplexF64, 2^(N-1));
for j ∈ 0:2^(N-1)-1
matZZ[j+1] += ComplexF64(-2 * (((j >> (edge.src -1)) & 1) ⊻ ((j >> (edge.dst -1)) & 1)) + 1) * getWeight(edge)
end
return matZZ
end
#####################################################################
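A small illustrative sketch (not part of this commit): for a parity-symmetric problem the symmetric-sector vectors have half the length of their full-space counterparts. It assumes an unweighted `Graphs.jl` graph for which `getWeight(edge)` evaluates to 1.

```julia
# Sketch only: sizes of the +1 parity-sector Hamiltonians on a 4-vertex graph.
using Graphs

g = complete_graph(4)
length(HxDiagSymmetric(g))    # 2^(4-1) = 8
length(HzzDiagSymmetric(g))   # 8
```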

function getElementMaxCutHam(x::Int, graph::T) where T <: AbstractGraph
val = 0.
for i ∈ edges(graph)
i_elem = ((x>>(i.src-1))&1)
j_elem = ((x>>(i.dst-1))&1)
idx = i_elem ⊻ j_elem
val += ((-1)^idx)*getWeight(i)
end
return val
end

@doc raw"""
HzzDiag(g::T) where T <: AbstractGraph
Construct the cost Hamiltonian. If the cost Hamiltonian is invariant under the parity operator
``\prod_{i=1}^N \sigma^x_i`` it is better to work in the +1 parity sector of the Hilbert space since
this is more efficient. In practice, if the system size is ``N``, the corresponding Hamiltonian would be a vector of size ``2^{N-1}``.
This function instead returns a vector of size ``2^N``.
"""
function HzzDiag(g::T) where T <: AbstractGraph
result = ThreadsX.map(x->getElementMaxCutHam(x, g), 0:2^nv(g)-1)
return result/2
end

function getElementMixingHam(x::Int, graph::T) where T <: AbstractGraph
val = 0.
N = nv(graph)
for i=1:N
i_elem = ((x>>(i-1))&1)
val += (-1)^i_elem
end
return val
end

@doc raw"""
HxDiag(g::T) where T<: AbstractGraph
Construct the mixing Hamiltonian. If the cost Hamiltonian is invariant under the parity operator
``\prod_{i=1}^N \sigma^x_i`` it is better to work in the +1 parity sector of the Hilbert space since
this is more efficient. In practice, if the system size is ``N``, the corresponding Hamiltonian would be a vector of size ``2^{N-1}``; this function instead returns a vector of size ``2^N``.
"""
function HxDiag(g::T) where T <: AbstractGraph
result = ThreadsX.map(x->getElementMixingHam(x, g), 0:2^nv(g)-1)
return result
end
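An illustrative sketch (not part of this commit) of the full-space constructors on a small ring; again it assumes `getWeight` evaluates to 1 on unweighted edges.

```julia
# Sketch only: full-Hilbert-space diagonal Hamiltonians for a 4-vertex ring.
using Graphs

g  = cycle_graph(4)
Hz = HzzDiag(g)    # Vector of length 2^4 = 16 (MaxCut-type cost, halved weights)
Hx = HxDiag(g)     # Vector of length 16; entry x+1 is Σᵢ (-1)^(bit i of x)
```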

# Let us also define a function that given a dictionary of "interaction" terms (keys)
# and "weights" (values) constructs the corresponding classical Hamiltonian.
function getElementGeneralClassicalHam(x::Int, interaction::Vector{Int64}, weight::Float64)
elements = map(i->((x>>(i-1))&1), interaction)
idx = foldl(⊻, elements)
val = ((-1)^idx)*weight
return val
end

function getElementGeneralClassicalHam(x::Int, interaction_dict::Dict{Vector{Int64}, Float64})
val = sum(k->getElementGeneralClassicalHam(x, k, interaction_dict[k]), keys(interaction_dict))
return val
end

@doc raw"""
generalClassicalHamiltonian(interaction_dict::Dict{Vector{Int64}, Float64})
This function computes the classical Hamiltonian for a general ``p``-spin Hamiltonian, that is
```math
H_Z = \sum_{i \in S} J_i \prod_{\alpha \in i} \sigma^z_{i_{\alpha}}
```
Above, ``S`` is the set of interaction terms, which is passed as an argument in the form of a dictionary whose keys
are the spins participating in a given interaction and whose values are the weights of the corresponding interaction term.
# Arguments
- `interaction_dict`: a dictionary where each key is a vector of integers representing an interaction, and each value is the weight of that interaction.
# Returns
- `hamiltonian::Vector{Float64}`: the general ``p``-spin Hamiltonian, of length ``2^n`` where ``n`` is the number of distinct spins appearing in `interaction_dict`.
"""
function generalClassicalHamiltonian(interaction_dict::Dict{Vector{Int64}, Float64})
n = reduce(vcat, collect(keys(interaction_dict))) |> Set |> length
return ThreadsX.map(x->getElementGeneralClassicalHam(x, interaction_dict), 0:2^n-1)
end
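A minimal usage sketch (not part of this commit): a 3-spin term plus a 2-spin term on four spins.

```julia
# Sketch only: H_Z = 0.7 σᶻ₁σᶻ₂σᶻ₃ - 1.3 σᶻ₂σᶻ₄ on n = 4 spins.
interactions = Dict{Vector{Int64}, Float64}(
    [1, 2, 3] => 0.7,
    [2, 4]    => -1.3,
)
Hz = generalClassicalHamiltonian(interactions)   # Vector{Float64} of length 2^4 = 16
```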
