24 changes: 3 additions & 21 deletions .github/workflows/test.yml
@@ -38,6 +38,7 @@ jobs:
run: |
julia --color=yes --project=./test -e '
using Pkg
Pkg.Registry.update()
Pkg.develop(path=".")
Pkg.instantiate()
include("test/runtests.jl")'
@@ -66,6 +67,7 @@ jobs:
run: |
julia --color=yes --project=./test -e '
using Pkg
Pkg.Registry.update()
Pkg.develop(path=".")
Pkg.instantiate()
include("test/runtests.jl")'
@@ -92,27 +94,7 @@ jobs:
run: |
julia --color=yes --project=./test -e '
using Pkg
Pkg.Registry.update()
Pkg.develop(path=".")
Pkg.instantiate()
include("test/runtests.jl")'
# test-self-hosted:
# env:
# EXAMODELS_TEST_CUDA: 1
# EXAMODELS_TEST_AMDGPU: 1
# EXAMODELS_TEST_ONEAPI: 1
# runs-on: self-hosted
# strategy:
# matrix:
# julia-version: ['1']
# steps:
# - uses: actions/checkout@v2
# - uses: julia-actions/setup-julia@latest
# with:
# version: ${{ matrix.julia-version }}
# - uses: julia-actions/julia-buildpkg@latest
# - uses: julia-actions/julia-runtest@latest
# - uses: julia-actions/julia-processcoverage@v1
# - uses: codecov/codecov-action@v1
# with:
# file: lcov.info
# token: ${{ secrets.CODECOV_TOKEN }}
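Note: each of the three test jobs now runs Pkg.Registry.update() before resolving, so a stale registry snapshot on the runner cannot hide freshly released versions (such as the AMDGPU 2 series admitted by the compat change below). For reference, the script each job runs:

using Pkg
Pkg.Registry.update()        # refresh the General registry before resolving
Pkg.develop(path = ".")      # develop the local ExaModels checkout into the test env
Pkg.instantiate()            # install the test environment's dependencies
include("test/runtests.jl")  # run the suite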
2 changes: 1 addition & 1 deletion Project.toml
@@ -32,7 +32,7 @@ ExaModelsOneAPI = "oneAPI"
ExaModelsSpecialFunctions = "SpecialFunctions"

[compat]
AMDGPU = "1"
AMDGPU = "1, 2"
CUDA = "5"
Ipopt = "1.6"
JuMP = "1"
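For reference, each comma-separated entry in a [compat] bound is its own caret specifier, so the widened bound accepts both major series:

[compat]
AMDGPU = "1, 2"   # [1.0.0, 2.0.0) ∪ [2.0.0, 3.0.0), i.e. any 1.x or 2.x release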
9 changes: 0 additions & 9 deletions ext/ExaModelsAMDGPU.jl
@@ -4,13 +4,4 @@ import ExaModels, AMDGPU

ExaModels.convert_array(v, backend::AMDGPU.ROCBackend) = AMDGPU.ROCArray(v)

# Below are type piracy
function Base.findall(f::F, bitarray::A) where {F<:Function,A<:AMDGPU.ROCVector}
a = Array(bitarray)
b = findall(f, a)
c = similar(bitarray, eltype(b), length(b))

return copyto!(c, b)
end
Base.findall(bitarray::A) where {A<:AMDGPU.ROCVector} = Base.findall(identity, bitarray)
end
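Context for the deletion: the removed methods were type piracy, since ExaModels owns neither Base.findall nor ROCVector, and pirated methods can silently change behavior in unrelated packages. Dropping them presumably relies on the stock GPU findall (supplied through the GPUArrays stack) in the AMDGPU versions the new compat bounds allow; a hedged usage sketch:

using AMDGPU             # assumes a version whose GPUArrays backend supplies findall
mask = AMDGPU.ROCArray([true, false, true])
findall(mask)            # stock GPU implementation; no ExaModels-owned method needed
findall(identity, mask)  # predicate form, previously routed through the pirated fallback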
12 changes: 6 additions & 6 deletions ext/ExaModelsKernelAbstractions.jl
@@ -579,7 +579,7 @@ end
@kernel function kerh(y1, y2, @Const(f), @Const(itr), @Const(x), @Const(θ), @Const(adj1), @Const(adj2))
I = @index(Global)
@inbounds ExaModels.hrpass0(
f.f(itr[I], ExaModels.SecondAdjointNodeSource(x), θ),
f(itr[I], ExaModels.SecondAdjointNodeSource(x), θ),
f.comp2,
y1,
y2,
@@ -602,7 +602,7 @@
)
I = @index(Global)
@inbounds ExaModels.hrpass0(
f.f(itr[I], ExaModels.SecondAdjointNodeSource(x), θ),
f(itr[I], ExaModels.SecondAdjointNodeSource(x), θ),
f.comp2,
y1,
y2,
@@ -616,7 +616,7 @@ end
@kernel function kerj(y1, y2, @Const(f), @Const(itr), @Const(x), @Const(θ), @Const(adj))
I = @index(Global)
@inbounds ExaModels.jrpass(
f.f(itr[I], ExaModels.AdjointNodeSource(x), θ),
f(itr[I], ExaModels.AdjointNodeSource(x), θ),
f.comp1,
ExaModels.offset0(f, itr, I),
y1,
@@ -630,7 +630,7 @@ end
@kernel function kerg(y, @Const(f), @Const(itr), @Const(x), @Const(θ), @Const(adj))
I = @index(Global)
@inbounds ExaModels.grpass(
f.f(itr[I], ExaModels.AdjointNodeSource(x), θ),
f(itr[I], ExaModels.AdjointNodeSource(x), θ),
f.comp1,
y,
ExaModels.offset1(f, I),
@@ -641,11 +641,11 @@ end

@kernel function kerf(y, @Const(f), @Const(itr), @Const(x), @Const(θ))
I = @index(Global)
@inbounds y[ExaModels.offset0(f, itr, I)] = f.f(itr[I], x, θ)
@inbounds y[ExaModels.offset0(f, itr, I)] = f(itr[I], x, θ)
end
@kernel function kerf2(y, @Const(f), @Const(itr), @Const(x), @Const(θ), @Const(oa))
I = @index(Global)
@inbounds y[oa+I] = f.f(itr[I], x, θ)
@inbounds y[oa+I] = f(itr[I], x, θ)
end


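These kernels now call the SIMDFunction wrapper itself rather than reaching into its .f field; the functor methods added in src/simdfunction.jl (further down in this diff) forward the call, and dispatch also covers the new constant-valued case. A minimal sketch of the callable-struct pattern, with hypothetical names:

struct Wrapped{F}
    f::F
end
(w::Wrapped)(i, x, θ) = w.f(i, x, θ)              # generic: forward to the stored function
(w::Wrapped{F})(i, x, θ) where {F<:Real} = w.f    # constant payload: return it directly

sq(i, x, θ) = x[i]^2
Wrapped(sq)(1, [3.0], nothing)    # 9.0; call sites never touch the .f field
Wrapped(2.5)(1, [3.0], nothing)   # 2.5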
44 changes: 0 additions & 44 deletions ext/ExaModelsOneAPI.jl
@@ -2,50 +2,6 @@ module ExaModelsOneAPI

import ExaModels, oneAPI

function ExaModels.append!(
backend,
a::A,
b::Base.Generator{UnitRange{I}},
lb,
) where {I,A<:oneAPI.oneArray}
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
map!(b.f, view(aa, (la+1):(la+lb)), b.iter)
return aa
end

function ExaModels.append!(backend, a::A, b::Base.Generator, lb) where {A<:oneAPI.oneArray}
b = ExaModels._adapt_gen(b)
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
map!(b.f, view(aa, (la+1):(la+lb)), ExaModels.convert_array(b.iter, backend))
return aa
end

function ExaModels.append!(
backend,
a::A,
b::V,
lb,
) where {A<:oneAPI.oneArray,V<:AbstractArray}
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
copyto!(view(aa, (la+1):(la+lb)), b)
return aa
end


function ExaModels.append!(backend, a::A, b::Number, lb) where {A<:oneAPI.oneArray}
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
fill!(view(aa, (la+1):(la+lb)), b)
return aa
end

ExaModels.convert_array(v, backend::oneAPI.oneAPIBackend) = oneAPI.oneArray(v)

ExaModels.sort!(array::A; lt = isless) where {A<:oneAPI.oneArray} =
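All four deleted methods instantiated the same grow-by-copy idiom (GPU arrays cannot be resized in place), differing only in how the tail is written: copyto! for arrays, map! for generators, fill! for numbers. Presumably a portable implementation in the core package now covers oneArray as well. The shared shape, as a sketch:

function append_grow(a, tail::AbstractArray, lb)
    la = length(a)
    aa = similar(a, la + lb)                   # allocate the enlarged buffer
    copyto!(view(aa, 1:la), a)                 # copy the existing prefix
    copyto!(view(aa, (la+1):(la+lb)), tail)    # write the tail
    return aa
end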
6 changes: 3 additions & 3 deletions src/gradient.jl
@@ -38,7 +38,7 @@ Performs dense gradient evaluation
"""
function gradient!(y, f, x, θ, adj)
@simd for k in eachindex(f.itr)
@inbounds gradient!(y, f.f.f, x, θ, f.itr[k], adj)
@inbounds gradient!(y, f.f, x, θ, f.itr[k], adj)
end
return y
end
@@ -68,7 +68,7 @@ Performs sparse gradient evaluation via the reverse pass on the computation (su
o1,
cnt,
adj,
) where {D<:Union{AdjointNull,ParIndexed}}
) where {D<:Union{AdjointNull,ParIndexed,Real}}
return cnt
end
@inline function grpass(d::D, comp, y, o1, cnt, adj) where {D<:AdjointNode1}
@@ -113,7 +113,7 @@ Performs sparse gradient evaluation
"""
function sgradient!(y, f, x, θ, adj)
@simd for k in eachindex(f.itr)
@inbounds sgradient!(y, f.f.f, f.itr[k], x, θ, f.itr.comp1, offset1(f, k), adj)
@inbounds sgradient!(y, f.f, f.itr[k], x, θ, f.itr.comp1, offset1(f, k), adj)
end
return y
end
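Widening the terminal case to include Real here (and in jacobian.jl and hessian.jl below) lets the reverse passes skip subexpressions that have already folded to plain numbers, recording no derivative entries for them; constant terms can then flow through the same code paths as symbolic nodes. A toy sketch of the dispatch idea, with hypothetical names:

abstract type ToyAdjoint end
struct ToyNode <: ToyAdjoint end

toypass(d::Union{Nothing,Real}, cnt) = cnt   # terminal: constants contribute no entries
toypass(d::ToyAdjoint, cnt) = cnt + 1        # interior nodes record an entry

toypass(3.14, 0)       # returns 0
toypass(ToyNode(), 0)  # returns 1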
14 changes: 5 additions & 9 deletions src/graph.jl
@@ -113,17 +113,13 @@ struct SecondFixed{F}
inner::F
end

@inline Base.getproperty(n::ParSource, s::Symbol) = ParIndexed(n, s)
@inline Base.getindex(n::ParSource, i) = ParIndexed(n, i)
@inline Base.indexed_iterate(n::ParSource, idx, start = 1) = (ParIndexed(n, idx), idx + 1)


@inline Base.getindex(n::VarSource, i) = Var(i)
@inline Base.getindex(::ParameterSource, i) = ParameterNode(i)
Par(iter::Type) = ParSource()
Par(iter, idx...) = ParIndexed(Par(iter, idx[2:end]...), idx[1])
Par(iter::Type{T}, idx...) where {T<:Tuple} =
Tuple(Par(p, i, idx...) for (i, p) in enumerate(T.parameters))

Par(iter::Type{T}, idx...) where {T<:NamedTuple} = NamedTuple{T.parameters[1]}(
Par(p, i, idx...) for (i, p) in enumerate(T.parameters[2].parameters)
)

@inline Node1(f::F, inner::I) where {F,I} = Node1{F,I}(inner)
@inline Node2(f::F, inner1::I1, inner2::I2) where {F,I1,I2} = Node2{F,I1,I2}(inner1, inner2)
@@ -144,7 +140,7 @@ struct Identity end
@inline (v::ParameterNode{I})(::Identity, x, ::Nothing) where {I<:AbstractNode} = NaN

@inline (v::ParSource)(i, x, θ) = i
@inline (v::ParIndexed{I,n})(i, x, θ) where {I,n} = @inbounds v.inner(i, x, θ)[n]
@inline (v::ParIndexed{I,n})(i, x, θ) where {I,n} = @inbounds getfield(v.inner(i, x, θ), n)

(v::ParIndexed)(i::Identity, x, θ) = NaN # despecialized
(v::ParSource)(i::Identity, x, θ) = NaN # despecialized
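The overloads added above let a parameter source be addressed as p.name or p[i], or destructured as (i, j) = p, each yielding a symbolic ParIndexed node. That is what makes the eltype-driven Par constructors (deleted in this hunk) unnecessary and lets SIMDFunction below build its expression tree from a bare ParSource(); evaluation correspondingly switches to getfield so that a NamedTuple payload is looked up by field name. A rough sketch using ExaModels internals (the field name is hypothetical):

import ExaModels
p = ExaModels.ParSource()
i, j = p           # destructuring lowers to Base.indexed_iterate(p, 1) and (p, 2, s),
                   # so i == ParIndexed(p, 1) and j == ParIndexed(p, 2)
c = p.coef         # getproperty: ParIndexed(p, :coef)
v = p[3]           # getindex:    ParIndexed(p, 3)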
6 changes: 3 additions & 3 deletions src/hessian.jl
@@ -309,7 +309,7 @@ Performs sparse hessian evaluation (`d²f/dx²` portion) via the reverse pass on
- `adj`: second adjoint propagated up to the current node
"""

@inline function hrpass(t::SecondAdjointNull, comp, y1, y2, o2, cnt, adj, adj2)
@inline function hrpass(t::D, comp, y1, y2, o2, cnt, adj, adj2) where {D<:Union{SecondAdjointNull,Real}}
cnt
end
@inline function hrpass(
@@ -623,7 +623,7 @@ function shessian!(y1, y2, f, x, θ, adj1, adj2)
@inbounds shessian!(
y1,
y2,
f.f.f,
f.f,
f.itr[k],
x,
θ,
@@ -639,7 +639,7 @@ function shessian!(y1, y2, f, x, θ, adj1s::V, adj2) where {V<:AbstractVector}
@inbounds shessian!(
y1,
y2,
f.f.f,
f.f,
f.itr[k],
x,
θ,
4 changes: 2 additions & 2 deletions src/jacobian.jl
@@ -13,7 +13,7 @@ Performs sparse jacobian evaluation via the reverse pass on the computation (sub
- `cnt`: counter
- `adj`: adjoint propagated up to the current node
"""
@inline function jrpass(d::AdjointNull, comp, i, y1, y2, o1, cnt, adj)
@inline function jrpass(d::D, comp, i, y1, y2, o1, cnt, adj) where {D<:Union{AdjointNull,Real}}
return cnt
end
@inline function jrpass(d::D, comp, i, y1, y2, o1, cnt, adj) where {D<:AdjointNode1}
Expand Down Expand Up @@ -105,7 +105,7 @@ function sjacobian!(y1, y2, f, x, θ, adj)
@inbounds sjacobian!(
y1,
y2,
f.f.f,
f.f,
f.itr[i],
x,
θ,
4 changes: 2 additions & 2 deletions src/nlp.jl
@@ -703,7 +703,7 @@ end

_obj(objs, x, θ) =
_obj(objs.inner, x, θ) +
(isempty(objs.itr) ? zero(eltype(x)) : sum(objs.f.f(k, x, θ) for k in objs.itr))
(isempty(objs.itr) ? zero(eltype(x)) : sum(objs.f(k, x, θ) for k in objs.itr))
_obj(objs::ObjectiveNull, x, θ) = zero(eltype(x))

function cons_nln!(m::ExaModel, x::AbstractVector, g::AbstractVector)
@@ -715,7 +715,7 @@ end
function _cons_nln!(cons, x, θ, g)
_cons_nln!(cons.inner, x, θ, g)
@simd for i in eachindex(cons.itr)
g[offset0(cons, i)] += cons.f.f(cons.itr[i], x, θ)
g[offset0(cons, i)] += cons.f(cons.itr[i], x, θ)
end
end
_cons_nln!(cons::ConstraintNull, x, θ, g) = nothing
11 changes: 9 additions & 2 deletions src/simdfunction.jl
@@ -24,6 +24,9 @@ struct SIMDFunction{F,C1,C2}
o2step::Int
end

(sf::SIMDFunction{F,C1,C2})(i, x, θ) where {F,C1,C2} = sf.f(i, x, θ)
(sf::SIMDFunction{F,C1,C2})(i, x, θ) where {F <: Real,C1,C2} = sf.f

"""
SIMDFunction(gen::Base.Generator, o0 = 0, o1 = 0, o2 = 0)

Expand All @@ -37,16 +40,20 @@ Returns a `SIMDFunction` using the `gen`.
"""
function SIMDFunction(gen::Base.Generator, o0 = 0, o1 = 0, o2 = 0)

f = gen.f(Par(eltype(gen.iter)))
f = gen.f(ParSource())

_simdfunction(f, o0, o1, o2)
end

function _simdfunction(f::F, o0, o1, o2) where F <: Real
SIMDFunction(f, ExaModels.Compressor{Tuple{}}(()), ExaModels.Compressor{Tuple{}}(()), o0, o1, o2, 0, 0)
end

function _simdfunction(f, o0, o1, o2)
d = f(Identity(), AdjointNodeSource(nothing), nothing)
y1 = []
ExaModels.grpass(d, nothing, y1, nothing, 0, NaN)

t = f(Identity(), SecondAdjointNodeSource(nothing), nothing)
y2 = []
ExaModels.hrpass0(t, nothing, y2, nothing, nothing, 0, NaN, NaN)
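The new F <: Real branch covers generators whose body does not depend on the iterate at all: gen.f(ParSource()) then evaluates straight to a number instead of an expression tree, so the wrapper stores the constant with empty compressors and every call returns it unchanged. A hedged illustration:

import ExaModels
gen = (1.0 for i in 1:10)            # body independent of the iterate
f = gen.f(ExaModels.ParSource())     # evaluates to 1.0::Float64, no symbolic tree
# _simdfunction(1.0, o0, o1, o2) then takes the Real branch: empty Compressors,
# no derivative entries, and sf(i, x, θ) == 1.0 for every index i.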
3 changes: 0 additions & 3 deletions src/templates.jl
@@ -1,9 +1,6 @@
# A template for convert_array. This is extended in extension packages for each device architecture.
convert_array(v, ::Nothing) = v

# template to avoid oneAPI sum issue
sum(a) = Base.sum(a)

# to avoid type piracy
sort!(array; kwargs...) = Base.sort!(array; kwargs...)

5 changes: 4 additions & 1 deletion test/NLPTest/NLPTest.jl
@@ -12,6 +12,8 @@ const NLP_TEST_ARGUMENTS = [
("luksan_vlcek", 20),
("ac_power", "pglib_opf_case3_lmbd.m"),
("ac_power", "pglib_opf_case14_ieee.m"),
("struct_ac_power", "pglib_opf_case3_lmbd.m"),
("struct_ac_power", "pglib_opf_case14_ieee.m"),
]

const SOLVERS = [
@@ -20,7 +22,7 @@ const SOLVERS = [
("percival", nlp -> percival(nlp)),
]

const EXCLUDE1 = [("ac_power", "percival")]
const EXCLUDE1 = [("ac_power", "percival"), ("struct_ac_power", "percival")]
const EXCLUDE2 = []

for backend in BACKENDS
@@ -32,6 +34,7 @@ end
include("luksan.jl")
include("power.jl")
include("parameter_test.jl")
include("power_struct.jl")

function test_nlp(m1, m2; full = false)
@testset "NLP meta tests" begin