diff --git a/src/AbstractGPs.jl b/src/AbstractGPs.jl
index 784d8a3f..6177a51b 100644
--- a/src/AbstractGPs.jl
+++ b/src/AbstractGPs.jl
@@ -56,6 +56,9 @@ include("sparse_approximations.jl")
 # LatentGP and LatentFiniteGP objects to accommodate GPs with non-Gaussian likelihoods.
 include("latent_gp.jl")
 
+# GPs whose output is a vector.
+include("vector_valued_gp.jl")
+
 # Plotting utilities.
 include(joinpath("util", "plotting.jl"))
 
diff --git a/src/vector_valued_gp.jl b/src/vector_valued_gp.jl
new file mode 100644
index 00000000..010c40d0
--- /dev/null
+++ b/src/vector_valued_gp.jl
@@ -0,0 +1,92 @@
+# Represents a GP whose output is vector-valued.
+struct VectorValuedGP{Tf<:AbstractGP}
+    f::Tf
+    num_outputs::Int
+end
+
+# I gave up figuring out how to properly subtype MatrixDistribution, but I want this to
+# subtype a distribution type which indicates that a sample from this distribution is a
+# matrix of size num_features x num_outputs, or something like that.
+struct FiniteVectorValuedGP{Tv<:VectorValuedGP,Tx<:AbstractVector,TΣy<:Real}
+    v::Tv
+    x::Tx
+    Σy::TΣy
+end
+
+(f::VectorValuedGP)(x...) = FiniteVectorValuedGP(f, x...)
+
+function Statistics.mean(vx::FiniteVectorValuedGP)
+
+    # Construct equivalent FiniteGP.
+    x_f = KernelFunctions.MOInputIsotopicByOutputs(vx.x, vx.v.num_outputs)
+    f = vx.v.f
+    fx = f(x_f, vx.Σy)
+
+    # Compute quantity under equivalent FiniteGP.
+    m = mean(fx)
+
+    # Construct the matrix version of the quantity.
+    M = reshape(m, length(vx.x), vx.v.num_outputs)
+    return M
+end
+
+function Statistics.var(vx::FiniteVectorValuedGP)
+
+    # Construct equivalent FiniteGP.
+    x_f = KernelFunctions.MOInputIsotopicByOutputs(vx.x, vx.v.num_outputs)
+    f = vx.v.f
+    fx = f(x_f, vx.Σy)
+
+    # Compute quantity under equivalent FiniteGP.
+    v = var(fx)
+
+    # Construct the matrix version of the quantity.
+    V = reshape(v, length(vx.x), vx.v.num_outputs)
+    return V
+end
+
+function Random.rand(rng::AbstractRNG, vx::FiniteVectorValuedGP)
+
+    # Construct equivalent FiniteGP.
+    x_f = KernelFunctions.MOInputIsotopicByOutputs(vx.x, vx.v.num_outputs)
+    f = vx.v.f
+    fx = f(x_f, vx.Σy)
+
+    # Compute quantity under equivalent FiniteGP.
+    y = rand(rng, fx)
+
+    # Construct the matrix version of the quantity.
+    Y = reshape(y, length(vx.x), vx.v.num_outputs)
+    return Y
+end
+
+function Distributions.logpdf(vx::FiniteVectorValuedGP, Y::AbstractMatrix{<:Real})
+
+    # Construct equivalent FiniteGP.
+    x_f = KernelFunctions.MOInputIsotopicByOutputs(vx.x, vx.v.num_outputs)
+    f = vx.v.f
+    fx = f(x_f, vx.Σy)
+
+    # Construct the flattened version of the observations.
+    y = vec(Y)
+
+    # Compute logpdf using FiniteGP.
+    return logpdf(fx, y)
+end
+
+function posterior(vx::FiniteVectorValuedGP, Y::AbstractMatrix{<:Real})
+
+    # Construct equivalent FiniteGP.
+    x_f = KernelFunctions.MOInputIsotopicByOutputs(vx.x, vx.v.num_outputs)
+    f = vx.v.f
+    fx = f(x_f, vx.Σy)
+
+    # Construct the flattened version of the observations.
+    y = vec(Y)
+
+    # Construct the posterior AbstractGP.
+    f_post = posterior(fx, y)
+
+    # Construct a new vector-valued GP.
+    return VectorValuedGP(f_post, vx.v.num_outputs)
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index fc365ddc..6e22e7ee 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -77,6 +77,10 @@ include("test_util.jl")
     println(" ")
     @info "Ran latent_gp tests"
 
+    include("vector_valued_gp.jl")
+    println(" ")
+    @info "Ran vector_valued_gp tests"
+
     include("deprecations.jl")
     println(" ")
     @info "Ran deprecation tests"
diff --git a/test/vector_valued_gp.jl b/test/vector_valued_gp.jl
new file mode 100644
index 00000000..97500941
--- /dev/null
+++ b/test/vector_valued_gp.jl
@@ -0,0 +1,16 @@
+@testset "vector_valued_gp" begin
+    f = GP(LinearMixingModelKernel([Matern52Kernel(), Matern12Kernel()], randn(2, 2)))
+    x = range(0.0, 10.0; length=3)
+    Σy = 0.1
+
+    v = AbstractGPs.VectorValuedGP(f, 2)
+    vx = v(x, Σy)
+
+    M = mean(vx)
+
+    rng = MersenneTwister(123456)
+    Y = rand(rng, vx)
+    logpdf(vx, Y)
+
+    v_post = posterior(vx, Y)
+end
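For reference, a minimal usage sketch (not part of the patch) of the flattening convention the new code relies on: KernelFunctions.MOInputIsotopicByOutputs groups the flattened inputs by output (all inputs for output 1, then all inputs for output 2, ...), so reshape(y, length(x), num_outputs) places one output per column and vec(Y) is its exact inverse, which is what keeps mean/var/rand consistent with logpdf/posterior. The variable names below are illustrative; the kernel setup mirrors the test file.

# Sketch of the ordering convention assumed by FiniteVectorValuedGP (not part of the patch).
using AbstractGPs, KernelFunctions, Random

x = range(0.0, 10.0; length=3)
num_outputs = 2
f = GP(LinearMixingModelKernel([Matern52Kernel(), Matern12Kernel()], randn(2, 2)))

# The flattened multi-output view that FiniteVectorValuedGP constructs internally.
x_f = KernelFunctions.MOInputIsotopicByOutputs(x, num_outputs)
fx = f(x_f, 0.1)

y = rand(MersenneTwister(123456), fx)   # vector of length(x) * num_outputs = 6 values
Y = reshape(y, length(x), num_outputs)  # column j holds the values for output j
@assert vec(Y) == y                     # vec(Y) recovers the flattened ordering used by logpdf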