From 78dac30b6e9a7681e084c9ba50801056afb9d400 Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Sat, 29 Apr 2023 01:39:50 +0200 Subject: [PATCH 01/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 58cb3c519..eb2c1b463 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced growth path handling +- [ ] check that there is an error if he cant find SS - [ ] check if you can do analytic derivatives for higher order derivatives - [ ] kick out unsused parameters from m.parameters - [ ] higher order solution derivs with Zygote From 7e42f99331ed76598117915d66f0a648d719fa7b Mon Sep 17 00:00:00 2001 From: thorek1 Date: Sun, 30 Apr 2023 12:26:49 +0200 Subject: [PATCH 02/83] more export funcs, more explicit solve algo --- src/MacroModelling.jl | 6 +++--- src/dynare.jl | 10 ++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index debbefde9..51a6bc541 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -68,7 +68,7 @@ export plotlyjs_backend, gr_backend export Beta, InverseGamma, Gamma, Normal export translate_mod_file, translate_dynare_file, import_model, import_dynare -export write_mod_file, write_dynare_file, write_to_dynare_file, export_dynare, export_to_dynare, export_mod_file +export write_mod_file, write_dynare_file, write_to_dynare_file, write_to_dynare, export_dynare, export_to_dynare, export_mod_file, export_model # Internal export irf, girf @@ -1462,7 +1462,7 @@ function solve!(𝓂::β„³; end if dynamics - if any([:riccati, :first_order, :second_order, :third_order] .∈ ([algorithm],)) && any([:riccati, :first_order, :second_order, :third_order] .∈ (𝓂.solution.outdated_algorithms,)) + if (any([:riccati, :first_order] .∈ ([algorithm],)) && any([:riccati, :first_order] .∈ (𝓂.solution.outdated_algorithms,))) || (:second_order == algorithm && :second_order ∈ 𝓂.solution.outdated_algorithms) || (:third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms) SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) @@ -1479,7 +1479,7 @@ function solve!(𝓂::β„³; end - if any([:second_order, :third_order] .∈ ([algorithm],)) && any([:second_order, :third_order] .∈ (𝓂.solution.outdated_algorithms,)) + if (:second_order == algorithm && :second_order ∈ 𝓂.solution.outdated_algorithms) || (:third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms) stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, 𝐒₁, 𝐒₂ = calculate_second_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose) @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." 
diff --git a/src/dynare.jl b/src/dynare.jl index 94b1b5acb..400ef789d 100644 --- a/src/dynare.jl +++ b/src/dynare.jl @@ -193,6 +193,16 @@ See [`write_mod_file`](@ref) """ write_to_dynare_file = write_mod_file +""" +See [`write_mod_file`](@ref) +""" +write_to_dynare = write_mod_file + +""" +See [`write_mod_file`](@ref) +""" +export_model = write_mod_file + """ See [`write_mod_file`](@ref) """ From 7951f0edb9a9435dc584552cb71bc9fc3dbb09fa Mon Sep 17 00:00:00 2001 From: thorek1 Date: Sun, 30 Apr 2023 15:03:49 +0200 Subject: [PATCH 03/83] export model new func names --- src/dynare.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dynare.jl b/src/dynare.jl index 400ef789d..9ace2345e 100644 --- a/src/dynare.jl +++ b/src/dynare.jl @@ -206,4 +206,4 @@ export_model = write_mod_file """ See [`write_mod_file`](@ref) """ -write_to_dynare = write_mod_file \ No newline at end of file +export_dynare = write_mod_file \ No newline at end of file From 476b48cec1986844a61be0588903905ceed5719a Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 1 May 2023 23:15:56 +0200 Subject: [PATCH 04/83] double export dynare entry --- src/dynare.jl | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/dynare.jl b/src/dynare.jl index 9ace2345e..748e2ad23 100644 --- a/src/dynare.jl +++ b/src/dynare.jl @@ -201,9 +201,4 @@ write_to_dynare = write_mod_file """ See [`write_mod_file`](@ref) """ -export_model = write_mod_file - -""" -See [`write_mod_file`](@ref) -""" -export_dynare = write_mod_file \ No newline at end of file +export_model = write_mod_file \ No newline at end of file From 2ef523764cb0c672cd060edc4b00cb7db74c5836 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 1 May 2023 23:19:52 +0200 Subject: [PATCH 05/83] add initial state to cond fcast; fix plot --- src/get_functions.jl | 17 ++++--- src/plotting.jl | 103 ++++++++++++++++--------------------------- 2 files changed, 47 insertions(+), 73 deletions(-) diff --git a/src/get_functions.jl b/src/get_functions.jl index 4bc9647fc..7aecf7d56 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -342,7 +342,7 @@ Limited to the first order perturbation solution of the model. - `periods` [Default: `40`, Type: `Int`]: the total number of periods is the sum of the argument provided here and the maximum of periods of the shocks or conditions argument. - $PARAMETERS - $VARIABLES -- `conditions_in_levels` [Default: `false`, Type: `Bool`]: indicator whether the conditions are provided in levels. If `true` the input to the conditions argument will have the non stochastic steady state substracted. +- `conditions_in_levels` [Default: `true`, Type: `Bool`]: indicator whether the conditions are provided in levels. If `true` the input to the conditions argument will have the non stochastic steady state substracted. 
- $LEVELS - $VERBOSE @@ -383,7 +383,7 @@ conditions[2,2] = .02 shocks = Matrix{Union{Nothing,Float64}}(undef,2,1) shocks[1,1] = .05 -get_conditional_forecast(RBC_CME, conditions, shocks = shocks) +get_conditional_forecast(RBC_CME, conditions, shocks = shocks, conditions_in_levels = false) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Variables_and_shocks ∈ 9-element Vector{Symbol} @@ -421,10 +421,11 @@ And data, 9Γ—42 Matrix{Float64}: function get_conditional_forecast(𝓂::β„³, conditions::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}}; shocks::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}, Nothing} = nothing, + initial_state::Vector{Float64} = [0.0], periods::Int = 40, parameters = nothing, variables::Symbol_input = :all_including_auxilliary, - conditions_in_levels::Bool = false, + conditions_in_levels::Bool = true, levels::Bool = false, verbose::Bool = false) @@ -486,6 +487,7 @@ function get_conditional_forecast(𝓂::β„³, reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) + initial_state = initial_state == [0.0] ? zeros(𝓂.timings.nVars) : initial_state - reference_steady_state[1:length(𝓂.var)] var_idx = parse_variables_input_to_index(variables, 𝓂.timings) @@ -515,9 +517,9 @@ function get_conditional_forecast(𝓂::β„³, shocks[free_shock_idx,1] .= 0 - shocks[free_shock_idx,1] = CC \ (conditions[cond_var_idx,1] - state_update(zeros(size(C,1)), Float64[shocks[:,1]...])[cond_var_idx]) + shocks[free_shock_idx,1] = CC \ (conditions[cond_var_idx,1] - state_update(initial_state, Float64[shocks[:,1]...])[cond_var_idx]) - Y[:,1] = state_update(zeros(size(C,1)), Float64[shocks[:,1]...]) + Y[:,1] = state_update(initial_state, Float64[shocks[:,1]...]) for i in 2:size(conditions,2) cond_var_idx = findall(conditions[:,i] .!= nothing) @@ -531,11 +533,12 @@ function get_conditional_forecast(𝓂::β„³, @assert length(free_shock_idx) >= length(cond_var_idx) "Exact matching only possible with more free shocks than conditioned variables. Period " * repr(i) * " has " * repr(length(free_shock_idx)) * " free shock(s) and " * repr(length(cond_var_idx)) * " conditioned variable(s)." - CC = C[cond_var_idx,free_shock_idx] + CC = C[cond_var_idx,free_shock_idx] if length(cond_var_idx) == 1 @assert any(CC .!= 0) "Free shocks have no impact on conditioned variable in period " * repr(i) * "." elseif length(free_shock_idx) == length(cond_var_idx) + CC = RF.lu(CC, check = false) @assert β„’.issuccess(CC) "Numerical stabiltiy issues for restrictions in period " * repr(i) * "." @@ -624,7 +627,7 @@ function get_irf(𝓂::β„³, shock_idx = 1 elseif shocks isa KeyedArray{Float64} - shock_input = axiskeys(shocks)[1] + shock_input = map(x->Symbol(replace(string(x),"β‚β‚“β‚Ž" => "")),axiskeys(shocks)[1]) periods += size(shocks)[2] diff --git a/src/plotting.jl b/src/plotting.jl index 105226b9d..6ba958f4d 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -1047,7 +1047,7 @@ Limited to the first order perturbation solution of the model. - $SHOCK_CONDITIONS - `periods` [Default: `40`, Type: `Int`]: the total number of periods is the sum of the argument provided here and the maximum of periods of the shocks or conditions argument. - $VARIABLES -`conditions_in_levels` [Default: `false`, Type: `Bool`]: indicator whether the conditions are provided in levels. 
If `true` the input to the conditions argument will have the non stochastic steady state substracted. +`conditions_in_levels` [Default: `true`, Type: `Bool`]: indicator whether the conditions are provided in levels. If `true` the input to the conditions argument will have the non stochastic steady state substracted. - $LEVELS - `show_plots` [Default: `true`, Type: `Bool`]: show plots. Separate plots per shocks and varibles depending on number of variables and `plots_per_page`. - `save_plots` [Default: `false`, Type: `Bool`]: switch to save plots using path and extension from `save_plots_path` and `save_plots_format`. Separate files per shocks and variables depending on number of variables and `plots_per_page` @@ -1091,7 +1091,7 @@ conditions[2,2] = .02 shocks = Matrix{Union{Nothing,Float64}}(undef,2,1) shocks[1,1] = .05 -plot_conditional_forecast(RBC_CME, conditions, shocks = shocks) +plot_conditional_forecast(RBC_CME, conditions, shocks = shocks, conditions_in_levels = false) # The same can be achieved with the other input formats: # conditions = Matrix{Union{Nothing,Float64}}(undef,7,2) @@ -1114,10 +1114,11 @@ plot_conditional_forecast(RBC_CME, conditions, shocks = shocks) function plot_conditional_forecast(𝓂::β„³, conditions::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}}; shocks::Union{Matrix{Union{Nothing,Float64}}, SparseMatrixCSC{Float64}, KeyedArray{Union{Nothing,Float64}}, KeyedArray{Float64}, Nothing} = nothing, + initial_state::Vector{Float64} = [0.0], periods::Int = 40, parameters = nothing, variables::Symbol_input = :all_including_auxilliary, - conditions_in_levels::Bool = false, + conditions_in_levels::Bool = true, levels::Bool = false, show_plots::Bool = true, save_plots::Bool = false, @@ -1139,6 +1140,7 @@ function plot_conditional_forecast(𝓂::β„³, Y = get_conditional_forecast(𝓂, conditions, shocks = shocks, + initial_state = initial_state, periods = periods, parameters = parameters, variables = variables, @@ -1218,7 +1220,7 @@ function plot_conditional_forecast(𝓂::β„³, return_plots = [] for i in 1:length(var_idx) - if all(isapprox.(Y[i,:], 0, atol = eps(Float32))) + if all(isapprox.(Y[i,:], 0, atol = eps(Float32))) && !(any(vcat(conditions,shocks)[var_idx[i],:] .!= nothing)) n_subplots -= 1 end end @@ -1226,75 +1228,44 @@ function plot_conditional_forecast(𝓂::β„³, for i in 1:length(var_idx) SS = reference_steady_state[i] if !(all(isapprox.(Y[i,:],0,atol = eps(Float32)))) || length(findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing)) > 0 - if !(plot_count % plots_per_page == 0) - plot_count += 1 - if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? 
[SS 0] : [SS], color = :black, label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS], color = :black, label = "") - end) - end - else - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - end) - end - - end + + if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) + cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) + if length(cond_idx) > 0 + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") + if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end + StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") + StatsPlots.scatter!(cond_idx, conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) + end) else - - plot_count = 1 - if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") + if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end + StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") + end) + end + else + cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) + if length(cond_idx) > 0 push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) + StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) + StatsPlots.hline!([SS], color = :black, label = "") + StatsPlots.scatter!(cond_idx, conditions_in_levels ? 
vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) end) - else + else push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") + StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) + StatsPlots.hline!([SS], color = :black, label = "") end) end - else - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - StatsPlots.scatter!(cond_idx,vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - end) - end - end + end + + if !(plot_count % plots_per_page == 0) + plot_count += 1 + else + plot_count = 1 shock_string = "Conditional forecast" From 6323cc5737cb0f5da8b78c2160c7830bdb5a48c2 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 1 May 2023 23:20:22 +0200 Subject: [PATCH 06/83] more flexible keyed array shock input --- src/MacroModelling.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 51a6bc541..58741bfd9 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -2665,12 +2665,12 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shock_idx = 1 elseif shocks isa KeyedArray{Float64} - shock_input = axiskeys(shocks)[1] + shock_input = map(x->Symbol(replace(string(x),"β‚β‚“β‚Ž" => "")),axiskeys(shocks)[1]) periods += size(shocks)[2] @assert length(setdiff(shock_input, T.exo)) == 0 "Provided shocks which are not part of the model." 
- + shock_history = zeros(T.nExo, periods) shock_history[indexin(shock_input,T.exo),1:size(shocks)[2]] = shocks @@ -2749,7 +2749,7 @@ function girf(state_update::Function, shock_idx = 1 elseif shocks isa KeyedArray{Float64} - shock_input = axiskeys(shocks)[1] + shock_input = map(x->Symbol(replace(string(x),"β‚β‚“β‚Ž" => "")),axiskeys(shocks)[1]) periods += size(shocks)[2] From 0b13aa29464210692e482175b946a5e5b7a7d1b5 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 1 May 2023 23:20:42 +0200 Subject: [PATCH 07/83] change conditions_in_levels in docs --- docs/src/tutorials/rbc.md | 6 ++++-- docs/src/tutorials/sw03.md | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docs/src/tutorials/rbc.md b/docs/src/tutorials/rbc.md index 7768f2906..d6a7bab37 100644 --- a/docs/src/tutorials/rbc.md +++ b/docs/src/tutorials/rbc.md @@ -206,7 +206,7 @@ Note that for the first 4 periods the shock has no predetermined value and is de Finally we can get the conditional forecast: ```@repl tutorial_1 -get_conditional_forecast(RBC, conditions, shocks = shocks) +get_conditional_forecast(RBC, conditions, shocks = shocks, conditions_in_levels = false) ``` The function returns a `KeyedArray` with the values of the endogenous variables and shocks matching the conditions exactly. @@ -214,9 +214,11 @@ The function returns a `KeyedArray` with the values of the endogenous variables We can also plot the conditional forecast. Please note that you need to import the `StatsPlots` packages once before the first plot. In order to plot we can use: ```@repl tutorial_1 -plot_conditional_forecast(RBC, conditions, shocks = shocks) +plot_conditional_forecast(RBC, conditions, shocks = shocks, conditions_in_levels = false) ``` ![RBC conditional forecast](../assets/conditional_fcst__RBC__conditional_forecast__1.png) +and we need to set `conditions_in_levels = false` since the conditions are defined in deviations. + Note that the stars indicate the values the model is conditioned on. diff --git a/docs/src/tutorials/sw03.md b/docs/src/tutorials/sw03.md index b1cd8c66b..51533a3f6 100644 --- a/docs/src/tutorials/sw03.md +++ b/docs/src/tutorials/sw03.md @@ -340,7 +340,7 @@ The above shock `Matrix` means that for the first two periods shocks 1, 2, 3, 5, Finally we can get the conditional forecast: ```@repl tutorial_2 -get_conditional_forecast(SW03, conditions, shocks = shocks, variables = [:Y,:pi,:W]) +get_conditional_forecast(SW03, conditions, shocks = shocks, variables = [:Y,:pi,:W], conditions_in_levels = false) ``` The function returns a `KeyedArray` with the values of the endogenous variables and shocks matching the conditions exactly. @@ -348,11 +348,13 @@ The function returns a `KeyedArray` with the values of the endogenous variables We can also plot the conditional forecast. Please note that you need to import the `StatsPlots` packages once before the first plot. ```@repl tutorial_2 -plot_conditional_forecast(SW03,conditions, shocks = shocks, plots_per_page = 6,variables = [:Y,:pi,:W]) +plot_conditional_forecast(SW03,conditions, shocks = shocks, plots_per_page = 6,variables = [:Y,:pi,:W],conditions_in_levels = false) ``` ![SW03 conditional forecast 1](../assets/conditional_fcst__SW03__conditional_forecast__1.png) ![SW03 conditional forecast 2](../assets/conditional_fcst__SW03__conditional_forecast__2.png) +and we need to set `conditions_in_levels = false` since the conditions are defined in deviations. + Note that the stars indicate the values the model is conditioned on. 
From 3d2980f77edbfd01fe8793d32188dd8071299161 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 1 May 2023 23:46:30 +0200 Subject: [PATCH 08/83] plot solution only vars with impact --- src/plotting.jl | 40 +++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/src/plotting.jl b/src/plotting.jl index 6ba958f4d..e271e3b0b 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -882,12 +882,16 @@ function plot_solution(𝓂::β„³, label = "Non Stochastic Steady State") end if :second_order ∈ algorithm + SSS2 = 𝓂.solution.perturbation.second_order.stochastic_steady_state + StatsPlots.scatter!(fill(0,1,1), framestyle = :none, legend = :inside, label = "Stochastic Steady State (2nd order)") end if :third_order ∈ algorithm + SSS3 = 𝓂.solution.perturbation.third_order.stochastic_steady_state + StatsPlots.scatter!(fill(0,1,1), framestyle = :none, legend = :inside, @@ -904,12 +908,20 @@ function plot_solution(𝓂::β„³, framestyle = :none, legend = :inside) + variable_first_list = [] + variable_second_list = [] + variable_third_list = [] + has_impact_list = [] for k in vars_to_plot - kk = Symbol(replace(string(k), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")) has_impact = false + + variable_first = [] + variable_second = [] + variable_third = [] + if :first_order ∈ algorithm variable_first = [𝓂.solution.perturbation.first_order.state_update(state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] @@ -919,8 +931,6 @@ function plot_solution(𝓂::β„³, end if :second_order ∈ algorithm - SSS2 = 𝓂.solution.perturbation.second_order.stochastic_steady_state - variable_second = [𝓂.solution.perturbation.second_order.state_update(SSS2 - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] variable_second = [(abs(x) > eps() ? x : 0.0) + SS_and_std[1](kk) for x in variable_second] @@ -929,8 +939,6 @@ function plot_solution(𝓂::β„³, end if :third_order ∈ algorithm - SSS3 = 𝓂.solution.perturbation.third_order.stochastic_steady_state - variable_third = [𝓂.solution.perturbation.third_order.state_update(SSS3 - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] variable_third = [(abs(x) > eps() ? 
x : 0.0) + SS_and_std[1](kk) for x in variable_third] @@ -938,27 +946,41 @@ function plot_solution(𝓂::β„³, has_impact = has_impact || sum(abs2,variable_third .- sum(variable_third)/length(variable_third))/(length(variable_third)-1) > eps() end - if !has_impact continue end + push!(variable_first_list, variable_first) + push!(variable_second_list, variable_second) + push!(variable_third_list, variable_third) + push!(has_impact_list, has_impact) + + if !has_impact + n_subplots -= 1 + end + end + + + for (i,k) in enumerate(vars_to_plot) + kk = Symbol(replace(string(k), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")) + + if !has_impact_list[i] continue end push!(pp,begin Pl = StatsPlots.plot() if :first_order ∈ algorithm StatsPlots.plot!(state_range .+ SS_and_std[1](state), - variable_first, + variable_first_list[i], ylabel = string(k)*"β‚β‚€β‚Ž", xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") end if :second_order ∈ algorithm StatsPlots.plot!(state_range .+ SSS2[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], - variable_second, + variable_second_list[i], ylabel = string(k)*"β‚β‚€β‚Ž", xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") end if :third_order ∈ algorithm StatsPlots.plot!(state_range .+ SSS3[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], - variable_third, + variable_third_list[i], ylabel = string(k)*"β‚β‚€β‚Ž", xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") From ce033390e28a416084bd87edf92e3a6c890c5a01 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 1 May 2023 23:47:06 +0200 Subject: [PATCH 09/83] bump version --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 0cc716bc2..773a5f177 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "MacroModelling" uuid = "687ffad2-3618-405e-ac50-e0f7b9c75e44" authors = ["Thore Kockerols "] -version = "0.1.19" +version = "0.1.20" [deps] AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5" From ae23b65aac1a987518802577c74a005c59a8b6bf Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 2 May 2023 00:01:15 +0200 Subject: [PATCH 10/83] tests with conditions_in_levels --- test/functionality_tests.jl | 40 ++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index 018dbf265..eacb3f3bf 100644 --- a/test/functionality_tests.jl +++ b/test/functionality_tests.jl @@ -148,53 +148,53 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = conditions[var_idxs[1],1] = .01 conditions[var_idxs[2],2] = .02 - cond_fcst = get_conditional_forecast(m, conditions) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) if all(vec(sum(sol[end-length(shocknames)+1:end,var_idxs[1:2]] .!= 0, dims = 1)) .> 0) shocks = Matrix{Union{Nothing, Float64}}(undef,size(new_sub_irfs_all,3),1) shocks[1,1] = .1 - cond_fcst = get_conditional_forecast(m, conditions, shocks = shocks) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, shocks = shocks) end conditions = spzeros(size(new_sub_irfs_all,1),2) conditions[var_idxs[1],1] = .01 conditions[var_idxs[2],2] = .02 - cond_fcst = get_conditional_forecast(m, conditions) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) if all(vec(sum(sol[end-length(shocknames)+1:end,var_idxs[1:2]] .!= 0, dims = 1)) .> 0) shocks = spzeros(size(new_sub_irfs_all,3),1) shocks[1,1] = .1 - cond_fcst = get_conditional_forecast(m, conditions, 
shocks = shocks) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, shocks = shocks) end conditions = KeyedArray(Matrix{Union{Nothing, Float64}}(undef,2,2), Variables = varnames[var_idxs[1:2]], Periods = 1:2) conditions[var_idxs[1],1] = .01 conditions[var_idxs[2],2] = .02 - cond_fcst = get_conditional_forecast(m, conditions) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) if all(vec(sum(sol[end-length(shocknames)+1:end,var_idxs[1:2]] .!= 0, dims = 1)) .> 0) shocks = KeyedArray(Matrix{Union{Nothing, Float64}}(undef,1,1), Shocks = [shocknames[1]], Periods = [1]) shocks[1,1] = .1 - cond_fcst = get_conditional_forecast(m, conditions, shocks = shocks) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, shocks = shocks) end - cond_fcst = get_conditional_forecast(m, conditions) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) if plots - plot_conditional_forecast(m, conditions, save_plots = false, show_plots = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) - plot_conditional_forecast(m, conditions, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = false, show_plots = true) + plot_conditional_forecast(m, conditions, 
conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = :all, verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) + plot_conditional_forecast(m, conditions, conditions_in_levels = false, save_plots = true, show_plots = false, periods = 10, parameters = old_par_vals, variables = varnames[1], verbose = true) end NSSS = get_SS(m,derivatives = false) @@ -206,9 +206,9 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = conditions_lvl[var_idxs[1],1] = .01 + reference_steady_state[var_idxs[1]] conditions_lvl[var_idxs[2],2] = .02 + reference_steady_state[var_idxs[2]] - cond_fcst = get_conditional_forecast(m, conditions_lvl, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], conditions_in_levels = true, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions_lvl, periods = 10, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001), variables = varnames[1], verbose = true) - cond_fcst = get_conditional_forecast(m, conditions, periods = 10, parameters = old_par_vals, variables = varnames[1], levels = true, verbose = true) + cond_fcst = get_conditional_forecast(m, conditions, conditions_in_levels = false, periods = 10, parameters = old_par_vals, variables = varnames[1], levels = true, verbose = true) # Test filtering and smoothing sol = get_solution(m) From 2c4e1e3fb3e64b278336db7f24b3fb1a62103755 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 2 May 2023 00:17:58 +0200 Subject: [PATCH 11/83] decomposition only with keyedarrays --- src/get_functions.jl | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/src/get_functions.jl b/src/get_functions.jl index 7aecf7d56..1592cd74f 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -63,7 +63,7 @@ And data, 4Γ—2Γ—40 Array{Float64, 3}: ``` """ function get_shock_decomposition(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, # variables::Symbol_input = :all_including_auxilliary, # shocks::Union{Symbol_input,Matrix{Float64},KeyedArray{Float64}} = :all, @@ -77,7 +77,9 @@ function get_shock_decomposition(𝓂::β„³, reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -85,7 +87,7 @@ function get_shock_decomposition(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) # var_idx = parse_variables_input_to_index(variables, 𝓂.timings) @@ -140,7 +142,7 @@ And data, 1Γ—40 Matrix{Float64}: ``` """ function get_estimated_shocks(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, data_in_levels::Bool = true, smooth::Bool = true, @@ -152,7 +154,9 @@ function get_estimated_shocks(𝓂::β„³, reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -160,7 +164,7 @@ function get_estimated_shocks(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) return KeyedArray(filtered_and_smoothed[smooth ? 3 : 7]; Shocks = map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.timings.exo), Periods = 1:size(data,2)) end @@ -219,7 +223,7 @@ And data, 4Γ—40 Matrix{Float64}: ``` """ function get_estimated_variables(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, # variables::Symbol_input = :all_including_auxilliary, data_in_levels::Bool = true, @@ -233,7 +237,9 @@ function get_estimated_variables(𝓂::β„³, reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -241,7 +247,7 @@ function get_estimated_variables(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) return KeyedArray(levels ? filtered_and_smoothed[smooth ? 1 : 5] .+ reference_steady_state[1:length(𝓂.var)] : filtered_and_smoothed[smooth ? 
1 : 5]; Variables = 𝓂.timings.var, Periods = 1:size(data,2)) end @@ -298,7 +304,7 @@ And data, 4Γ—40 Matrix{Float64}: ``` """ function get_estimated_variable_standard_deviations(𝓂::β„³, - data::AbstractArray{Float64}; + data::KeyedArray{Float64}; parameters = nothing, # variables::Symbol_input = :all_including_auxilliary, data_in_levels::Bool = true, @@ -311,7 +317,9 @@ function get_estimated_variable_standard_deviations(𝓂::β„³, reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) if data_in_levels data_in_deviations = data .- reference_steady_state[obs_idx] @@ -319,7 +327,7 @@ function get_estimated_variable_standard_deviations(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) return KeyedArray(filtered_and_smoothed[smooth ? 2 : 6]; Standard_deviations = 𝓂.timings.var, Periods = 1:size(data,2)) end From 8369c35d7f0424001a182265a49d24c9779e7a49 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 2 May 2023 09:43:06 +0200 Subject: [PATCH 12/83] fix data handling in decomp --- src/get_functions.jl | 8 ++++---- src/plotting.jl | 8 +++++--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/get_functions.jl b/src/get_functions.jl index 1592cd74f..cc27e0100 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -87,7 +87,7 @@ function get_shock_decomposition(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) # var_idx = parse_variables_input_to_index(variables, 𝓂.timings) @@ -164,7 +164,7 @@ function get_estimated_shocks(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) return KeyedArray(filtered_and_smoothed[smooth ? 3 : 7]; Shocks = map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.timings.exo), Periods = 1:size(data,2)) end @@ -247,7 +247,7 @@ function get_estimated_variables(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) return KeyedArray(levels ? filtered_and_smoothed[smooth ? 1 : 5] .+ reference_steady_state[1:length(𝓂.var)] : filtered_and_smoothed[smooth ? 1 : 5]; Variables = 𝓂.timings.var, Periods = 1:size(data,2)) end @@ -327,7 +327,7 @@ function get_estimated_variable_standard_deviations(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, axiskeys(data,1); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) return KeyedArray(filtered_and_smoothed[smooth ? 
2 : 6]; Standard_deviations = 𝓂.timings.var, Periods = 1:size(data,2)) end diff --git a/src/plotting.jl b/src/plotting.jl index e271e3b0b..d622d9f69 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -108,7 +108,9 @@ function plot_model_estimates(𝓂::β„³, reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) - obs_idx = parse_variables_input_to_index(collect(axiskeys(data)[1]), 𝓂.timings) + data = data(sort(axiskeys(data,1))) + + obs_idx = parse_variables_input_to_index(collect(axiskeys(data,1)), 𝓂.timings) var_idx = parse_variables_input_to_index(variables, 𝓂.timings) shock_idx = parse_shocks_input_to_index(shocks,𝓂.timings) @@ -118,7 +120,7 @@ function plot_model_estimates(𝓂::β„³, data_in_deviations = data end - filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, sort(axiskeys(data)[1]); verbose = verbose) + filtered_and_smoothed = filter_and_smooth(𝓂, data_in_deviations, collect(axiskeys(data,1)); verbose = verbose) variables_to_plot = filtered_and_smoothed[smooth ? 1 : 5] shocks_to_plot = filtered_and_smoothed[smooth ? 3 : 7] @@ -959,7 +961,7 @@ function plot_solution(𝓂::β„³, for (i,k) in enumerate(vars_to_plot) kk = Symbol(replace(string(k), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")) - + if !has_impact_list[i] continue end push!(pp,begin From a8d058114f8b5c3a33d5160c3160cb94a89af90f Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 2 May 2023 15:04:16 +0200 Subject: [PATCH 13/83] update todos --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index eb2c1b463..d7e013851 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -4,6 +4,7 @@ - [ ] add balanced growth path handling - [ ] check that there is an error if he cant find SS +- [ ] plot_model_estimates with unconditional forecast at the end - [ ] check if you can do analytic derivatives for higher order derivatives - [ ] kick out unsused parameters from m.parameters - [ ] higher order solution derivs with Zygote From 48e801b393f91bc9b8247187a55a2654fe960176 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 13:17:44 +0200 Subject: [PATCH 14/83] update todos --- docs/src/unfinished_docs/todo.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index d7e013851..43f66cddc 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -6,7 +6,7 @@ - [ ] check that there is an error if he cant find SS - [ ] plot_model_estimates with unconditional forecast at the end - [ ] check if you can do analytic derivatives for higher order derivatives -- [ ] kick out unsused parameters from m.parameters +- [ ] kick out unused parameters from m.parameters - [ ] higher order solution derivs with Zygote - [ ] use cache for gradient calc in estimation (see DifferentiableStateSpaceModels) - [ ] use krylov instead of linearsolve and speed up sparse matrix calcs in implicit diff of higher order funcs @@ -98,7 +98,6 @@ - [ ] rewrite first order with riccati equation MatrixEquations.jl - [ ] exploit variable incidence and compression for higher order derivatives - [ ] for estimation use CUDA with st order: linear time iteration starting from last 1st order solution and then LinearSolveCUDA solvers for higher orders. 
this should bring benefits for large models and HANK models -- [ ] test on highly [nonlinear model](https://www.sciencedirect.com/science/article/pii/S0165188917300970) - [ ] pull request in StatsFuns to have norminv... accept type numbers and add translation from matlab: norminv to StatsFuns norminvcdf - [ ] more informative errors when declaring equations/ calibration - [ ] unit equation errors @@ -108,6 +107,7 @@ - [ ] print legend for algorithm in last subplot of plot only - [ ] select variables for moments +- [x] test on highly [nonlinear model](https://www.sciencedirect.com/science/article/pii/S0165188917300970) # caldara et al is actually epstein zin wiht stochastic vol - [x] conditional forecasting - [x] find way to recover from failed SS solution which is written to init guess - [x] redo ugly solution for selecting parameters to differentiate for From 9eb85238c17f957651db6dc14e7b712646fe56b9 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 18:12:27 +0200 Subject: [PATCH 15/83] new get_statistics function --- src/MacroModelling.jl | 2 +- src/get_functions.jl | 81 ++++++++++++++++++++++---------- test/test_standalone_function.jl | 50 +++++++++----------- 3 files changed, 78 insertions(+), 55 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 58741bfd9..023f4a809 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -58,7 +58,7 @@ export get_irfs, get_irf, get_IRF, simulate, get_simulation export get_conditional_forecast, plot_conditional_forecast export get_solution, get_first_order_solution, get_perturbation_solution export get_steady_state, get_SS, get_ss, get_non_stochastic_steady_state, get_stochastic_steady_state, get_SSS, steady_state, SS, SSS -export get_moments, get_covariance, get_standard_deviation, get_variance, get_var, get_std, get_cov, var, std, cov +export get_moments, get_statistics, get_covariance, get_standard_deviation, get_variance, get_var, get_std, get_cov, var, std, cov export get_autocorrelation, get_correlation, get_variance_decomposition, get_corr, get_autocorr, get_var_decomp, corr, autocorr export get_fevd, fevd, get_forecast_error_variance_decomposition, get_conditional_variance_decomposition export calculate_jacobian, calculate_hessian, calculate_third_order_derivatives diff --git a/src/get_functions.jl b/src/get_functions.jl index cc27e0100..4d09bc7d5 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -1749,10 +1749,12 @@ Function to use when differentiating model moments with repect to parameters. 
- $MODEL - $PARAMETER_VALUES # Keyword Arguments -- `non_stochastic_steady_state` [Default: `true`, Type: `Bool`]: switch to return SS of endogenous variables -- `standard_deviation` [Default: `true`, Type: `Bool`]: switch to return standard deviation of endogenous variables -- `variance` [Default: `false`, Type: `Bool`]: switch to return variance of endogenous variables -- `covariance` [Default: `false`, Type: `Bool`]: switch to return covariance matrix of endogenous variables +- `non_stochastic_steady_state` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: switch to return SS of endogenous variables +- `standard_deviation` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the standard deviation of the mentioned variables +- `variance` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the variance of the mentioned variables +- `covariance` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the covariance of the mentioned variables +- `autocorrelation` [Default: `Symbol[]`, Type: `Vector{Symbol}`]: if values are provided the function returns the autocorrelation of the mentioned variables +- `autocorrelation_periods` [Default: `1:5`]: periods for which to return the autocorrelation of the mentioned variables - $VERBOSE # Examples @@ -1774,53 +1776,80 @@ end; Ξ² = 0.95 end; -get_moments(RBC, RBC.parameter_values) +get_statistics(RBC, RBC.parameter_values, parameters = RBC.parameters, standard_deviation = RBC.var) # output -2-element Vector{Any}: - [5.936252888048724, 47.39025414828808, 6.884057971014486, 0.0] - [0.026664203785255254, 0.26467737291222343, 0.07393254045396497, 0.010206207261596576] +1-element Vector{Any}: + [0.02666420378525503, 0.26467737291221793, 0.07393254045396483, 0.010206207261596574] ``` """ -function get_moments(𝓂::β„³, parameters::Vector; - non_stochastic_steady_state::Bool = true, - standard_deviation::Bool = true, - variance::Bool = false, - covariance::Bool = false, - verbose::Bool = false) +function get_statistics(𝓂, parameter_values::Vector{T}; + parameters::Vector{Symbol} = Symbol[], + non_stochastic_steady_state::Vector{Symbol} = Symbol[], + standard_deviation::Vector{Symbol} = Symbol[], + variance::Vector{Symbol} = Symbol[], + covariance::Vector{Symbol} = Symbol[], + autocorrelation::Vector{Symbol} = Symbol[], + autocorrelation_periods::U = 1:5, + verbose::Bool = false) where {U,T} + + @assert !(non_stochastic_steady_state == Symbol[]) || !(standard_deviation == Symbol[]) || !(variance == Symbol[]) || !(covariance == Symbol[]) || !(autocorrelation == Symbol[]) "Provide variables for at least one output." 
+ + SS_var_idx = indexin(non_stochastic_steady_state, 𝓂.var) + + std_var_idx = indexin(standard_deviation, 𝓂.var) + + var_var_idx = indexin(variance, 𝓂.var) + + covar_var_idx = indexin(covariance, 𝓂.var) + + autocorr_var_idx = indexin(autocorrelation, 𝓂.var) + + other_parameter_values = 𝓂.parameter_values[indexin(setdiff(𝓂.parameters, parameters), 𝓂.parameters)] + + sort_idx = sortperm(vcat(indexin(setdiff(𝓂.parameters, parameters), 𝓂.parameters), indexin(parameters, 𝓂.parameters))) + + all_parameters = vcat(other_parameter_values, parameter_values)[sort_idx] solve!(𝓂, verbose = verbose) - covar_dcmp, __, _, SS_and_pars = calculate_covariance(parameters,𝓂, verbose = verbose) + covar_dcmp, sol, _, SS_and_pars = calculate_covariance(all_parameters,𝓂, verbose = verbose) SS = SS_and_pars[1:end - length(𝓂.calibration_equations)] - if variance + if !(variance == Symbol[]) varrs = convert(Vector{Real},β„’.diag(covar_dcmp)) - if standard_deviation + if !(standard_deviation == Symbol[]) st_dev = sqrt.(varrs) end + elseif !(autocorrelation == Symbol[]) + A = @views sol[:,1:𝓂.timings.nPast_not_future_and_mixed] * β„’.diagm(ones(𝓂.timings.nVars))[𝓂.timings.past_not_future_and_mixed_idx,:] + + autocorr = reduce(hcat,[β„’.diag(A ^ i * covar_dcmp ./ β„’.diag(covar_dcmp)) for i in autocorrelation_periods]) else - if standard_deviation + if !(standard_deviation == Symbol[]) st_dev = sqrt.(convert(Vector{Real},β„’.diag(covar_dcmp))) end end ret = [] - if non_stochastic_steady_state - push!(ret,SS) + if !(non_stochastic_steady_state == Symbol[]) + push!(ret,SS[SS_var_idx]) end - if standard_deviation - push!(ret,st_dev) + if !(standard_deviation == Symbol[]) + push!(ret,st_dev[std_var_idx]) end - if variance - push!(ret,varrs) + if !(variance == Symbol[]) + push!(ret,varrs[var_var_idx]) end - if covariance + if !(covariance == Symbol[]) covar_dcmp_sp = sparse(β„’.triu(covar_dcmp)) droptol!(covar_dcmp_sp,eps(Float64)) - push!(ret,covar_dcmp_sp) + push!(ret,covar_dcmp_sp[covar_var_idx,covar_var_idx]) + end + if !(autocorrelation == Symbol[]) + push!(ret,autocorr[autocorr_var_idx,:] ) end return ret diff --git a/test/test_standalone_function.jl b/test/test_standalone_function.jl index 61047fddf..a240ab55c 100644 --- a/test/test_standalone_function.jl +++ b/test/test_standalone_function.jl @@ -275,70 +275,64 @@ end end - @testset verbose = true "NSSS and std derivatives" begin # derivatives of paramteres wrt standard deviations - stdev_deriv = ForwardDiff.jacobian(x -> get_moments(RBC_CME, x)[2], Float64.(RBC_CME.parameter_values)) + stdev_deriv = ForwardDiff.jacobian(x -> get_statistics(RBC_CME, x, parameters = RBC_CME.parameters, standard_deviation = RBC_CME.var)[1], RBC_CME.parameter_values) + stdev_deriv[9] @test isapprox(stdev_deriv[5,6],1.3135107627695757, rtol = 1e-6) - # derivatives of paramteres wrt non stochastic steady state - nsss_deriv = ForwardDiff.jacobian(x -> get_moments(RBC_CME, x)[1], Float64.(RBC_CME.parameter_values)) + nsss_deriv = ForwardDiff.jacobian(x -> get_statistics(RBC_CME, x, parameters = RBC_CME.parameters, non_stochastic_steady_state = RBC_CME.var)[1], RBC_CME.parameter_values) @test isapprox(nsss_deriv[4,1],3.296074644820076, rtol = 1e-6) end @testset verbose = true "Method of moments" begin # Method of moments: with varying steady states and derivatives of steady state numerical solved_vars - sol = Optim.optimize(x -> sum(abs2, get_moments(RBC_CME, vcat(x, RBC_CME.parameter_values[2:end]))[2][[5]] - [.21]), + sol = Optim.optimize(x -> sum(abs2, get_statistics(RBC_CME, x, 
parameters = [RBC_CME.parameters[1]], standard_deviation = [RBC_CME.var[5]])[1] - [.21]), [0], [1], [.16], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox(get_moments(RBC_CME, vcat(sol.minimizer,RBC_CME.parameter_values[2:end]))[2][5],.21,rtol = 1e-6) - - + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = [RBC_CME.parameters[1]], standard_deviation = [RBC_CME.var[5]])[1] ,[.21], rtol = 1e-6) # multiple parameter inputs and targets - sol = Optim.optimize(x -> sum(abs2,get_moments(RBC_CME, vcat(x[1], RBC_CME.parameter_values[2:end-1],x[2]))[2][[2,5]] - [.0008,.21]), + sol = Optim.optimize(x -> sum(abs2,get_statistics(RBC_CME, x, parameters = RBC_CME.parameters[1:2], standard_deviation = RBC_CME.var[[2,5]])[1] - [.0008,.21]), [0,0], [1,1], [.006,.16], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox(get_moments(RBC_CME, vcat(sol.minimizer[1],RBC_CME.parameter_values[2:end-1],sol.minimizer[2]))[2][[2,5]],[.0008,.21],rtol=1e-6) - + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], standard_deviation = RBC_CME.var[[2,5]])[1], [.0008,.21], rtol=1e-6) # function combining targets for SS and St.Dev. - function get_variances_optim(x,p) - out = get_moments(RBC_CME, vcat(x,p)) - sum(abs2,[out[1][6] - 1.45, out[2][5] - .2]) + function get_variances_optim(x) + out = get_statistics(RBC_CME, x, parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sum(abs2,[out[1][1] - 1.45, out[2][1] - .2]) end - out = get_variances_optim([.157,.999],RBC_CME.parameter_values[3:end]) + out = get_variances_optim([.157,.999]) - out = get_moments(RBC_CME, vcat([.157,.999],RBC_CME.parameter_values[3:end])) - sum(abs2,[out[1][6] - 1.4, out[2][5] - .21]) + out = get_statistics(RBC_CME, [.157,.999], parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sum(abs2,[out[1][1] - 1.4, out[2][1] - .21]) - sol = Optim.optimize(x -> get_variances_optim(x,RBC_CME.parameter_values[3:end]), + sol = Optim.optimize(x -> get_variances_optim(x), [0,0.95], [1,1], [.16, .999], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox([get_moments(RBC_CME, vcat(sol.minimizer,RBC_CME.parameter_values[3:end]))[1][6] - get_moments(RBC_CME, vcat(sol.minimizer,RBC_CME.parameter_values[3:end]))[2][5]],[1.45,.2],rtol = 1e-6) + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]),[[1.45],[.2]],rtol = 1e-6) # function combining targets for SS, St.Dev., and parameter - function get_variances_optim2(x,p) - out = get_moments(RBC_CME, vcat(x,p)) - sum(abs2,[out[1][6] - 1.45, out[2][5] - .2, x[3] - .02]) + function get_variances_optim2(x) + out = get_statistics(RBC_CME, x, parameters = RBC_CME.parameters[1:3], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sum(abs2,[out[1][1] - 1.45, out[2][1] - .2, x[3] - .02]) end - out = get_variances_optim2([.157,.999,.022],RBC_CME.parameter_values[4:end]) + out = get_variances_optim2([.157,.999,.022]) - sol = Optim.optimize(x -> get_variances_optim2(x, RBC_CME.parameter_values[4:end]), + sol = Optim.optimize(x -> get_variances_optim2(x), [0,0.95,0], [1,1,1], [.16, 
.999,.022], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox([get_moments(RBC_CME, vcat(sol.minimizer, RBC_CME.parameter_values[4:end]))[1][6] - get_moments(RBC_CME, vcat(sol.minimizer, RBC_CME.parameter_values[4:end]))[2][5] - sol.minimizer[3]],[1.45,.2,.02],rtol = 1e-6) + @test isapprox([get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:3], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) + sol.minimizer[3]],[[1.45],[.2],.02],rtol = 1e-6) end RBC_CME = nothing From b1d54574e95f9274d7f7fccb33535393ab029c8f Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 19:10:40 +0200 Subject: [PATCH 16/83] return solved flag in first order solution --- src/MacroModelling.jl | 111 ++++++++++++++++++++++--------- src/get_functions.jl | 32 ++++++--- test/test_standalone_function.jl | 2 +- 3 files changed, 106 insertions(+), 39 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 023f4a809..5f47db336 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -1294,7 +1294,7 @@ function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁ = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) @@ -1403,7 +1403,7 @@ function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁ = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) @@ -1465,10 +1465,14 @@ function solve!(𝓂::β„³; if (any([:riccati, :first_order] .∈ ([algorithm],)) && any([:riccati, :first_order] .∈ (𝓂.solution.outdated_algorithms,))) || (:second_order == algorithm && :second_order ∈ 𝓂.solution.outdated_algorithms) || (:third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms) SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) + # @assert solution_error < eps() "Could not find non stochastic steady steady." + βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - sol_mat = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol_mat, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + @assert solved "Could not find stable first order solution." 
+ state_update₁ = function(state::Vector{Float64}, shock::Vector{Float64}) sol_mat * [state[𝓂.timings.past_not_future_and_mixed_idx]; shock] end 𝓂.solution.perturbation.first_order = perturbation_solution(sol_mat, state_update₁) @@ -2256,7 +2260,7 @@ end -function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = false)::Matrix{Float64} +function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = false)::Tuple{Matrix{Float64},Bool} βˆ‡β‚Š = @view βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] βˆ‡β‚€ = @view βˆ‡β‚[:,T.nFuture_not_past_and_mixed .+ range(1, T.nVars)] βˆ‡β‚‹ = @view βˆ‡β‚[:,T.nFuture_not_past_and_mixed + T.nVars .+ range(1, T.nPast_not_future_and_mixed)] @@ -2297,7 +2301,15 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = S₁₁ = @view schdcmp.S[1:T.nPast_not_future_and_mixed, 1:T.nPast_not_future_and_mixed] T₁₁ = @view schdcmp.T[1:T.nPast_not_future_and_mixed, 1:T.nPast_not_future_and_mixed] - Z₁₁inv = β„’.pinv(Z₁₁) + Ẑ₁₁ = RF.lu(Z₁₁, check = false) + + if !β„’.issuccess(Ẑ₁₁) + Ẑ₁₁ = β„’.svd(Z₁₁, check = false) + end + + if !β„’.issuccess(Ẑ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false + end else eigenselect = abs.(schdcmp.Ξ² ./ schdcmp.Ξ±) .< 1 @@ -2309,11 +2321,29 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = S₁₁ = @view schdcmp.S[1:T.nPast_not_future_and_mixed, 1:T.nPast_not_future_and_mixed] T₁₁ = @view schdcmp.T[1:T.nPast_not_future_and_mixed, 1:T.nPast_not_future_and_mixed] - Z₁₁inv = inv(Z₁₁) + Ẑ₁₁ = RF.lu(Z₁₁, check = false) + + if !β„’.issuccess(Ẑ₁₁) + Ẑ₁₁ = β„’.svd(Z₁₁, check = false) + end + + if !β„’.issuccess(Ẑ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false + end + end + + Ŝ₁₁ = RF.lu(S₁₁, check = false) + + if !β„’.issuccess(Ŝ₁₁) + Ŝ₁₁ = β„’.svd(S₁₁, check = false) + end + + if !β„’.issuccess(Ŝ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false end - D = Z₂₁ * Z₁₁inv - L = Z₁₁ * (S₁₁ \ T₁₁) * Z₁₁inv + D = Z₂₁ / Ẑ₁₁ + L = Z₁₁ * (Ŝ₁₁ \ T₁₁) / Ẑ₁₁ sol = @views vcat(L[T.not_mixed_in_past_idx,:], D) @@ -2322,9 +2352,19 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = AΜƒβ‚€α΅€ = @view Aβ‚€[1:T.nPresent_only, T.present_but_not_only_idx] Aβ‚‹α΅€ = @view Aβ‚‹[1:T.nPresent_only,:] - A = @views vcat(- AΜ„β‚€α΅€ \ (Aβ‚Šα΅€ * D * L + AΜƒβ‚€α΅€ * sol[T.dynamic_order,:] + Aβ‚‹α΅€), sol) + AΜ„Μ‚β‚€α΅€ = RF.lu(AΜ„β‚€α΅€, check = false) + + if !β„’.issuccess(AΜ„Μ‚β‚€α΅€) + AΜ„Μ‚β‚€α΅€ = β„’.svd(AΜ„β‚€α΅€, check = false) + end + + if !β„’.issuccess(AΜ„Μ‚β‚€α΅€) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false + end + + A = @views vcat(-(AΜ„Μ‚β‚€α΅€ \ (Aβ‚Šα΅€ * D * L + AΜƒβ‚€α΅€ * sol[T.dynamic_order,:] + Aβ‚‹α΅€)), sol) - @view A[T.reorder,:] + return @view(A[T.reorder,:]), true end @@ -2351,37 +2391,44 @@ function riccati_forward(βˆ‡β‚::Matrix{β„±.Dual{Z,S,N}}; T::timings = T, explos ps = mapreduce(β„±.partials, hcat, βˆ‡β‚)' # get f(vs) - val = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) + val, solved = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) - # get J(f, vs) * ps (cheating). Write your custom rule here - B = β„±.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚) - A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val) - # B = Zygote.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚)[1] - # A = Zygote.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val)[1] + if solved + # get J(f, vs) * ps (cheating). 
Write your custom rule here + B = β„±.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚) + A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val) + # B = Zygote.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚)[1] + # A = Zygote.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val)[1] - AΜ‚ = RF.lu(A, check = false) + AΜ‚ = RF.lu(A, check = false) - if !β„’.issuccess(AΜ‚) - AΜ‚ = β„’.svd(A) + if !β„’.issuccess(AΜ‚) + AΜ‚ = β„’.svd(A) + end + + jvp = -(AΜ‚ \ B) * ps + else + jvp = fill(0,length(val),length(βˆ‡Μ‚β‚)) * ps end - - jvp = -(AΜ‚ \ B) * ps # pack: SoA -> AoS return reshape(map(val, eachrow(jvp)) do v, p β„±.Dual{Z}(v, p...) # Z is the tag - end,size(val)) + end,size(val)), solved end # riccati_AD = ImplicitFunction(riccati_forward, riccati_conditions) -riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive), (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) +riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive), (x,y)->(riccati_conditions(x,y[1],T=T,explosive=explosive),y[2])) -function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false)::Matrix{S} where S <: Real +function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false)::Tuple{Matrix{S},Bool} where S <: Real # A = riccati_AD(βˆ‡β‚, T = T, explosive = explosive) riccati = riccati_(βˆ‡β‚, T = T, explosive = explosive) - A = riccati(βˆ‡β‚) - # A = riccati_forward(βˆ‡β‚, T = T, explosive = explosive) + A, solved = riccati(βˆ‡β‚) + + if !solved + return hcat(A, zeros(size(A,1),T.nExo)), solved + end Jm = @view(β„’.diagm(ones(S,T.nVars))[T.past_not_future_and_mixed_idx,:]) @@ -2391,7 +2438,7 @@ function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive B = -((βˆ‡β‚Š * A * Jm + βˆ‡β‚€) \ βˆ‡β‚‘) - return hcat(A, B) + return hcat(A, B), solved end @@ -2895,7 +2942,7 @@ function calculate_covariance(parameters::Vector{<: Real}, 𝓂::β„³; verbose::B βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) covar_raw = calculate_covariance_forward(sol,T = 𝓂.timings, subset_indices = collect(1:𝓂.timings.nVars)) @@ -2994,7 +3041,11 @@ function calculate_kalman_filter_loglikelihood(𝓂::β„³, data::AbstractArray{Fl βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + + if !solved + return -Inf + end observables_and_states = @ignore_derivatives sort(union(𝓂.timings.past_not_future_and_mixed_idx,indexin(observables,sort(union(𝓂.aux,𝓂.var,𝓂.exo_present))))) @@ -3066,7 +3117,7 @@ function filter_and_smooth(𝓂::β„³, data_in_deviations::AbstractArray{Float64} βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) A = @views sol[:,1:𝓂.timings.nPast_not_future_and_mixed] * β„’.diagm(ones(𝓂.timings.nVars))[𝓂.timings.past_not_future_and_mixed_idx,:] diff --git a/src/get_functions.jl b/src/get_functions.jl index 4d09bc7d5..9c8570a36 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -654,7 +654,7 @@ function get_irf(𝓂::β„³, βˆ‡β‚ = calculate_jacobian(parameters, reference_steady_state, 𝓂) - sol_mat = 
calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol_mat, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) state_update = function(state::Vector, shock::Vector) sol_mat * [state[𝓂.timings.past_not_future_and_mixed_idx]; shock] end @@ -1147,19 +1147,35 @@ function get_solution(𝓂::β„³, parameters::Vector{<: Real}; algorithm::Symbol SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) if solution_error > tol || isnan(solution_error) - return -Inf + if algorithm == :second_order + return SS_and_pars[1:length(𝓂.var)], zeros(𝓂.var,2), spzeros(𝓂.var,2), false + elseif algorithm == :third_order + return SS_and_pars[1:length(𝓂.var)], zeros(𝓂.var,2), spzeros(𝓂.var,2), spzeros(𝓂.var,2), false + else + return SS_and_pars[1:length(𝓂.var)], zeros(𝓂.var,2), false + end end βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁ = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + + if !solved + if algorithm == :second_order + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.var,2), false + elseif algorithm == :third_order + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.var,2), spzeros(𝓂.var,2), false + else + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, false + end + end if algorithm == :second_order βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) 𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂ + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, true elseif algorithm == :third_order βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) @@ -1169,9 +1185,9 @@ function get_solution(𝓂::β„³, parameters::Vector{<: Real}; algorithm::Symbol 𝐒₃ = calculate_third_order_solution(βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂; T = 𝓂.timings) - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, 𝐒₃ + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, 𝐒₃, true else - return SS_and_pars[1:length(𝓂.var)], 𝐒₁ + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, true end end @@ -1267,7 +1283,7 @@ function get_conditional_variance_decomposition(𝓂::β„³; βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - 𝑺₁ = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝑺₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) A = @views 𝑺₁[:,1:𝓂.timings.nPast_not_future_and_mixed] * β„’.diagm(ones(𝓂.timings.nVars))[indexin(𝓂.timings.past_not_future_and_mixed_idx,1:𝓂.timings.nVars),:] @@ -1387,7 +1403,7 @@ function get_variance_decomposition(𝓂::β„³; βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - sol = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) variances_by_shock = reduce(hcat,[β„’.diag(calculate_covariance_forward(sol[:,[1:𝓂.timings.nPast_not_future_and_mixed..., 𝓂.timings.nPast_not_future_and_mixed+i]], T = 𝓂.timings, subset_indices = collect(1:𝓂.timings.nVars))) for i in 1:𝓂.timings.nExo]) diff --git a/test/test_standalone_function.jl b/test/test_standalone_function.jl index a240ab55c..50c1c9714 100644 --- a/test/test_standalone_function.jl +++ b/test/test_standalone_function.jl @@ -67,7 +67,7 @@ get_irf(RBC_CME, algorithm = :third_order) T = timings([:R, :y], [:Pi, :c], [:k, :z_delta], [:A], [:A, :Pi, :c], [:A, :k, :z_delta], [:A, :Pi, :c, :k, :z_delta], [:A], [:k, :z_delta], [:A], [:delta_eps, :eps_z], [:A, :Pi, :R, :c, :k, :y, :z_delta], Symbol[], Symbol[], 2, 1, 3, 3, 5, 7, 2, [3, 6], [1, 2, 4, 5, 7], [1, 2, 4], [2, 3], [1, 5, 7], [1], [1], [5, 7], [5, 
6, 1, 7, 3, 2, 4], [3, 4, 5, 1, 2]) -first_order_solution = calculate_first_order_solution(βˆ‡β‚; T = T, explosive = false)# |> Matrix{Float32} +first_order_solution, solved = calculate_first_order_solution(βˆ‡β‚; T = T, explosive = false)# |> Matrix{Float32} second_order_solution = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, From 5b2ed78953c660afc1778bde675c6500a94d3ca3 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 19:10:55 +0200 Subject: [PATCH 17/83] add estimation plots to gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 2f122df4c..e8c3ac551 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,4 @@ fevd*.png solution*.png conditional_fcst*.png .CondaPkg/ +estimation__* From 4e28e0159076de835ff436f07c18e0e91dde76c6 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 20:02:02 +0200 Subject: [PATCH 18/83] fix zygote --- src/MacroModelling.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 5f47db336..228899fe5 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -2419,12 +2419,14 @@ end # riccati_AD = ImplicitFunction(riccati_forward, riccati_conditions) -riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive), (x,y)->(riccati_conditions(x,y[1],T=T,explosive=explosive),y[2])) +riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive)[1], (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false)::Tuple{Matrix{S},Bool} where S <: Real # A = riccati_AD(βˆ‡β‚, T = T, explosive = explosive) riccati = riccati_(βˆ‡β‚, T = T, explosive = explosive) - A, solved = riccati(βˆ‡β‚) + A = riccati(βˆ‡β‚) + + solved = @ignore_derivatives !(isapprox(sum(abs,A), 0, rtol = eps())) if !solved return hcat(A, zeros(size(A,1),T.nExo)), solved From d4fdad4f6049ef97d5146206959995dc8b8d5e1f Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 20:55:18 +0200 Subject: [PATCH 19/83] remove svd --- docs/src/unfinished_docs/todo.md | 1 + src/MacroModelling.jl | 14 +------------- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 43f66cddc..8727b00e3 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced growth path handling +- [ ] bring solution error into an object of the model so we dont have to pass it on as output - [ ] check that there is an error if he cant find SS - [ ] plot_model_estimates with unconditional forecast at the end - [ ] check if you can do analytic derivatives for higher order derivatives diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 228899fe5..54a281f68 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -2323,10 +2323,6 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Ẑ₁₁ = RF.lu(Z₁₁, check = false) - if !β„’.issuccess(Ẑ₁₁) - Ẑ₁₁ = β„’.svd(Z₁₁, check = false) - end - if !β„’.issuccess(Ẑ₁₁) return zeros(T.nVars,T.nPast_not_future_and_mixed), false end @@ -2334,10 +2330,6 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Ŝ₁₁ = RF.lu(S₁₁, check = false) - if !β„’.issuccess(Ŝ₁₁) - Ŝ₁₁ = β„’.svd(S₁₁, check = false) - end - if !β„’.issuccess(Ŝ₁₁) return 
zeros(T.nVars,T.nPast_not_future_and_mixed), false end @@ -2355,11 +2347,7 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = AΜ„Μ‚β‚€α΅€ = RF.lu(AΜ„β‚€α΅€, check = false) if !β„’.issuccess(AΜ„Μ‚β‚€α΅€) - AΜ„Μ‚β‚€α΅€ = β„’.svd(AΜ„β‚€α΅€, check = false) - end - - if !β„’.issuccess(AΜ„Μ‚β‚€α΅€) - return zeros(T.nVars,T.nPast_not_future_and_mixed), false + AΜ„Μ‚β‚€α΅€ = β„’.svd(collect(AΜ„β‚€α΅€)) end A = @views vcat(-(AΜ„Μ‚β‚€α΅€ \ (Aβ‚Šα΅€ * D * L + AΜƒβ‚€α΅€ * sol[T.dynamic_order,:] + Aβ‚‹α΅€)), sol) From 579d027b82335d2a9086883b7665830daea0aafa Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 21:23:44 +0200 Subject: [PATCH 20/83] bump version --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 773a5f177..9d61ad21a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "MacroModelling" uuid = "687ffad2-3618-405e-ac50-e0f7b9c75e44" authors = ["Thore Kockerols "] -version = "0.1.20" +version = "0.1.21" [deps] AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5" From 862a70c03756020186d66588407788bfaf23d602 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 22:04:09 +0200 Subject: [PATCH 21/83] change tol on method of moments --- test/test_standalone_function.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/test_standalone_function.jl b/test/test_standalone_function.jl index 50c1c9714..1a2936228 100644 --- a/test/test_standalone_function.jl +++ b/test/test_standalone_function.jl @@ -292,14 +292,14 @@ end [0], [1], [.16], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = [RBC_CME.parameters[1]], standard_deviation = [RBC_CME.var[5]])[1] ,[.21], rtol = 1e-6) + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = [RBC_CME.parameters[1]], standard_deviation = [RBC_CME.var[5]])[1] ,[.21], atol = 1e-6) # multiple parameter inputs and targets sol = Optim.optimize(x -> sum(abs2,get_statistics(RBC_CME, x, parameters = RBC_CME.parameters[1:2], standard_deviation = RBC_CME.var[[2,5]])[1] - [.0008,.21]), [0,0], [1,1], [.006,.16], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], standard_deviation = RBC_CME.var[[2,5]])[1], [.0008,.21], rtol=1e-6) + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], standard_deviation = RBC_CME.var[[2,5]])[1], [.0008,.21], atol=1e-6) # function combining targets for SS and St.Dev. 
function get_variances_optim(x) @@ -315,7 +315,7 @@ end [0,0.95], [1,1], [.16, .999], Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) - @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]),[[1.45],[.2]],rtol = 1e-6) + @test isapprox(get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:2], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]),[[1.45],[.2]],atol = 1e-6) @@ -332,7 +332,7 @@ end Optim.Fminbox(Optim.LBFGS(linesearch = LineSearches.BackTracking(order = 3))); autodiff = :forward) @test isapprox([get_statistics(RBC_CME, sol.minimizer, parameters = RBC_CME.parameters[1:3], non_stochastic_steady_state = [RBC_CME.var[6]], standard_deviation = [RBC_CME.var[5]]) - sol.minimizer[3]],[[1.45],[.2],.02],rtol = 1e-6) + sol.minimizer[3]],[[1.45],[.2],.02],atol = 1e-6) end RBC_CME = nothing From 31372737d31ba41b441702bd69fa16471252a946 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 3 May 2023 23:54:58 +0200 Subject: [PATCH 22/83] spacing in SW03 model --- models/SW03.jl | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/models/SW03.jl b/models/SW03.jl index fbf7c4c35..22b8c574f 100644 --- a/models/SW03.jl +++ b/models/SW03.jl @@ -1,57 +1,110 @@ @model SW03 begin -q[0] + beta * ((1 - tau) * q[1] + epsilon_b[1] * (r_k[1] * z[1] - psi^-1 * r_k[ss] * (-1 + exp(psi * (-1 + z[1])))) * (C[1] - h * C[0])^(-sigma_c)) + -q_f[0] + beta * ((1 - tau) * q_f[1] + epsilon_b[1] * (r_k_f[1] * z_f[1] - psi^-1 * r_k_f[ss] * (-1 + exp(psi * (-1 + z_f[1])))) * (C_f[1] - h * C_f[0])^(-sigma_c)) + -r_k[0] + alpha * epsilon_a[0] * mc[0] * L[0]^(1 - alpha) * (K[-1] * z[0])^(-1 + alpha) + -r_k_f[0] + alpha * epsilon_a[0] * mc_f[0] * L_f[0]^(1 - alpha) * (K_f[-1] * z_f[0])^(-1 + alpha) + -G[0] + T[0] + -G[0] + G_bar * epsilon_G[0] + -G_f[0] + T_f[0] + -G_f[0] + G_bar * epsilon_G[0] + -L[0] + nu_w[0]^-1 * L_s[0] + -L_s_f[0] + L_f[0] * (W_i_f[0] * W_f[0]^-1)^(lambda_w^-1 * (-1 - lambda_w)) + L_s_f[0] - L_f[0] + L_s_f[0] + lambda_w^-1 * L_f[0] * W_f[0]^-1 * (-1 - lambda_w) * (-W_disutil_f[0] + W_i_f[0]) * (W_i_f[0] * W_f[0]^-1)^(-1 + lambda_w^-1 * (-1 - lambda_w)) + Pi_ws_f[0] - L_s_f[0] * (-W_disutil_f[0] + W_i_f[0]) + Pi_ps_f[0] - Y_f[0] * (-mc_f[0] + P_j_f[0]) * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) + -Q[0] + epsilon_b[0]^-1 * q[0] * (C[0] - h * C[-1])^(sigma_c) + -Q_f[0] + epsilon_b[0]^-1 * q_f[0] * (C_f[0] - h * C_f[-1])^(sigma_c) + -W[0] + epsilon_a[0] * mc[0] * (1 - alpha) * L[0]^(-alpha) * (K[-1] * z[0])^alpha + -W_f[0] + epsilon_a[0] * mc_f[0] * (1 - alpha) * L_f[0]^(-alpha) * (K_f[-1] * z_f[0])^alpha + -Y_f[0] + Y_s_f[0] + Y_s[0] - nu_p[0] * Y[0] + -Y_s_f[0] + Y_f[0] * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) + beta * epsilon_b[1] * (C_f[1] - h * C_f[0])^(-sigma_c) - epsilon_b[0] * R_f[0]^-1 * (C_f[0] - h * C_f[-1])^(-sigma_c) + beta * epsilon_b[1] * pi[1]^-1 * (C[1] - h * C[0])^(-sigma_c) - epsilon_b[0] * R[0]^-1 * (C[0] - h * C[-1])^(-sigma_c) + Y_f[0] * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) - lambda_p^-1 * Y_f[0] * (1 + lambda_p) * (-mc_f[0] + P_j_f[0]) * P_j_f[0]^(-1 - lambda_p^-1 * (1 + lambda_p)) + epsilon_b[0] * W_disutil_f[0] * (C_f[0] - h * C_f[-1])^(-sigma_c) - omega * epsilon_b[0] * epsilon_L[0] * L_s_f[0]^sigma_l + -1 + xi_p * (pi[0]^-1 * pi[-1]^gamma_p)^(-lambda_p^-1) + (1 - xi_p) * pi_star[0]^(-lambda_p^-1) + -1 + 
(1 - xi_w) * (w_star[0] * W[0]^-1)^(-lambda_w^-1) + xi_w * (W[-1] * W[0]^-1)^(-lambda_w^-1) * (pi[0]^-1 * pi[-1]^gamma_w)^(-lambda_w^-1) + -Phi - Y_s[0] + epsilon_a[0] * L[0]^(1 - alpha) * (K[-1] * z[0])^alpha + -Phi - Y_f[0] * P_j_f[0]^(-lambda_p^-1 * (1 + lambda_p)) + epsilon_a[0] * L_f[0]^(1 - alpha) * (K_f[-1] * z_f[0])^alpha + std_eta_b * eta_b[x] - log(epsilon_b[0]) + rho_b * log(epsilon_b[-1]) + -std_eta_L * eta_L[x] - log(epsilon_L[0]) + rho_L * log(epsilon_L[-1]) + std_eta_I * eta_I[x] - log(epsilon_I[0]) + rho_I * log(epsilon_I[-1]) + std_eta_w * eta_w[x] - f_1[0] + f_2[0] + std_eta_a * eta_a[x] - log(epsilon_a[0]) + rho_a * log(epsilon_a[-1]) + std_eta_p * eta_p[x] - g_1[0] + g_2[0] * (1 + lambda_p) + std_eta_G * eta_G[x] - log(epsilon_G[0]) + rho_G * log(epsilon_G[-1]) + -f_1[0] + beta * xi_w * f_1[1] * (w_star[0]^-1 * w_star[1])^(lambda_w^-1) * (pi[1]^-1 * pi[0]^gamma_w)^(-lambda_w^-1) + epsilon_b[0] * w_star[0] * L[0] * (1 + lambda_w)^-1 * (C[0] - h * C[-1])^(-sigma_c) * (w_star[0] * W[0]^-1)^(-lambda_w^-1 * (1 + lambda_w)) + -f_2[0] + beta * xi_w * f_2[1] * (w_star[0]^-1 * w_star[1])^(lambda_w^-1 * (1 + lambda_w) * (1 + sigma_l)) * (pi[1]^-1 * pi[0]^gamma_w)^(-lambda_w^-1 * (1 + lambda_w) * (1 + sigma_l)) + omega * epsilon_b[0] * epsilon_L[0] * (L[0] * (w_star[0] * W[0]^-1)^(-lambda_w^-1 * (1 + lambda_w)))^(1 + sigma_l) + -g_1[0] + beta * xi_p * pi_star[0] * g_1[1] * pi_star[1]^-1 * (pi[1]^-1 * pi[0]^gamma_p)^(-lambda_p^-1) + epsilon_b[0] * pi_star[0] * Y[0] * (C[0] - h * C[-1])^(-sigma_c) + -g_2[0] + beta * xi_p * g_2[1] * (pi[1]^-1 * pi[0]^gamma_p)^(-lambda_p^-1 * (1 + lambda_p)) + epsilon_b[0] * mc[0] * Y[0] * (C[0] - h * C[-1])^(-sigma_c) + -nu_w[0] + (1 - xi_w) * (w_star[0] * W[0]^-1)^(-lambda_w^-1 * (1 + lambda_w)) + xi_w * nu_w[-1] * (W[-1] * pi[0]^-1 * W[0]^-1 * pi[-1]^gamma_w)^(-lambda_w^-1 * (1 + lambda_w)) + -nu_p[0] + (1 - xi_p) * pi_star[0]^(-lambda_p^-1 * (1 + lambda_p)) + xi_p * nu_p[-1] * (pi[0]^-1 * pi[-1]^gamma_p)^(-lambda_p^-1 * (1 + lambda_p)) + -K[0] + K[-1] * (1 - tau) + I[0] * (1 - 0.5 * varphi * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])^2) + -K_f[0] + K_f[-1] * (1 - tau) + I_f[0] * (1 - 0.5 * varphi * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])^2) + U[0] - beta * U[1] - epsilon_b[0] * ((1 - sigma_c)^-1 * (C[0] - h * C[-1])^(1 - sigma_c) - omega * epsilon_L[0] * (1 + sigma_l)^-1 * L_s[0]^(1 + sigma_l)) + U_f[0] - beta * U_f[1] - epsilon_b[0] * ((1 - sigma_c)^-1 * (C_f[0] - h * C_f[-1])^(1 - sigma_c) - omega * epsilon_L[0] * (1 + sigma_l)^-1 * L_s_f[0]^(1 + sigma_l)) + -epsilon_b[0] * (C[0] - h * C[-1])^(-sigma_c) + q[0] * (1 - 0.5 * varphi * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])^2 - varphi * I[-1]^-1 * epsilon_I[0] * I[0] * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])) + beta * varphi * I[0]^-2 * epsilon_I[1] * q[1] * I[1]^2 * (-1 + I[0]^-1 * epsilon_I[1] * I[1]) + -epsilon_b[0] * (C_f[0] - h * C_f[-1])^(-sigma_c) + q_f[0] * (1 - 0.5 * varphi * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])^2 - varphi * I_f[-1]^-1 * epsilon_I[0] * I_f[0] * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])) + beta * varphi * I_f[0]^-2 * epsilon_I[1] * q_f[1] * I_f[1]^2 * (-1 + I_f[0]^-1 * epsilon_I[1] * I_f[1]) + std_eta_pi * eta_pi[x] - log(pi_obj[0]) + rho_pi_bar * log(pi_obj[-1]) + log(calibr_pi_obj) * (1 - rho_pi_bar) + -C[0] - I[0] - T[0] + Y[0] - psi^-1 * r_k[ss] * K[-1] * (-1 + exp(psi * (-1 + z[0]))) + -calibr_pi + std_eta_R * eta_R[x] - log(R[ss]^-1 * R[0]) + r_Delta_pi * (-log(pi[ss]^-1 * pi[-1]) + log(pi[ss]^-1 * pi[0])) + r_Delta_y * (-log(Y[ss]^-1 * Y[-1]) + log(Y[ss]^-1 * Y[0]) + 
log(Y_f[ss]^-1 * Y_f[-1]) - log(Y_f[ss]^-1 * Y_f[0])) + rho * log(R[ss]^-1 * R[-1]) + (1 - rho) * (log(pi_obj[0]) + r_pi * (-log(pi_obj[0]) + log(pi[ss]^-1 * pi[-1])) + r_Y * (log(Y[ss]^-1 * Y[0]) - log(Y_f[ss]^-1 * Y_f[0]))) + -C_f[0] - I_f[0] + Pi_ws_f[0] - T_f[0] + Y_f[0] + L_s_f[0] * W_disutil_f[0] - L_f[0] * W_f[0] - psi^-1 * r_k_f[ss] * K_f[-1] * (-1 + exp(psi * (-1 + z_f[0]))) + epsilon_b[0] * (K[-1] * r_k[0] - r_k[ss] * K[-1] * exp(psi * (-1 + z[0]))) * (C[0] - h * C[-1])^(-sigma_c) + epsilon_b[0] * (K_f[-1] * r_k_f[0] - r_k_f[ss] * K_f[-1] * exp(psi * (-1 + z_f[0]))) * (C_f[0] - h * C_f[-1])^(-sigma_c) end From 96c0f590edfb851b39c1346e1926360bc0941816 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Thu, 4 May 2023 00:01:26 +0200 Subject: [PATCH 23/83] making SW03 pretty --- models/SW03.jl | 80 ++++++++++++++++++++++++++------------------------ 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/models/SW03.jl b/models/SW03.jl index 22b8c574f..1505c9b55 100644 --- a/models/SW03.jl +++ b/models/SW03.jl @@ -94,65 +94,69 @@ -epsilon_b[0] * (C[0] - h * C[-1])^(-sigma_c) + q[0] * (1 - 0.5 * varphi * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])^2 - varphi * I[-1]^-1 * epsilon_I[0] * I[0] * (-1 + I[-1]^-1 * epsilon_I[0] * I[0])) + beta * varphi * I[0]^-2 * epsilon_I[1] * q[1] * I[1]^2 * (-1 + I[0]^-1 * epsilon_I[1] * I[1]) -epsilon_b[0] * (C_f[0] - h * C_f[-1])^(-sigma_c) + q_f[0] * (1 - 0.5 * varphi * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])^2 - varphi * I_f[-1]^-1 * epsilon_I[0] * I_f[0] * (-1 + I_f[-1]^-1 * epsilon_I[0] * I_f[0])) + beta * varphi * I_f[0]^-2 * epsilon_I[1] * q_f[1] * I_f[1]^2 * (-1 + I_f[0]^-1 * epsilon_I[1] * I_f[1]) - - std_eta_pi * eta_pi[x] - log(pi_obj[0]) + rho_pi_bar * log(pi_obj[-1]) + log(calibr_pi_obj) * (1 - rho_pi_bar) - + -C[0] - I[0] - T[0] + Y[0] - psi^-1 * r_k[ss] * K[-1] * (-1 + exp(psi * (-1 + z[0]))) - - -calibr_pi + std_eta_R * eta_R[x] - log(R[ss]^-1 * R[0]) + r_Delta_pi * (-log(pi[ss]^-1 * pi[-1]) + log(pi[ss]^-1 * pi[0])) + r_Delta_y * (-log(Y[ss]^-1 * Y[-1]) + log(Y[ss]^-1 * Y[0]) + log(Y_f[ss]^-1 * Y_f[-1]) - log(Y_f[ss]^-1 * Y_f[0])) + rho * log(R[ss]^-1 * R[-1]) + (1 - rho) * (log(pi_obj[0]) + r_pi * (-log(pi_obj[0]) + log(pi[ss]^-1 * pi[-1])) + r_Y * (log(Y[ss]^-1 * Y[0]) - log(Y_f[ss]^-1 * Y_f[0]))) - + -C_f[0] - I_f[0] + Pi_ws_f[0] - T_f[0] + Y_f[0] + L_s_f[0] * W_disutil_f[0] - L_f[0] * W_f[0] - psi^-1 * r_k_f[ss] * K_f[-1] * (-1 + exp(psi * (-1 + z_f[0]))) epsilon_b[0] * (K[-1] * r_k[0] - r_k[ss] * K[-1] * exp(psi * (-1 + z[0]))) * (C[0] - h * C[-1])^(-sigma_c) epsilon_b[0] * (K_f[-1] * r_k_f[0] - r_k_f[ss] * K_f[-1] * exp(psi * (-1 + z_f[0]))) * (C_f[0] - h * C_f[-1])^(-sigma_c) + + + # Perceived inflation objective + std_eta_pi * eta_pi[x] - log(pi_obj[0]) + rho_pi_bar * log(pi_obj[-1]) + log(calibr_pi_obj) * (1 - rho_pi_bar) + + # Taylor rule + -calibr_pi + std_eta_R * eta_R[x] - log(R[ss]^-1 * R[0]) + r_Delta_pi * (-log(pi[ss]^-1 * pi[-1]) + log(pi[ss]^-1 * pi[0])) + r_Delta_y * (-log(Y[ss]^-1 * Y[-1]) + log(Y[ss]^-1 * Y[0]) + log(Y_f[ss]^-1 * Y_f[-1]) - log(Y_f[ss]^-1 * Y_f[0])) + rho * log(R[ss]^-1 * R[-1]) + (1 - rho) * (log(pi_obj[0]) + r_pi * (-log(pi_obj[0]) + log(pi[ss]^-1 * pi[-1])) + r_Y * (log(Y[ss]^-1 * Y[0]) - log(Y_f[ss]^-1 * Y_f[0]))) + end @parameters SW03 begin - lambda_p = .368 - G_bar = .362 + lambda_p = 0.368 + G_bar = 0.362 lambda_w = 0.5 - Phi = .819 + Phi = 0.819 - alpha = 0.3 - beta = 0.99 + alpha = 0.3 + beta = 0.99 gamma_w = 0.763 gamma_p = 0.469 - h = 0.573 - omega = 1 - psi = 0.169 + h = 0.573 + omega = 1 
+ psi = 0.169 - r_pi = 1.684 - r_Y = 0.099 - r_Delta_pi = 0.14 - r_Delta_y = 0.159 + r_pi = 1.684 + r_Y = 0.099 + r_Delta_pi = 0.14 + r_Delta_y = 0.159 sigma_c = 1.353 sigma_l = 2.4 - tau = 0.025 - varphi = 6.771 - xi_w = 0.737 - xi_p = 0.908 + tau = 0.025 + varphi = 6.771 + xi_w = 0.737 + xi_p = 0.908 - rho = 0.961 - rho_b = 0.855 - rho_L = 0.889 - rho_I = 0.927 - rho_a = 0.823 - rho_G = 0.949 - rho_pi_bar = 0.924 + rho = 0.961 + rho_b = 0.855 + rho_L = 0.889 + rho_I = 0.927 + rho_a = 0.823 + rho_G = 0.949 + rho_pi_bar = 0.924 std_scaling_factor = 10 - std_eta_b = 0.336 / std_scaling_factor - std_eta_L = 3.52 / std_scaling_factor - std_eta_I = 0.085 / std_scaling_factor - std_eta_a = 0.598 / std_scaling_factor - std_eta_w = 0.6853261 / std_scaling_factor - std_eta_p = 0.7896512 / std_scaling_factor - std_eta_G = 0.325 / std_scaling_factor - std_eta_R = 0.081 / std_scaling_factor - std_eta_pi = 0.017 / std_scaling_factor + std_eta_b = 0.336 / std_scaling_factor + std_eta_L = 3.52 / std_scaling_factor + std_eta_I = 0.085 / std_scaling_factor + std_eta_a = 0.598 / std_scaling_factor + std_eta_w = 0.6853261 / std_scaling_factor + std_eta_p = 0.7896512 / std_scaling_factor + std_eta_G = 0.325 / std_scaling_factor + std_eta_R = 0.081 / std_scaling_factor + std_eta_pi = 0.017 / std_scaling_factor calibr_pi_obj | 1 = pi_obj[ss] calibr_pi | pi[ss] = pi_obj[ss] From 9f6fe49ba417d4399745c5c1127e6f726aa4ebdc Mon Sep 17 00:00:00 2001 From: thorek1 Date: Thu, 4 May 2023 18:28:17 +0200 Subject: [PATCH 24/83] fix kalman filter bounds check --- src/MacroModelling.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 54a281f68..53f66d601 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -3002,7 +3002,7 @@ function calculate_kalman_filter_loglikelihood(𝓂::β„³, data::AbstractArray{Fl if isnothing(parameters) parameters = 𝓂.parameter_values else - ub = @ignore_derivatives fill(1e12+rand(),length(𝓂.parameters)) + ub = @ignore_derivatives fill(1e12+rand(),length(𝓂.parameters) + length(𝓂.βž•_vars)) lb = @ignore_derivatives -ub for (i,v) in enumerate(𝓂.bounded_vars) From eceee77c3e4706117b9cb7d5e6f85ebb7ce31765 Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Thu, 4 May 2023 18:29:10 +0200 Subject: [PATCH 25/83] bump version --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 9d61ad21a..f70f772ea 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "MacroModelling" uuid = "687ffad2-3618-405e-ac50-e0f7b9c75e44" authors = ["Thore Kockerols "] -version = "0.1.21" +version = "0.1.22" [deps] AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5" From 0603594feca1aca6f5e356af50fce10464553d1d Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Fri, 5 May 2023 18:53:16 +0200 Subject: [PATCH 26/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 8727b00e3..5693d7fe2 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced growth path handling +- [ ] initial state accept keyed array - [ ] bring solution error into an object of the model so we dont have to pass it on as output - [ ] check that there is an error if he cant find SS - [ ] plot_model_estimates with unconditional forecast at the end From 11bf6cb0ef13aaa7f7fc82ca08a163299a732c56 Mon Sep 17 00:00:00 
2001 From: Thore Kockerols Date: Sat, 6 May 2023 00:37:05 +0200 Subject: [PATCH 27/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 5693d7fe2..051bbf9f3 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced growth path handling +- [ ] more informative errors when something goes wrong when writing a model - [ ] initial state accept keyed array - [ ] bring solution error into an object of the model so we dont have to pass it on as output - [ ] check that there is an error if he cant find SS From 5fa04cca36f0bdab5806199e1aa133067e0779a6 Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Sat, 6 May 2023 14:47:28 +0200 Subject: [PATCH 28/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 051bbf9f3..c557f1862 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced growth path handling +- [ ] get solution higher order with multidimensional array (states, 1 and 2 partial derivatives variables names as dimensions in 2order case) - [ ] more informative errors when something goes wrong when writing a model - [ ] initial state accept keyed array - [ ] bring solution error into an object of the model so we dont have to pass it on as output From 4fa93c630d62c9cc9bd6099e425814aee4532de8 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Sun, 7 May 2023 00:56:18 +0200 Subject: [PATCH 29/83] comment parameter definitions in dynare --- src/dynare.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dynare.jl b/src/dynare.jl index 748e2ad23..105934921 100644 --- a/src/dynare.jl +++ b/src/dynare.jl @@ -142,7 +142,7 @@ function write_mod_file(m::β„³) [print(io,string(p) * " ") for p in m.parameters_in_equations] - println(io,";\n\n# Parameter definitions:") + println(io,";\n\n% Parameter definitions:") for (i,p) in enumerate(m.parameters) println(io, "\t" * string(p) * "\t=\t" * string(m.parameter_values[i]) * ";") end From a4a1cee4f407eba002f419224c5eb8d2321d7cda Mon Sep 17 00:00:00 2001 From: thorek1 Date: Sun, 7 May 2023 00:57:20 +0200 Subject: [PATCH 30/83] update todos --- docs/src/unfinished_docs/todo.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 8727b00e3..c416ccf8e 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,8 @@ ## High priority - [ ] add balanced growth path handling +- [ ] have get_std take variables as an input +- [ ] get_solution for higher order - [ ] bring solution error into an object of the model so we dont have to pass it on as output - [ ] check that there is an error if he cant find SS - [ ] plot_model_estimates with unconditional forecast at the end From 2df9ca18f45ae367d945fa314b85079c725a5bfc Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Mon, 8 May 2023 14:13:26 +0200 Subject: [PATCH 31/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 0263c6fc6..854585c0b 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced
growth path handling +- [ ] include weakdeps: https://pkgdocs.julialang.org/dev/creating-packages/#Weak-dependencies - [ ] have get_std take variables as an input - [ ] get_solution for higher order - [ ] get solution higher order with multidimensional array (states, 1 and 2 partial derivatives variables names as dimensions in 2order case) From d0561e2ae077f9953463877892ed58ef7a7c5131 Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Mon, 8 May 2023 14:15:45 +0200 Subject: [PATCH 32/83] Update ci.yml --- .github/workflows/ci.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 79ee55403..4c3c3d427 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ jobs: fail-fast: false matrix: version: - - '1' + - '1.8' - '^1.9.0-0' #^1-0 and ^1.9-0 are not recognised # - 'nightly' # fails for zygote os: @@ -26,6 +26,10 @@ jobs: include: - os: ubuntu-latest prefix: xvfb-run + - version: 'nightly' + os: ubuntu-latest + arch: x64 + allow_failure: true steps: - uses: actions/checkout@v3 - uses: julia-actions/setup-julia@latest From f831c5abea8ffbed05e4845a22b62cd0a93bdfcc Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 8 May 2023 19:49:20 +0200 Subject: [PATCH 33/83] update todos --- docs/src/unfinished_docs/todo.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 854585c0b..c71761f69 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,12 @@ ## High priority - [ ] add balanced growth path handling +- [ ] for cond forecasting and kalman, get rid of observables input and use axis key of data input +- [ ] for cond forecasting allow less shocks than conditions with a warning. should be svd then +- [ ] have parser accept rss | (r[ss] - 1) * 400 = rss +- [ ] when doing calibration with optimiser have better return values when he doesnt find a solution (probably NaN) +- [ ] add pruning +- [ ] sampler returned negative std.
investigate and come up with solution ensuring sampler can continue - [ ] include weakdeps: https://pkgdocs.julialang.org/dev/creating-packages/#Weak-dependencies - [ ] have get_std take variables as an input - [ ] get_solution for higher order From 31d761b079adf8ccae09b8ac2eb201ce6573a2a9 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 8 May 2023 20:00:13 +0200 Subject: [PATCH 34/83] let SSS calc start from NSSS --- src/MacroModelling.jl | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 53f66d601..617dd0ece 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -1211,11 +1211,11 @@ end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, 𝓂::β„³; +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, initial_state::Vector{Float64}, 𝓂::β„³; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂) = 𝐒₁𝐒₂ - state = zeros(𝓂.timings.nVars) + state = copy(initial_state) shock = zeros(𝓂.timings.nExo) aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx] @@ -1250,16 +1250,17 @@ function second_order_stochastic_steady_state_iterative_solution_condition(𝐒 end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, initial_state::Vector{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚ = β„±.value.(𝐒₁𝐒₂) + init_state = β„±.value.(initial_state) # you can play with the dimension here, sometimes it makes sense to transpose ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂)' # get f(vs) - val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, 𝓂) + val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, init_state, 𝓂) if converged # get J(f, vs) * ps (cheating). 
Write your custom rule here @@ -1302,7 +1303,7 @@ function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), 𝓂) + state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), SS_and_pars, 𝓂) all_SS = expand_steady_state(SS_and_pars,𝓂) @@ -1321,11 +1322,11 @@ end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, 𝓂::β„³; +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, initial_state::Vector{Float64}, 𝓂::β„³; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂, 𝐒₃) = 𝐒₁𝐒₂𝐒₃ - state = zeros(𝓂.timings.nVars) + state = copy(initial_state) shock = zeros(𝓂.timings.nExo) aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx] @@ -1359,16 +1360,17 @@ function third_order_stochastic_steady_state_iterative_solution_condition(𝐒 end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, initial_state::Vector{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚S₃ = β„±.value.(𝐒₁𝐒₂𝐒₃) + init_state = β„±.value.(initial_state) # you can play with the dimension here, sometimes it makes sense to transpose ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂𝐒₃)' # get f(vs) - val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, 𝓂) + val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, init_state, 𝓂) if converged # get J(f, vs) * ps (cheating). Write your custom rule here @@ -1415,7 +1417,7 @@ function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), 𝓂) + state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), SS_and_pars, 𝓂) all_SS = expand_steady_state(SS_and_pars,𝓂) From f5dc80af2ffd351abc59916b16471d78d69e0a96 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 8 May 2023 20:58:13 +0200 Subject: [PATCH 35/83] Revert "let SSS calc start from NSSS" This reverts commit 31d761b079adf8ccae09b8ac2eb201ce6573a2a9. 
--- src/MacroModelling.jl | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 617dd0ece..53f66d601 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -1211,11 +1211,11 @@ end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, initial_state::Vector{Float64}, 𝓂::β„³; +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, 𝓂::β„³; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂) = 𝐒₁𝐒₂ - state = copy(initial_state) + state = zeros(𝓂.timings.nVars) shock = zeros(𝓂.timings.nExo) aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx] @@ -1250,17 +1250,16 @@ function second_order_stochastic_steady_state_iterative_solution_condition(𝐒 end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, initial_state::Vector{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚ = β„±.value.(𝐒₁𝐒₂) - init_state = β„±.value.(initial_state) # you can play with the dimension here, sometimes it makes sense to transpose ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂)' # get f(vs) - val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, init_state, 𝓂) + val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, 𝓂) if converged # get J(f, vs) * ps (cheating). Write your custom rule here @@ -1303,7 +1302,7 @@ function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), SS_and_pars, 𝓂) + state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), 𝓂) all_SS = expand_steady_state(SS_and_pars,𝓂) @@ -1322,11 +1321,11 @@ end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, initial_state::Vector{Float64}, 𝓂::β„³; +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, 𝓂::β„³; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂, 𝐒₃) = 𝐒₁𝐒₂𝐒₃ - state = copy(initial_state) + state = zeros(𝓂.timings.nVars) shock = zeros(𝓂.timings.nExo) aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx] @@ -1360,17 +1359,16 @@ function third_order_stochastic_steady_state_iterative_solution_condition(𝐒 end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, initial_state::Vector{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚S₃ = β„±.value.(𝐒₁𝐒₂𝐒₃) - init_state = β„±.value.(initial_state) # you can play with the dimension here, sometimes it makes sense to transpose ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂𝐒₃)' # get f(vs) - val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, init_state, 𝓂) + val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, 𝓂) if converged # get J(f, vs) * ps (cheating). 
Write your custom rule here @@ -1417,7 +1415,7 @@ function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), SS_and_pars, 𝓂) + state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), 𝓂) all_SS = expand_steady_state(SS_and_pars,𝓂) From f415f0e15df1b40ed1aed172dbe349779b577ff7 Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Mon, 8 May 2023 22:16:42 +0200 Subject: [PATCH 36/83] Update ci.yml --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4c3c3d427..bf7edbda2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: matrix: version: - '1.8' - - '^1.9.0-0' #^1-0 and ^1.9-0 are not recognised + - '1.9' #^1-0 and ^1.9-0 are not recognised # - 'nightly' # fails for zygote os: - ubuntu-latest @@ -26,10 +26,10 @@ jobs: include: - os: ubuntu-latest prefix: xvfb-run - - version: 'nightly' - os: ubuntu-latest - arch: x64 - allow_failure: true +# - version: 'nightly' +# os: ubuntu-latest +# arch: x64 +# allow_failure: true steps: - uses: actions/checkout@v3 - uses: julia-actions/setup-julia@latest From 7b9d7b852a52a6e9b6529d139a14343ddd30d008 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 8 May 2023 22:42:03 +0200 Subject: [PATCH 37/83] add pruning to get_SSS --- src/MacroModelling.jl | 334 +++++++++++++++++++++++++++--------------- src/get_functions.jl | 28 ++++ src/macros.jl | 6 +- src/structures.jl | 2 + 4 files changed, 248 insertions(+), 122 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 53f66d601..0477667fd 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -1211,7 +1211,7 @@ end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, 𝓂::β„³; +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{Float64}, 𝓂::β„³, pruning::Bool; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂) = 𝐒₁𝐒₂ @@ -1222,22 +1222,35 @@ function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂: 1 shock] - sol = speedmapping(state; - m! = (SSS, sss) -> begin - aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] + if pruning + pruned_aug_state = copy(aug_state) + + sol = speedmapping(state; + m! = (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + end, + tol = tol, maps_limit = 10000) + else + sol = speedmapping(state; + m! 
= (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] - SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 - end, - tol = tol, maps_limit = 10000) + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + end, + tol = tol, maps_limit = 10000) + end return sol.minimizer, sol.converged - end -function second_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂, SSS, 𝓂::β„³) +function second_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂, SSS, 𝓂::β„³, pruning::Bool) (; 𝐒₁, 𝐒₂) = 𝐒₁𝐒₂ shock = zeros(𝓂.timings.nExo) @@ -1245,12 +1258,20 @@ function second_order_stochastic_steady_state_iterative_solution_condition(𝐒 aug_state = [SSS[𝓂.timings.past_not_future_and_mixed_idx] 1 shock] - - 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 - SSS + + if pruning + pruned_aug_state = [zeros(𝓂.timings.nPast_not_future_and_mixed) + 1 + shock] + + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 - SSS + else + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 - SSS + end end -function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³, pruning::Bool) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚ = β„±.value.(𝐒₁𝐒₂) @@ -1259,12 +1280,12 @@ function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂: ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂)' # get f(vs) - val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, 𝓂) + val, converged = second_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚, 𝓂, pruning) if converged # get J(f, vs) * ps (cheating). 
Write your custom rule here - B = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂), S₁Sβ‚‚) - A = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚, x, 𝓂), val) + B = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂, pruning), S₁Sβ‚‚) + A = β„±.jacobian(x -> second_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚, x, 𝓂, pruning), val) AΜ‚ = RF.lu(A, check = false) @@ -1289,7 +1310,7 @@ function second_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂: end -function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false) where M +function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where M SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) @@ -1302,7 +1323,7 @@ function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), 𝓂) + state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), 𝓂, pruning) all_SS = expand_steady_state(SS_and_pars,𝓂) @@ -1321,7 +1342,7 @@ end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, 𝓂::β„³; +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{Float64}, 𝓂::β„³, pruning::Bool; tol::AbstractFloat = 1e-10) (; 𝐒₁, 𝐒₂, 𝐒₃) = 𝐒₁𝐒₂𝐒₃ @@ -1332,21 +1353,36 @@ function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂ 1 shock] - sol = speedmapping(state; - m! = (SSS, sss) -> begin - aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - - SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 - end, - tol = tol, maps_limit = 10000) + if pruning + pruned_aug_state = copy(aug_state) + + sol = speedmapping(state; + m! = (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(pruned_aug_state,pruned_aug_state),pruned_aug_state) / 6 + end, + tol = tol, maps_limit = 10000) + else + sol = speedmapping(state; + m! 
= (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 + end, + tol = tol, maps_limit = 10000) + end + return sol.minimizer, sol.converged end -function third_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂𝐒₃, SSS, 𝓂::β„³) +function third_order_stochastic_steady_state_iterative_solution_condition(𝐒₁𝐒₂𝐒₃, SSS, 𝓂::β„³, pruning::Bool) (; 𝐒₁, 𝐒₂, 𝐒₃) = 𝐒₁𝐒₂𝐒₃ shock = zeros(𝓂.timings.nExo) @@ -1355,11 +1391,19 @@ function third_order_stochastic_steady_state_iterative_solution_condition(𝐒 1 shock] - 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 - SSS + if pruning + pruned_aug_state = [zeros(𝓂.timings.nPast_not_future_and_mixed) + 1 + shock] + + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(pruned_aug_state,pruned_aug_state),pruned_aug_state) / 6 - SSS + else + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(aug_state,aug_state),aug_state) / 6 - SSS + end end -function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³) where {Z,S,N} +function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂𝐒₃::AbstractArray{β„±.Dual{Z,S,N}}, 𝓂::β„³, pruning::Bool) where {Z,S,N} # unpack: AoS -> SoA S₁Sβ‚‚S₃ = β„±.value.(𝐒₁𝐒₂𝐒₃) @@ -1368,12 +1412,12 @@ function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂ ps = mapreduce(β„±.partials, hcat, 𝐒₁𝐒₂𝐒₃)' # get f(vs) - val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, 𝓂) + val, converged = third_order_stochastic_steady_state_iterative_solution(S₁Sβ‚‚S₃, 𝓂, pruning) if converged # get J(f, vs) * ps (cheating). 
Write your custom rule here - B = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂), S₁Sβ‚‚S₃) - A = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚S₃, x, 𝓂), val) + B = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(x, val, 𝓂, pruning), S₁Sβ‚‚S₃) + A = β„±.jacobian(x -> third_order_stochastic_steady_state_iterative_solution_condition(S₁Sβ‚‚S₃, x, 𝓂, pruning), val) AΜ‚ = RF.lu(A, check = false) @@ -1398,7 +1442,7 @@ function third_order_stochastic_steady_state_iterative_solution(𝐒₁𝐒₂ end -function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false) where M +function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where M SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) @@ -1415,7 +1459,7 @@ function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, 𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), 𝓂) + state, converged = third_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂, 𝐒₃), 𝓂, pruning) all_SS = expand_steady_state(SS_and_pars,𝓂) @@ -1442,7 +1486,7 @@ function solve!(𝓂::β„³; verbose::Bool = false, silent::Bool = false) - @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :third_order] + @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :pruned_second_order, :third_order, :pruned_third_order] if dynamics 𝓂.solution.outdated_algorithms = union(intersect(𝓂.solution.algorithms,[algorithm]),𝓂.solution.outdated_algorithms) @@ -1451,18 +1495,18 @@ function solve!(𝓂::β„³; write_parameters_input!(𝓂, parameters, verbose = verbose) - if 𝓂.model_hessian == Function[] && algorithm == :second_order + if 𝓂.model_hessian == Function[] && algorithm ∈ [:second_order, :pruned_second_order] start_time = time() write_functions_mapping!(𝓂, 2) if !silent println("Take symbolic derivatives up to second order:\t",round(time() - start_time, digits = 3), " seconds") end - elseif 𝓂.model_third_order_derivatives == Function[] && algorithm == :third_order + elseif 𝓂.model_third_order_derivatives == Function[] && algorithm ∈ [:third_order, :pruned_third_order] start_time = time() write_functions_mapping!(𝓂, 3) if !silent println("Take symbolic derivatives up to third order:\t",round(time() - start_time, digits = 3), " seconds") end end if dynamics - if (any([:riccati, :first_order] .∈ ([algorithm],)) && any([:riccati, :first_order] .∈ (𝓂.solution.outdated_algorithms,))) || (:second_order == algorithm && :second_order ∈ 𝓂.solution.outdated_algorithms) || (:third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms) + if (any([:riccati, :first_order] .∈ ([algorithm],)) && any([:riccati, :first_order] .∈ (𝓂.solution.outdated_algorithms,))) || (any([:second_order,:pruned_second_order] .∈ ([algorithm],)) && any([:second_order,:pruned_second_order] .∈ (𝓂.solution.outdated_algorithms,))) || (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && any([:third_order,:pruned_third_order] .∈ 
(𝓂.solution.outdated_algorithms,)))
             SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps())
 
             # @assert solution_error < eps() "Could not find non stochastic steady state."
@@ -1483,7 +1527,7 @@ function solve!(𝓂::β„³;
         end
 
-        if (:second_order == algorithm && :second_order ∈ 𝓂.solution.outdated_algorithms) || (:third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms)
+        if (:second_order == algorithm && :second_order ∈ 𝓂.solution.outdated_algorithms) || (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,)))
             stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, 𝐒₁, 𝐒₂ = calculate_second_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose)
 
             @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1."
@@ -1500,6 +1544,28 @@ function solve!(𝓂::β„³;
             𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:second_order])
         end
 
+        if (:pruned_second_order == algorithm && :pruned_second_order ∈ 𝓂.solution.outdated_algorithms) || (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,)))
+            stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, 𝐒₁, 𝐒₂ = calculate_second_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose, pruning = true)
+
+            @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1."
+
+            state_updateβ‚‚ = function(state::Vector{Float64}, shock::Vector{Float64}, pruned_state::Vector{Float64})
+                aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx]
+                            1
+                            shock]
+
+                pruned_aug_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx]
+                            1
+                            shock]
+
+                return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2, 𝐒₁ * pruned_aug_state
+            end
+
+            𝓂.solution.perturbation.pruned_second_order = higher_order_perturbation_solution(𝐒₂,stochastic_steady_state,state_updateβ‚‚)
+
+            𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:pruned_second_order])
+        end
+
         if :third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms
             stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂, 𝐒₃ = calculate_third_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose)
 
             @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1."
@@ -1517,6 +1583,28 @@ function solve!(𝓂::β„³;
             𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:third_order])
         end
 
+        if :pruned_third_order == algorithm && :pruned_third_order ∈ 𝓂.solution.outdated_algorithms
+            stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂, 𝐒₃ = calculate_third_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose, pruning = true)
+
+            @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1."
+ + state_update₃ = function(state::Vector{Float64}, shock::Vector{Float64}, pruned_state::Vector{Float64}) + aug_state = [state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + pruned_aug_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + return 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(pruned_aug_state, pruned_aug_state) / 2 + 𝐒₃ * β„’.kron(β„’.kron(pruned_aug_state,pruned_aug_state),pruned_aug_state) / 6, 𝐒₁ * pruned_aug_state + end + + 𝓂.solution.perturbation.pruned_third_order = higher_order_perturbation_solution(𝐒₃,stochastic_steady_state,state_update₃) + + 𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:pruned_third_order]) + end + if any([:quadratic_iteration, :binder_pesaran] .∈ ([algorithm],)) && any([:quadratic_iteration, :binder_pesaran] .∈ (𝓂.solution.outdated_algorithms,)) SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) @@ -1883,7 +1971,7 @@ function write_parameters_input!(𝓂::β„³, parameters::Dict{Symbol,Float64}; ve if !all(𝓂.parameter_values[ntrsct_idx] .== collect(values(parameters))) if verbose println("Parameter changes: ") end - 𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :third_order]) + 𝓂.solution.outdated_algorithms = 𝓂.solution.algorithms end for i in 1:length(parameters) @@ -1977,11 +2065,11 @@ end -function SSS_third_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_third_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -1989,11 +2077,11 @@ function SSS_third_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N end -function SSS_third_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_third_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_third_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." 
@@ -2001,11 +2089,11 @@ function SSS_third_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, para end -function SSS_second_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_second_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S,N}}, parameters_idx, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -2013,11 +2101,11 @@ function SSS_second_order_parameter_derivatives(parameters::Vector{β„±.Dual{Z,S, end -function SSS_second_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false) where {Z,S,N} +function SSS_second_order_parameter_derivatives(parameters::β„±.Dual{Z,S,N}, parameters_idx::Int, 𝓂::β„³; verbose::Bool = false, pruning::Bool = false) where {Z,S,N} params = copy(𝓂.parameter_values) params = convert(Vector{β„±.Dual{Z,S,N}},params) params[parameters_idx] = parameters - SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose) + SSS = calculate_second_order_stochastic_steady_state(params, 𝓂, verbose = verbose, pruning = pruning) @assert SSS[2] "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -2919,8 +3007,14 @@ function parse_algorithm_to_state_update(algorithm::Symbol, 𝓂::β„³) elseif :second_order == algorithm state_update = 𝓂.solution.perturbation.second_order.state_update + elseif :pruned_second_order == algorithm + state_update = 𝓂.solution.perturbation.pruned_second_order.state_update + elseif :third_order == algorithm state_update = 𝓂.solution.perturbation.third_order.state_update + + elseif :pruned_third_order == algorithm + state_update = 𝓂.solution.perturbation.pruned_third_order.state_update end return state_update @@ -3191,74 +3285,74 @@ end -@setup_workload begin - # Putting some things in `setup` can reduce the size of the - # precompile file and potentially make loading faster. 
- @model FS2000 begin - dA[0] = exp(gam + z_e_a * e_a[x]) - log(m[0]) = (1 - rho) * log(mst) + rho * log(m[-1]) + z_e_m * e_m[x] - - P[0] / (c[1] * P[1] * m[0]) + bet * P[1] * (alp * exp( - alp * (gam + log(e[1]))) * k[0] ^ (alp - 1) * n[1] ^ (1 - alp) + (1 - del) * exp( - (gam + log(e[1])))) / (c[2] * P[2] * m[1])=0 - W[0] = l[0] / n[0] - - (psi / (1 - psi)) * (c[0] * P[0] / (1 - n[0])) + l[0] / n[0] = 0 - R[0] = P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ ( - alp) / W[0] - 1 / (c[0] * P[0]) - bet * P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) / (m[0] * l[0] * c[1] * P[1]) = 0 - c[0] + k[0] = exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) + (1 - del) * exp( - (gam + z_e_a * e_a[x])) * k[-1] - P[0] * c[0] = m[0] - m[0] - 1 + d[0] = l[0] - e[0] = exp(z_e_a * e_a[x]) - y[0] = k[-1] ^ alp * n[0] ^ (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) - gy_obs[0] = dA[0] * y[0] / y[-1] - gp_obs[0] = (P[0] / P[-1]) * m[-1] / dA[0] - log_gy_obs[0] = log(gy_obs[0]) - log_gp_obs[0] = log(gp_obs[0]) - end - - @parameters FS2000 silent = true begin - alp = 0.356 - bet = 0.993 - gam = 0.0085 - mst = 1.0002 - rho = 0.129 - psi = 0.65 - del = 0.01 - z_e_a = 0.035449 - z_e_m = 0.008862 - end - - ENV["GKSwstype"] = "nul" - - @compile_workload begin - # all calls in this block will be precompiled, regardless of whether - # they belong to your package or not (on Julia 1.8 and higher) - @model RBC begin - 1 / c[0] = (0.95 / c[1]) * (Ξ± * exp(z[1]) * k[0]^(Ξ± - 1) + (1 - Ξ΄)) - c[0] + k[0] = (1 - Ξ΄) * k[-1] + exp(z[0]) * k[-1]^Ξ± - z[0] = 0.2 * z[-1] + 0.01 * eps_z[x] - end - - @parameters RBC silent = true precompile = true begin - Ξ΄ = 0.02 - Ξ± = 0.5 - end - - get_SS(FS2000) - get_SS(FS2000, parameters = :alp => 0.36) - get_solution(FS2000) - get_solution(FS2000, parameters = :alp => 0.35) - get_standard_deviation(FS2000) - get_correlation(FS2000) - get_autocorrelation(FS2000) - get_variance_decomposition(FS2000) - get_conditional_variance_decomposition(FS2000) - get_irf(FS2000) - # get_SSS(FS2000, silent = true) - # get_SSS(FS2000, algorithm = :third_order, silent = true) - - # import Plots, StatsPlots - # plot_irf(FS2000) - # plot_solution(FS2000,:k) # fix warning when there is no sensitivity and all values are the same. triggers: no strict ticks found... - # plot_conditional_variance_decomposition(FS2000) - end -end +# @setup_workload begin +# # Putting some things in `setup` can reduce the size of the +# # precompile file and potentially make loading faster. 
+# @model FS2000 begin +# dA[0] = exp(gam + z_e_a * e_a[x]) +# log(m[0]) = (1 - rho) * log(mst) + rho * log(m[-1]) + z_e_m * e_m[x] +# - P[0] / (c[1] * P[1] * m[0]) + bet * P[1] * (alp * exp( - alp * (gam + log(e[1]))) * k[0] ^ (alp - 1) * n[1] ^ (1 - alp) + (1 - del) * exp( - (gam + log(e[1])))) / (c[2] * P[2] * m[1])=0 +# W[0] = l[0] / n[0] +# - (psi / (1 - psi)) * (c[0] * P[0] / (1 - n[0])) + l[0] / n[0] = 0 +# R[0] = P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ ( - alp) / W[0] +# 1 / (c[0] * P[0]) - bet * P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) / (m[0] * l[0] * c[1] * P[1]) = 0 +# c[0] + k[0] = exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) + (1 - del) * exp( - (gam + z_e_a * e_a[x])) * k[-1] +# P[0] * c[0] = m[0] +# m[0] - 1 + d[0] = l[0] +# e[0] = exp(z_e_a * e_a[x]) +# y[0] = k[-1] ^ alp * n[0] ^ (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) +# gy_obs[0] = dA[0] * y[0] / y[-1] +# gp_obs[0] = (P[0] / P[-1]) * m[-1] / dA[0] +# log_gy_obs[0] = log(gy_obs[0]) +# log_gp_obs[0] = log(gp_obs[0]) +# end + +# @parameters FS2000 silent = true begin +# alp = 0.356 +# bet = 0.993 +# gam = 0.0085 +# mst = 1.0002 +# rho = 0.129 +# psi = 0.65 +# del = 0.01 +# z_e_a = 0.035449 +# z_e_m = 0.008862 +# end + +# ENV["GKSwstype"] = "nul" + +# @compile_workload begin +# # all calls in this block will be precompiled, regardless of whether +# # they belong to your package or not (on Julia 1.8 and higher) +# @model RBC begin +# 1 / c[0] = (0.95 / c[1]) * (Ξ± * exp(z[1]) * k[0]^(Ξ± - 1) + (1 - Ξ΄)) +# c[0] + k[0] = (1 - Ξ΄) * k[-1] + exp(z[0]) * k[-1]^Ξ± +# z[0] = 0.2 * z[-1] + 0.01 * eps_z[x] +# end + +# @parameters RBC silent = true precompile = true begin +# Ξ΄ = 0.02 +# Ξ± = 0.5 +# end + +# get_SS(FS2000) +# get_SS(FS2000, parameters = :alp => 0.36) +# get_solution(FS2000) +# get_solution(FS2000, parameters = :alp => 0.35) +# get_standard_deviation(FS2000) +# get_correlation(FS2000) +# get_autocorrelation(FS2000) +# get_variance_decomposition(FS2000) +# get_conditional_variance_decomposition(FS2000) +# get_irf(FS2000) +# # get_SSS(FS2000, silent = true) +# # get_SSS(FS2000, algorithm = :third_order, silent = true) + +# # import Plots, StatsPlots +# # plot_irf(FS2000) +# # plot_solution(FS2000,:k) # fix warning when there is no sensitivity and all values are the same. triggers: no strict ticks found... 
+# # plot_conditional_variance_decomposition(FS2000) +# end +# end end diff --git a/src/get_functions.jl b/src/get_functions.jl index 9c8570a36..0bf92e165 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -933,6 +933,12 @@ function get_steady_state(𝓂::β„³; if algorithm == :third_order solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm, silent = silent) SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm, silent = silent) + SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm, silent = silent) + SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state else solve!(𝓂, verbose = verbose, dynamics = true, algorithm = :second_order, silent = silent) SS[1:length(𝓂.var)] = 𝓂.solution.perturbation.second_order.stochastic_steady_state#[indexin(sort(union(𝓂.var,𝓂.exo_present)),sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))] @@ -954,19 +960,41 @@ function get_steady_state(𝓂::β„³; if derivatives if stochastic if algorithm == :third_order + dSSS = β„±.jacobian(x->begin SSS = SSS_third_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose) [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] end, 𝓂.parameter_values[param_idx]) return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + + elseif algorithm == :pruned_third_order + + dSSS = β„±.jacobian(x->begin + SSS = SSS_third_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose, pruning = true) + [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] + end, 𝓂.parameter_values[param_idx]) + + return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + + elseif algorithm == :pruned_second_order + + dSSS = β„±.jacobian(x->begin + SSS = SSS_second_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose, pruning = true) + [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] + end, 𝓂.parameter_values[param_idx]) + + return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + else + dSSS = β„±.jacobian(x->begin SSS = SSS_second_order_parameter_derivatives(x, param_idx, 𝓂, verbose = verbose) [collect(SSS[1])[var_idx]...,collect(SSS[3])[calib_idx]...] 
end, 𝓂.parameter_values[param_idx]) return KeyedArray(hcat(SS[[var_idx...,calib_idx...]], dSSS); Variables_and_calibrated_parameters = [vars_in_ss_equations...,𝓂.calibration_equations_parameters...], Steady_state_and_βˆ‚steady_stateβˆ‚parameter = vcat(:Steady_state, 𝓂.parameters[param_idx])) + end else # dSS = β„±.jacobian(x->𝓂.SS_solve_func(x, 𝓂),𝓂.parameter_values) diff --git a/src/macros.jl b/src/macros.jl index 699038e7c..e882873d4 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -787,11 +787,13 @@ macro model(𝓂,ex) perturbation_solution(SparseMatrixCSC{Float64, Int64}(β„’.I,0,0), x->x), perturbation_solution(SparseMatrixCSC{Float64, Int64}(β„’.I,0,0), x->x), higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x), + higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x), + higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x), higher_order_perturbation_solution(Matrix{Float64}(undef,0,0), [],x->x) ), Float64[], Set([:first_order]), - Set([:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :third_order]), + Set([:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :pruned_second_order, :third_order, :pruned_third_order]), true, false ) @@ -1304,7 +1306,7 @@ macro parameters(𝓂,ex...) # time_dynamic_derivs = @elapsed write_functions_mapping!(mod.$𝓂, $perturbation_order) - mod.$𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :third_order]) + mod.$𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :pruned_second_order, :third_order, :pruned_third_order]) if !$silent if $perturbation_order == 1 diff --git a/src/structures.jl b/src/structures.jl index cf9a208e6..4a2ad72ba 100644 --- a/src/structures.jl +++ b/src/structures.jl @@ -165,7 +165,9 @@ mutable struct perturbation linear_time_iteration::perturbation_solution quadratic_iteration::perturbation_solution second_order::higher_order_perturbation_solution + pruned_second_order::higher_order_perturbation_solution third_order::higher_order_perturbation_solution + pruned_third_order::higher_order_perturbation_solution end From 98daefb365c2067d780f91e1f83759310fb0127a Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 8 May 2023 22:48:11 +0200 Subject: [PATCH 38/83] add pruning to tests --- test/functionality_tests.jl | 2 +- test/runtests.jl | 20 ++++++++++++-------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index eacb3f3bf..7eb445f1c 100644 --- a/test/functionality_tests.jl +++ b/test/functionality_tests.jl @@ -272,7 +272,7 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = estim_decomp2 = get_shock_decomposition(m, data, data_in_levels = false, smooth = false, verbose = true, parameters = old_par_vals) end - if algorithm ∈ [:second_order, :third_order] + if algorithm ∈ [:second_order, :pruned_second_order, :third_order, :pruned_third_order] SSS = get_stochastic_steady_state(m, algorithm = algorithm) end diff --git a/test/runtests.jl b/test/runtests.jl index e85d5922c..94f67dc65 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -24,8 +24,9 @@ GC.gc() @testset verbose = true "FS2000" begin include("models/FS2000.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = 
:second_order, plots = false) - functionality_test(m, algorithm = :third_order, plots = false) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing GC.gc() @@ -83,8 +84,9 @@ GC.gc() @testset verbose = true "RBC_CME with calibration equations and parameter definitions" begin include("models/RBC_CME_calibration_equations_and_parameter_definitions.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = :second_order) - functionality_test(m, algorithm = :third_order) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing GC.gc() @@ -92,8 +94,9 @@ GC.gc() @testset verbose = true "RBC_CME with calibration equations" begin include("models/RBC_CME_calibration_equations.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = :second_order, plots = false) - functionality_test(m, algorithm = :third_order, plots = false) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing GC.gc() @@ -101,8 +104,9 @@ GC.gc() @testset verbose = true "RBC_CME" begin include("models/RBC_CME.jl") functionality_test(m, plots = false) - functionality_test(m, algorithm = :second_order, plots = false) - functionality_test(m, algorithm = :third_order, plots = false) + for algorithm ∈ [:second_order,:pruned_second_order,:third_order,:pruned_third_order] + functionality_test(m, algorithm = algorithm, plots = false) + end end m = nothing GC.gc() From 973d5fe2aad28542ada5cd69ec8f38c1262adb91 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 8 May 2023 23:45:57 +0200 Subject: [PATCH 39/83] plot pruned solution works --- src/plotting.jl | 110 ++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 92 insertions(+), 18 deletions(-) diff --git a/src/plotting.jl b/src/plotting.jl index d622d9f69..ec063cd2d 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -806,31 +806,32 @@ function plot_solution(𝓂::β„³, @assert state ∈ 𝓂.timings.past_not_future_and_mixed "Invalid state. Choose one from:"*repr(𝓂.timings.past_not_future_and_mixed) - @assert length(setdiff(algorithm isa Symbol ? [algorithm] : algorithm, [:third_order, :second_order, :first_order])) == 0 "Invalid algorithm. Choose any combination of: :third_order, :second_order, :first_order" + @assert length(setdiff(algorithm isa Symbol ? [algorithm] : algorithm, [:third_order, :pruned_third_order, :second_order, :pruned_second_order, :first_order])) == 0 "Invalid algorithm. 
Choose any combination of: :third_order, :second_order, :first_order" if algorithm isa Symbol - max_algorithm = algorithm - min_algorithm = algorithm + solve!(𝓂, verbose = verbose, algorithm = algorithm, dynamics = true, parameters = parameters) algorithm = [algorithm] else - if :third_order ∈ algorithm - max_algorithm = :third_order - elseif :second_order ∈ algorithm - max_algorithm = :second_order + if :third_order ∈ algorithm && :pruned_third_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :third_order, dynamics = true, parameters = parameters) + solve!(𝓂, verbose = verbose, algorithm = :pruned_third_order, dynamics = true, parameters = parameters) + elseif :third_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :third_order, dynamics = true, parameters = parameters) + elseif :pruned_third_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :pruned_third_order, dynamics = true, parameters = parameters) + elseif :second_order ∈ algorithm && :pruned_second_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :second_order, dynamics = true, parameters = parameters) + solve!(𝓂, verbose = verbose, algorithm = :pruned_second_order, dynamics = true, parameters = parameters) + elseif :second_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :second_order, dynamics = true, parameters = parameters) + elseif :pruned_second_order ∈ algorithm + solve!(𝓂, verbose = verbose, algorithm = :pruned_second_order, dynamics = true, parameters = parameters) else - max_algorithm = :first_order + solve!(𝓂, verbose = verbose, algorithm = :first_order, dynamics = true, parameters = parameters) end - if :first_order ∈ algorithm - min_algorithm = :first_order - elseif :second_order ∈ algorithm - min_algorithm = :second_order - else - min_algorithm = :third_order - end end - solve!(𝓂, verbose = verbose, algorithm = max_algorithm, dynamics = true, parameters = parameters) SS_and_std = get_moments(𝓂, derivatives = false, @@ -870,12 +871,24 @@ function plot_solution(𝓂::β„³, legend = :inside, label = "2nd order perturbation") end + if :pruned_second_order ∈ algorithm + StatsPlots.plot!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Pruned 2nd order perturbation") + end if :third_order ∈ algorithm StatsPlots.plot!(fill(0,1,1), framestyle = :none, legend = :inside, label = "3rd order perturbation") end + if :pruned_third_order ∈ algorithm + StatsPlots.plot!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Pruned 3rd order perturbation") + end if :first_order ∈ algorithm StatsPlots.scatter!(fill(0,1,1), @@ -891,6 +904,14 @@ function plot_solution(𝓂::β„³, legend = :inside, label = "Stochastic Steady State (2nd order)") end + if :pruned_second_order ∈ algorithm + SSS2p = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state + + StatsPlots.scatter!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Stochastic Steady State (Pruned 2nd order)") + end if :third_order ∈ algorithm SSS3 = 𝓂.solution.perturbation.third_order.stochastic_steady_state @@ -899,6 +920,15 @@ function plot_solution(𝓂::β„³, legend = :inside, label = "Stochastic Steady State (3rd order)") end + if :pruned_third_order ∈ algorithm + SSS3p = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state + + StatsPlots.scatter!(fill(0,1,1), + framestyle = :none, + legend = :inside, + label = "Stochastic Steady State (Pruned 3rd order)") + end + StatsPlots.scatter!(fill(0,1,1), label = "", marker = :rect, @@ -912,7 +942,9 @@ function 
plot_solution(𝓂::β„³, variable_first_list = [] variable_second_list = [] + variable_pruned_second_list = [] variable_third_list = [] + variable_pruned_third_list = [] has_impact_list = [] for k in vars_to_plot @@ -922,7 +954,9 @@ function plot_solution(𝓂::β„³, variable_first = [] variable_second = [] + variable_pruned_second = [] variable_third = [] + variable_pruned_third = [] if :first_order ∈ algorithm variable_first = [𝓂.solution.perturbation.first_order.state_update(state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] @@ -940,6 +974,14 @@ function plot_solution(𝓂::β„³, has_impact = has_impact || sum(abs2,variable_second .- sum(variable_second)/length(variable_second))/(length(variable_second)-1) > eps() end + if :pruned_second_order ∈ algorithm + variable_pruned_second = [𝓂.solution.perturbation.pruned_second_order.state_update(SSS2p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), SSS2p - full_SS .+ state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] + + variable_pruned_second = [(abs(x) > eps() ? x : 0.0) + SS_and_std[1](kk) for x in variable_pruned_second] + + has_impact = has_impact || sum(abs2,variable_pruned_second .- sum(variable_pruned_second)/length(variable_pruned_second))/(length(variable_pruned_second)-1) > eps() + end + if :third_order ∈ algorithm variable_third = [𝓂.solution.perturbation.third_order.state_update(SSS3 - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo))[indexin([k],𝓂.timings.var)][1] for x in state_range] @@ -948,9 +990,19 @@ function plot_solution(𝓂::β„³, has_impact = has_impact || sum(abs2,variable_third .- sum(variable_third)/length(variable_third))/(length(variable_third)-1) > eps() end + if :pruned_third_order ∈ algorithm + variable_pruned_third = [𝓂.solution.perturbation.pruned_third_order.state_update(SSS3p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), SSS3p - full_SS .+ state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] + + variable_pruned_third = [(abs(x) > eps() ? 
x : 0.0) + SS_and_std[1](kk) for x in variable_pruned_third] + + has_impact = has_impact || sum(abs2,variable_pruned_third .- sum(variable_pruned_third)/length(variable_pruned_third))/(length(variable_pruned_third)-1) > eps() + end + push!(variable_first_list, variable_first) push!(variable_second_list, variable_second) + push!(variable_pruned_second_list, variable_pruned_second) push!(variable_third_list, variable_third) + push!(variable_pruned_third_list, variable_pruned_third) push!(has_impact_list, has_impact) if !has_impact @@ -980,6 +1032,13 @@ function plot_solution(𝓂::β„³, xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") end + if :pruned_second_order ∈ algorithm + StatsPlots.plot!(state_range .+ SSS2p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], + variable_pruned_second_list[i], + ylabel = string(k)*"β‚β‚€β‚Ž", + xlabel = string(state)*"β‚β‚‹β‚β‚Ž", + label = "") + end if :third_order ∈ algorithm StatsPlots.plot!(state_range .+ SSS3[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], variable_third_list[i], @@ -987,6 +1046,13 @@ function plot_solution(𝓂::β„³, xlabel = string(state)*"β‚β‚‹β‚β‚Ž", label = "") end + if :pruned_third_order ∈ algorithm + StatsPlots.plot!(state_range .+ SSS3p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1], + variable_pruned_third_list[i], + ylabel = string(k)*"β‚β‚€β‚Ž", + xlabel = string(state)*"β‚β‚‹β‚β‚Ž", + label = "") + end if :first_order ∈ algorithm StatsPlots.scatter!([SS_and_std[1](state)], [SS_and_std[1](kk)], @@ -996,10 +1062,18 @@ function plot_solution(𝓂::β„³, StatsPlots.scatter!([SSS2[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS2[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], label = "") end + if :pruned_second_order ∈ algorithm + StatsPlots.scatter!([SSS2p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS2p[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], + label = "") + end if :third_order ∈ algorithm StatsPlots.scatter!([SSS3[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS3[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], label = "") end + if :pruned_third_order ∈ algorithm + StatsPlots.scatter!([SSS3p[indexin([state],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], [SSS3p[indexin([k],sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)))][1]], + label = "") + end Pl end) @@ -1013,7 +1087,7 @@ function plot_solution(𝓂::β„³, p = StatsPlots.plot(ppp, legend_plot, - layout = StatsPlots.grid(2, 1, heights=[0.8, 0.2]), + layout = StatsPlots.grid(2, 1, heights = length(algorithm) > 3 ? [0.65, 0.35] : [0.8, 0.2]), plot_title = "Model: "*𝓂.model_name*" ("*string(pane)*"/"*string(Int(ceil(n_subplots/plots_per_page)))*")" ) @@ -1037,7 +1111,7 @@ function plot_solution(𝓂::β„³, p = StatsPlots.plot(ppp, legend_plot, - layout = StatsPlots.grid(2, 1, heights=[0.8, 0.2]), + layout = StatsPlots.grid(2, 1, heights = length(algorithm) > 3 ? 
[0.65, 0.35] : [0.8, 0.2]), plot_title = "Model: "*𝓂.model_name*" ("*string(pane)*"/"*string(Int(ceil(n_subplots/plots_per_page)))*")" ) From 0c98fbe7fa62f51306042208573052d20fc0ac01 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 12:36:46 +0200 Subject: [PATCH 40/83] add pruned (g)irf --- src/MacroModelling.jl | 113 ++++++++++++++++++++++++++++++++---------- src/get_functions.jl | 70 ++++++++++++++------------ src/macros.jl | 6 ++- src/plotting.jl | 101 ++++++++++++++++++------------------- 4 files changed, 180 insertions(+), 110 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 0477667fd..acef90ca6 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -1486,13 +1486,13 @@ function solve!(𝓂::β„³; verbose::Bool = false, silent::Bool = false) - @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :pruned_second_order, :third_order, :pruned_third_order] + @assert algorithm ∈ all_available_algorithms if dynamics 𝓂.solution.outdated_algorithms = union(intersect(𝓂.solution.algorithms,[algorithm]),𝓂.solution.outdated_algorithms) 𝓂.solution.algorithms = union(𝓂.solution.algorithms,[algorithm]) end - + write_parameters_input!(𝓂, parameters, verbose = verbose) if 𝓂.model_hessian == Function[] && algorithm ∈ [:second_order, :pruned_second_order] @@ -1506,7 +1506,13 @@ function solve!(𝓂::β„³; end if dynamics - if (any([:riccati, :first_order] .∈ ([algorithm],)) && any([:riccati, :first_order] .∈ (𝓂.solution.outdated_algorithms,))) || (any([:second_order,:pruned_second_order] .∈ ([algorithm],)) && any([:second_order,:pruned_second_order] .∈ (𝓂.solution.outdated_algorithms,))) || (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + if (any([:riccati, :first_order] .∈ ([algorithm],)) && + any([:riccati, :first_order] .∈ (𝓂.solution.outdated_algorithms,))) || + (any([:second_order,:pruned_second_order] .∈ ([algorithm],)) && + any([:second_order,:pruned_second_order] .∈ (𝓂.solution.outdated_algorithms,))) || + (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && + any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) # @assert solution_error < eps() "Could not find non stochastic steady steady." @@ -1527,7 +1533,11 @@ function solve!(𝓂::β„³; end - if (:second_order == algorithm && :second_order ∈ 𝓂.solution.outdated_algorithms) || (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + if (:second_order == algorithm && + :second_order ∈ 𝓂.solution.outdated_algorithms) || + (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && + any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, 𝐒₁, 𝐒₂ = calculate_second_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose) @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." 
@@ -1544,7 +1554,11 @@ function solve!(𝓂::β„³; 𝓂.solution.outdated_algorithms = setdiff(𝓂.solution.outdated_algorithms,[:second_order]) end - if (:pruned_second_order == algorithm && :pruned_second_order ∈ 𝓂.solution.outdated_algorithms) || (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + if (:pruned_second_order == algorithm && + :pruned_second_order ∈ 𝓂.solution.outdated_algorithms) || + (any([:third_order,:pruned_third_order] .∈ ([algorithm],)) && + any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, 𝐒₁, 𝐒₂ = calculate_second_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose, pruning = true) @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -1567,6 +1581,7 @@ function solve!(𝓂::β„³; end if :third_order == algorithm && :third_order ∈ 𝓂.solution.outdated_algorithms + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂, 𝐒₃ = calculate_third_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose) @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -1584,6 +1599,7 @@ function solve!(𝓂::β„³; end if :pruned_third_order == algorithm && :pruned_third_order ∈ 𝓂.solution.outdated_algorithms + stochastic_steady_state, converged, SS_and_pars, solution_error, βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂, 𝐒₃ = calculate_third_order_stochastic_steady_state(𝓂.parameter_values, 𝓂, verbose = verbose, pruning = true) @assert converged "Solution does not have a stochastic steady state. Try reducing shock sizes by multiplying them with a number < 1." @@ -1606,6 +1622,7 @@ function solve!(𝓂::β„³; end if any([:quadratic_iteration, :binder_pesaran] .∈ ([algorithm],)) && any([:quadratic_iteration, :binder_pesaran] .∈ (𝓂.solution.outdated_algorithms,)) + SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) @@ -1639,6 +1656,7 @@ function solve!(𝓂::β„³; end end + return nothing end @@ -1971,7 +1989,7 @@ function write_parameters_input!(𝓂::β„³, parameters::Dict{Symbol,Float64}; ve if !all(𝓂.parameter_values[ntrsct_idx] .== collect(values(parameters))) if verbose println("Parameter changes: ") end - 𝓂.solution.outdated_algorithms = 𝓂.solution.algorithms + 𝓂.solution.outdated_algorithms = Set(all_available_algorithms) end for i in 1:length(parameters) @@ -2031,7 +2049,7 @@ function write_parameters_input!(𝓂::β„³, parameters::Vector{Float64}; verbose println("Parameters unchanged.") else if !all(parameters .== 𝓂.parameter_values[1:length(parameters)]) - 𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :third_order]) + 𝓂.solution.outdated_algorithms = Set(all_available_algorithms) match_idx = [] for (i, v) in enumerate(parameters) @@ -2773,7 +2791,11 @@ end -function irf(state_update::Function, initial_state::Vector{Float64}, level::Vector{Float64}, T::timings; +function irf(state_update::Function, + initial_state::Vector{Float64}, + level::Vector{Float64}, + pruning::Bool, + T::timings; periods::Int = 40, shocks::Union{Symbol_input,Matrix{Float64},KeyedArray{Float64}} = :all, variables::Symbol_input = :all, @@ -2811,10 +2833,19 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shock_history = randn(T.nExo,periods) Y = zeros(T.nVars,periods,1) - Y[:,1,1] = state_update(initial_state,shock_history[:,1]) - for t in 1:periods-1 - Y[:,t+1,1] = state_update(Y[:,t,1],shock_history[:,t+1]) + if pruning + Y[:,1,1], pruned_state = state_update(initial_state, shock_history[:,1], initial_state) + + for t in 1:periods-1 + Y[:,t+1,1], pruned_state = state_update(Y[:,t,1], shock_history[:,t+1], pruned_state) + end + else + Y[:,1,1] = state_update(initial_state,shock_history[:,1]) + + for t in 1:periods-1 + Y[:,t+1,1] = state_update(Y[:,t,1],shock_history[:,t+1]) + end end return KeyedArray(Y[var_idx,:,:] .+ level[var_idx]; Variables = T.var[var_idx], Periods = 1:periods, Shocks = [:simulate]) @@ -2823,10 +2854,18 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shck = T.nExo == 0 ? Vector{Float64}(undef, 0) : zeros(T.nExo) - Y[:,1,1] = state_update(initial_state,shck) + if pruning + Y[:,1,1], pruned_state = state_update(initial_state, shck, initial_state) - for t in 1:periods-1 - Y[:,t+1,1] = state_update(Y[:,t,1],shck) + for t in 1:periods-1 + Y[:,t+1,1], pruned_state = state_update(Y[:,t,1], shck, pruned_state) + end + else + Y[:,1,1] = state_update(initial_state,shck) + + for t in 1:periods-1 + Y[:,t+1,1] = state_update(Y[:,t,1],shck) + end end return KeyedArray(Y[var_idx,:,:] .+ level[var_idx]; Variables = T.var[var_idx], Periods = 1:periods, Shocks = [:none]) @@ -2839,10 +2878,18 @@ function irf(state_update::Function, initial_state::Vector{Float64}, level::Vect shock_history[ii,1] = negative_shock ? 
-1 : 1 end - Y[:,1,i] = state_update(initial_state,shock_history[:,1]) + if pruning + Y[:,1,i], pruned_state = state_update(initial_state, shock_history[:,1], initial_state) - for t in 1:periods-1 - Y[:,t+1,i] = state_update(Y[:,t,i],shock_history[:,t+1]) + for t in 1:periods-1 + Y[:,t+1,i], pruned_state = state_update(Y[:,t,i], shock_history[:,t+1],pruned_state) + end + else + Y[:,1,i] = state_update(initial_state,shock_history[:,1]) + + for t in 1:periods-1 + Y[:,t+1,i] = state_update(Y[:,t,i],shock_history[:,t+1]) + end end end @@ -2855,6 +2902,7 @@ end function girf(state_update::Function, initial_state::Vector{Float64}, level::Vector{Float64}, + pruning::Bool, T::timings; periods::Int = 40, shocks::Union{Symbol_input,Matrix{Float64},KeyedArray{Float64}} = :all, @@ -2909,14 +2957,24 @@ function girf(state_update::Function, shock_history[ii,1] = negative_shock ? -1 : 1 end - Y1[:,1] = state_update(initial_state, baseline_noise) - Y2[:,1] = state_update(initial_state, baseline_noise) + if pruning + Y1[:,1], pruned_state1 = state_update(initial_state, baseline_noise, initial_state) + Y2[:,1], pruned_state2 = state_update(initial_state, baseline_noise, initial_state) + else + Y1[:,1] = state_update(initial_state, baseline_noise) + Y2[:,1] = state_update(initial_state, baseline_noise) + end for t in 1:periods baseline_noise = randn(T.nExo) - Y1[:,t+1] = state_update(Y1[:,t],baseline_noise) - Y2[:,t+1] = state_update(Y2[:,t],baseline_noise + shock_history[:,t]) + if pruning + Y1[:,t+1], pruned_state1 = state_update(Y1[:,t], baseline_noise, pruned_state1) + Y2[:,t+1], pruned_state2 = state_update(Y2[:,t], baseline_noise + shock_history[:,t], pruned_state2) + else + Y1[:,t+1] = state_update(Y1[:,t],baseline_noise) + Y2[:,t+1] = state_update(Y2[:,t],baseline_noise + shock_history[:,t]) + end end Y[:,:,i] += Y2 - Y1 @@ -3000,24 +3058,25 @@ end function parse_algorithm_to_state_update(algorithm::Symbol, 𝓂::β„³) if :linear_time_iteration == algorithm state_update = 𝓂.solution.perturbation.linear_time_iteration.state_update - + pruning = false elseif algorithm ∈ [:riccati, :first_order] state_update = 𝓂.solution.perturbation.first_order.state_update - + pruning = false elseif :second_order == algorithm state_update = 𝓂.solution.perturbation.second_order.state_update - + pruning = false elseif :pruned_second_order == algorithm state_update = 𝓂.solution.perturbation.pruned_second_order.state_update - + pruning = true elseif :third_order == algorithm state_update = 𝓂.solution.perturbation.third_order.state_update - + pruning = false elseif :pruned_third_order == algorithm state_update = 𝓂.solution.perturbation.pruned_third_order.state_update + pruning = true end - return state_update + return state_update, pruning end diff --git a/src/get_functions.jl b/src/get_functions.jl index 0bf92e165..68eaf8396 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -71,9 +71,9 @@ function get_shock_decomposition(𝓂::β„³, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) @@ -148,9 +148,9 @@ function get_estimated_shocks(𝓂::β„³, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) @@ -231,9 +231,9 @@ function get_estimated_variables(𝓂::β„³, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) @@ -311,9 +311,9 @@ function get_estimated_variable_standard_deviations(𝓂::β„³, smooth::Bool = true, verbose::Bool = false) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) @@ -487,11 +487,11 @@ function get_conditional_forecast(𝓂::β„³, shocks = Matrix{Union{Nothing,Float64}}(undef,length(𝓂.exo),periods) end - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) - state_update = parse_algorithm_to_state_update(:first_order, 𝓂) + state_update, pruning = parse_algorithm_to_state_update(:first_order, 𝓂) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) @@ -761,31 +761,37 @@ function get_irf(𝓂::β„³; levels::Bool = false, verbose::Bool = false) - write_parameters_input!(𝓂,parameters, verbose = verbose) - - solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true, algorithm = algorithm) shocks = 𝓂.timings.nExo == 0 ? :none : shocks @assert !(shocks == :none && generalised_irf) "Cannot compute generalised IRFs for model without shocks." - state_update = parse_algorithm_to_state_update(algorithm, 𝓂) + state_update, pruning = parse_algorithm_to_state_update(algorithm, 𝓂) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) if algorithm == :second_order SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + SSS_delta = reference_steady_state[1:length(𝓂.var)] - 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state else SSS_delta = zeros(length(𝓂.var)) end if levels if algorithm == :second_order - reference_steady_state = 𝓂.solution.perturbation.second_order.stochastic_steady_state#[indexin(full_SS,sort(union(𝓂.var,𝓂.exo_present)))] + reference_steady_state = 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + reference_steady_state = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order - reference_steady_state = 𝓂.solution.perturbation.third_order.stochastic_steady_state#[indexin(full_SS,sort(union(𝓂.var,𝓂.exo_present)))] + reference_steady_state = 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + reference_steady_state = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state end end @@ -795,6 +801,7 @@ function get_irf(𝓂::β„³; girfs = girf(state_update, SSS_delta, levels ? reference_steady_state : SSS_delta, + pruning, 𝓂.timings; periods = periods, shocks = shocks, @@ -805,6 +812,7 @@ function get_irf(𝓂::β„³; irfs = irf(state_update, initial_state, levels ? reference_steady_state : SSS_delta, + pruning, 𝓂.timings; periods = periods, shocks = shocks, @@ -905,9 +913,9 @@ function get_steady_state(𝓂::β„³; verbose::Bool = false, silent::Bool = true) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) vars_in_ss_equations = sort(collect(setdiff(reduce(union,get_symbols.(𝓂.ss_aux_equations)),union(𝓂.parameters_in_equations,𝓂.βž•_vars)))) @@ -1122,11 +1130,11 @@ function get_solution(𝓂::β„³; algorithm::Symbol = :first_order, verbose::Bool = false) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran] "This function only works for linear solutions. Choose a respective algorithm." 
- solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true, algorithm = algorithm) if algorithm == :linear_time_iteration solution_matrix = 𝓂.solution.perturbation.linear_time_iteration.solution_matrix @@ -1303,9 +1311,9 @@ function get_conditional_variance_decomposition(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) SS_and_pars, _ = 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) @@ -1423,9 +1431,9 @@ function get_variance_decomposition(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) SS_and_pars, solution_error = 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) @@ -1496,9 +1504,9 @@ function get_correlation(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) covar_dcmp, ___, __, _ = calculate_covariance(𝓂.parameter_values, 𝓂, verbose = verbose) @@ -1569,9 +1577,9 @@ function get_autocorrelation(𝓂::β„³; parameters = nothing, verbose::Bool = false) - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) covar_dcmp, sol, __, _ = calculate_covariance(𝓂.parameter_values, 𝓂, verbose = verbose) @@ -1671,9 +1679,9 @@ function get_moments(𝓂::β„³; parameter_derivatives::Symbol_input = :all, verbose::Bool = false)#limit output by selecting pars and vars like for plots and irfs!? - solve!(𝓂, verbose = verbose) + solve!(𝓂, parameters = parameters, verbose = verbose) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) if parameter_derivatives == :all length_par = length(𝓂.parameters) diff --git a/src/macros.jl b/src/macros.jl index e882873d4..53e73902c 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -1,6 +1,8 @@ import MacroTools: postwalk, unblock +const all_available_algorithms = [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :pruned_second_order, :third_order, :pruned_third_order] + """ $(SIGNATURES) @@ -793,7 +795,7 @@ macro model(𝓂,ex) ), Float64[], Set([:first_order]), - Set([:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran, :second_order, :pruned_second_order, :third_order, :pruned_third_order]), + Set(all_available_algorithms), true, false ) @@ -1306,7 +1308,7 @@ macro parameters(𝓂,ex...) 
# time_dynamic_derivs = @elapsed write_functions_mapping!(mod.$𝓂, $perturbation_order) - mod.$𝓂.solution.outdated_algorithms = Set([:linear_time_iteration, :riccati, :quadratic_iteration, :binder_pesaran, :first_order, :second_order, :pruned_second_order, :third_order, :pruned_third_order]) + mod.$𝓂.solution.outdated_algorithms = Set(all_available_algorithms) if !$silent if $perturbation_order == 1 diff --git a/src/plotting.jl b/src/plotting.jl index ec063cd2d..718842113 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -102,9 +102,9 @@ function plot_model_estimates(𝓂::β„³, tickfontsize = 8, framestyle = :box) - write_parameters_input!(𝓂, parameters, verbose = verbose) + # write_parameters_input!(𝓂, parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true) reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (copy(𝓂.solution.non_stochastic_steady_state), eps()) @@ -366,33 +366,34 @@ function plot_irf(𝓂::β„³; tickfontsize = 8, framestyle = :box) - write_parameters_input!(𝓂,parameters, verbose = verbose) + # write_parameters_input!(𝓂,parameters, verbose = verbose) - solve!(𝓂, verbose = verbose, dynamics = true, algorithm = algorithm) + solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true, algorithm = algorithm) - state_update = parse_algorithm_to_state_update(algorithm, 𝓂) + state_update, pruning = parse_algorithm_to_state_update(algorithm, 𝓂) - NSSS, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) - - full_SS = sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)) - full_SS[indexin(𝓂.aux,full_SS)] = map(x -> Symbol(replace(string(x), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")), 𝓂.aux) - - NSSS_labels = [sort(union(𝓂.exo_present,𝓂.var))...,𝓂.calibration_equations_parameters...] - - reference_steady_state = [s ∈ 𝓂.exo_present ? 0 : NSSS[indexin([s],NSSS_labels)...] for s in full_SS] + reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) if algorithm == :second_order - SSS_delta = reference_steady_state - 𝓂.solution.perturbation.second_order.stochastic_steady_state + SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order - SSS_delta = reference_steady_state - 𝓂.solution.perturbation.third_order.stochastic_steady_state + SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state else - SSS_delta = zeros(length(reference_steady_state)) + SSS_delta = zeros(𝓂.timings.nVars) end if algorithm == :second_order reference_steady_state = 𝓂.solution.perturbation.second_order.stochastic_steady_state + elseif algorithm == :pruned_second_order + reference_steady_state = 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order reference_steady_state = 𝓂.solution.perturbation.third_order.stochastic_steady_state + elseif algorithm == :pruned_third_order + reference_steady_state = 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state end initial_state = initial_state == [0.0] ? zeros(𝓂.timings.nVars) - SSS_delta : initial_state[indexin(full_SS, sort(union(𝓂.var,𝓂.exo_present)))] - reference_steady_state @@ -412,9 +413,9 @@ function plot_irf(𝓂::β„³; var_idx = parse_variables_input_to_index(variables, 𝓂.timings) if generalised_irf - Y = girf(state_update, SSS_delta, zeros(𝓂.timings.nVars), 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock)#, warmup_periods::Int = 100, draws::Int = 50, iterations_to_steady_state::Int = 500) + Y = girf(state_update, SSS_delta, zeros(𝓂.timings.nVars), pruning, 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock)#, warmup_periods::Int = 100, draws::Int = 50, iterations_to_steady_state::Int = 500) else - Y = irf(state_update, initial_state, zeros(𝓂.timings.nVars), 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock) .+ SSS_delta[var_idx] + Y = irf(state_update, initial_state, zeros(𝓂.timings.nVars), pruning, 𝓂.timings; periods = periods, shocks = shocks, variables = variables, negative_shock = negative_shock) .+ SSS_delta[var_idx] end if shocks isa KeyedArray{Float64} || shocks isa Matrix{Float64} @@ -457,13 +458,17 @@ function plot_irf(𝓂::β„³; title = string(𝓂.timings.var[var_idx[i]]), ylabel = "Level", label = "") + if can_dual_axis StatsPlots.plot!(StatsPlots.twinx(), 100*((Y[i,:,shock] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS], color = :black, label = "") + + StatsPlots.hline!(can_dual_axis ? 
[SS 0] : [SS], + color = :black, + label = "") end) if !(plot_count % plots_per_page == 0) @@ -1326,39 +1331,35 @@ function plot_conditional_forecast(𝓂::β„³, for i in 1:length(var_idx) SS = reference_steady_state[i] if !(all(isapprox.(Y[i,:],0,atol = eps(Float32)))) || length(findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing)) > 0 + can_dual_axis = gr_back && all((Y[i,:] .+ SS) .> eps(Float32)) && (SS > eps(Float32)) - if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") - StatsPlots.scatter!(cond_idx, conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") - if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end - StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") - end) - end - else - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - if length(cond_idx) > 0 - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - StatsPlots.scatter!(cond_idx, conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) - end) - else - push!(pp,begin - StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) - StatsPlots.hline!([SS], color = :black, label = "") - end) - end + cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) + + push!(pp,begin + StatsPlots.plot(Y[i,:] .+ SS, + title = string(full_SS[var_idx[i]]), + ylabel = "Level", + label = "") - end + if can_dual_axis + StatsPlots.plot!(StatsPlots.twinx(), + 100*((Y[i,:] .+ SS) ./ SS .- 1), + ylabel = LaTeXStrings.L"\% \Delta", + label = "") + end + + StatsPlots.hline!(can_dual_axis ? [SS 0] : [SS], + color = :black, + label = "") + + if length(cond_idx) > 0 + StatsPlots.scatter!(cond_idx, + conditions_in_levels ? 
vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, + label = "", + marker = :star8, + markercolor = :black) + end + end) if !(plot_count % plots_per_page == 0) plot_count += 1 From c41786ff65dd27010b39087b5092ac4dc6007eb1 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 13:08:42 +0200 Subject: [PATCH 41/83] fix girf --- src/MacroModelling.jl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index acef90ca6..723d9a7da 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -2940,11 +2940,17 @@ function girf(state_update::Function, var_idx = parse_variables_input_to_index(variables, T) Y = zeros(T.nVars, periods + 1, length(shock_idx)) + + pruned_initial_state = copy(initial_state) for (i,ii) in enumerate(shock_idx) for draw in 1:draws for i in 1:warmup_periods - initial_state = state_update(initial_state, randn(T.nExo)) + if pruning + initial_state, pruned_initial_state = state_update(initial_state, randn(T.nExo), pruned_initial_state) + else + initial_state = state_update(initial_state, randn(T.nExo)) + end end Y1 = zeros(T.nVars, periods + 1) @@ -2958,8 +2964,8 @@ function girf(state_update::Function, end if pruning - Y1[:,1], pruned_state1 = state_update(initial_state, baseline_noise, initial_state) - Y2[:,1], pruned_state2 = state_update(initial_state, baseline_noise, initial_state) + Y1[:,1], pruned_state1 = state_update(initial_state, baseline_noise, pruned_initial_state) + Y2[:,1], pruned_state2 = state_update(initial_state, baseline_noise, pruned_initial_state) else Y1[:,1] = state_update(initial_state, baseline_noise) Y2[:,1] = state_update(initial_state, baseline_noise) From a92e85694d58632737dd006d60736f8e29adde66 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 14:28:23 +0200 Subject: [PATCH 42/83] rm test pruned irf update consistency --- test/functionality_tests.jl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index 7eb445f1c..9fa264fd7 100644 --- a/test/functionality_tests.jl +++ b/test/functionality_tests.jl @@ -303,9 +303,10 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :simulate) new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :none, initial_state = collect(lvl_irfs(:,5,m.exo[1]))) new_sub_lvl_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :none, initial_state = collect(lvl_irfs(:,5,m.exo[1])), levels = true) - @test isapprox(collect(new_sub_lvl_irfs(:,1,:)), collect(lvl_irfs(:,6,m.exo[1])),rtol = eps(Float32)) - + if algorithm ∈ setdiff(all_available_algorithms,[:pruned_second_order,:pruned_third_order]) + @test isapprox(collect(new_sub_lvl_irfs(:,1,:)), collect(lvl_irfs(:,6,m.exo[1])),rtol = eps(Float32)) + end new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, variables = m.timings.var[1]) new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, variables = m.timings.var[end-1:end]) From a731fddd9baf7fe5e95e98ff6c1755c2099ce860 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 14:31:25 +0200 Subject: [PATCH 43/83] fix test --- test/functionality_tests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index 9fa264fd7..978d90dc7 100644 --- a/test/functionality_tests.jl +++ 
b/test/functionality_tests.jl @@ -304,7 +304,7 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = new_sub_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :none, initial_state = collect(lvl_irfs(:,5,m.exo[1]))) new_sub_lvl_irfs = get_irf(m, verbose = true, algorithm = algorithm, shocks = :none, initial_state = collect(lvl_irfs(:,5,m.exo[1])), levels = true) - if algorithm ∈ setdiff(all_available_algorithms,[:pruned_second_order,:pruned_third_order]) + if algorithm ∈ setdiff(MacroModelling.all_available_algorithms, [:pruned_second_order,:pruned_third_order]) @test isapprox(collect(new_sub_lvl_irfs(:,1,:)), collect(lvl_irfs(:,6,m.exo[1])),rtol = eps(Float32)) end From f8bee8257680edf96697fe8409aadc5cef42c4c0 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 14:44:23 +0200 Subject: [PATCH 44/83] have simulate return levels by default --- docs/src/tutorials/rbc.md | 2 +- docs/src/tutorials/sw03.md | 2 +- src/get_functions.jl | 18 +++++++++--------- src/plotting.jl | 2 +- test/functionality_tests.jl | 8 ++++---- test/test_standalone_function.jl | 2 +- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/src/tutorials/rbc.md b/docs/src/tutorials/rbc.md index d6a7bab37..66110c584 100644 --- a/docs/src/tutorials/rbc.md +++ b/docs/src/tutorials/rbc.md @@ -175,7 +175,7 @@ For simulations this is possible by calling [`simulate`](@ref): simulate(RBC) ``` -which returns the simulated data in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. +which returns the simulated data in levels in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. ## Conditional forecasts diff --git a/docs/src/tutorials/sw03.md b/docs/src/tutorials/sw03.md index 51533a3f6..a173abf9f 100644 --- a/docs/src/tutorials/sw03.md +++ b/docs/src/tutorials/sw03.md @@ -308,7 +308,7 @@ For simulations this is possible by calling [`simulate`](@ref): simulate(SW03) ``` -which returns the simulated data in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. +which returns the simulated data in levels in a 3-dimensional `KeyedArray` of the same structure as for the IRFs. 
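For readers of this tutorial section, a minimal sketch of what the new default means in practice (illustrative only and not part of the diff; it assumes the `SW03` model defined earlier in the tutorial and relies on the `levels` and `shocks` keywords of `get_irf` shown elsewhere in this patch):

```julia
# simulate now forwards levels = true to get_irf, so its output is in levels
sim_levels = simulate(SW03)                         # 3-dimensional KeyedArray in levels

# the same kind of output can be requested explicitly from get_irf ...
sim_levels_too = get_irf(SW03, shocks = :simulate, levels = true)

# ... while omitting levels = true keeps the previous behaviour of returning
# deviations from the relevant steady state (each call draws fresh shocks)
sim_deviations = get_irf(SW03, shocks = :simulate)
```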
## Conditional forecasts diff --git a/src/get_functions.jl b/src/get_functions.jl index 68eaf8396..b5addf1f0 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -32,7 +32,7 @@ end; simulation = simulate(RBC); -get_shock_decomposition(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_shock_decomposition(RBC,simulation([:c],:,:simulate)) # output 3-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Variables ∈ 4-element Vector{Symbol} @@ -131,7 +131,7 @@ end; simulation = simulate(RBC); -get_estimated_shocks(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_estimated_shocks(RBC,simulation([:c],:,:simulate)) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Shocks ∈ 1-element Vector{Symbol} @@ -209,7 +209,7 @@ end; simulation = simulate(RBC); -get_estimated_variables(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_estimated_variables(RBC,simulation([:c],:,:simulate)) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Variables ∈ 4-element Vector{Symbol} @@ -290,7 +290,7 @@ end; simulation = simulate(RBC); -get_estimated_variable_standard_deviations(RBC,simulation([:c],:,:simulate), data_in_levels = false) +get_estimated_variable_standard_deviations(RBC,simulation([:c],:,:simulate)) # output 2-dimensional KeyedArray(NamedDimsArray(...)) with keys: ↓ Standard_deviations ∈ 4-element Vector{Symbol} @@ -699,7 +699,7 @@ end """ $(SIGNATURES) -Return impulse response functions (IRFs) of the model in a 3-dimensional KeyedArray +Return impulse response functions (IRFs) of the model in a 3-dimensional KeyedArray. Values are returned in absolute deviations from the (non) stochastic steady state by default. # Arguments - $MODEL @@ -835,14 +835,14 @@ See [`get_irf`](@ref) get_IRF = get_irf """ -Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. +Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. Function returns values in levels by default. """ -simulate(args...; kwargs...) = get_irf(args...; kwargs..., shocks = :simulate)#[:,:,1] +simulate(args...; kwargs...) = get_irf(args...; levels = true, kwargs..., shocks = :simulate)#[:,:,1] """ -Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. +Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. Function returns values in levels by default. """ -get_simulation(args...; kwargs...) = get_irf(args...; kwargs..., shocks = :simulate)#[:,:,1] +get_simulation(args...; kwargs...) = get_irf(args...; levels = true, kwargs..., shocks = :simulate)#[:,:,1] """ Wrapper for [`get_irf`](@ref) with `shocks = :simulate`. 
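Alongside these docstring updates, the test changes later in this patch recover deviations by subtracting the non stochastic steady state from the levels output. A small sketch of that pattern (illustrative only, not part of the diff; it assumes the `RBC_CME` example model used in the docstrings and that the selected variables appear in `RBC_CME.var` in the same order as in the steady state vector, mirroring the updated functionality tests):

```julia
simulation     = simulate(RBC_CME)                   # levels by default now
data_in_levels = simulation([:c, :k], :, :simulate)  # observables in levels

# deviations are recovered by subtracting the non stochastic steady state
idxs = Int.(indexin([:c, :k], RBC_CME.var))          # positions of the observables
data = data_in_levels .- RBC_CME.solution.non_stochastic_steady_state[idxs]

# estimation helpers expect levels by default ...
get_estimated_shocks(RBC_CME, data_in_levels)
# ... and still accept deviations when told so explicitly
get_estimated_shocks(RBC_CME, data, data_in_levels = false)
```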
diff --git a/src/plotting.jl b/src/plotting.jl index 718842113..06a632862 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -73,7 +73,7 @@ end simulation = simulate(RBC_CME) -plot_model_estimates(RBC_CME, simulation([:k],:,:simulate), data_in_levels = false) +plot_model_estimates(RBC_CME, simulation([:k],:,:simulate)) ``` """ function plot_model_estimates(𝓂::β„³, diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index 978d90dc7..a0f6ea756 100644 --- a/test/functionality_tests.jl +++ b/test/functionality_tests.jl @@ -221,8 +221,8 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = simulation = simulate(m) - data = simulation(m.var[var_idxs],:,:simulate) - data_in_levels = data .+ m.solution.non_stochastic_steady_state[var_idxs] + data_in_levels = simulation(m.var[var_idxs],:,:simulate) + # data_in_levels = data# .+ m.solution.non_stochastic_steady_state[var_idxs] estim_vars1 = get_estimated_variables(m, data, data_in_levels = false, verbose = true) estim_vars2 = get_estimated_variables(m, data_in_levels, verbose = true) @@ -383,8 +383,8 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = simulation = simulate(m) - data = simulation(m.var[var_idxs],:,:simulate) - data_in_levels = data .+ m.solution.non_stochastic_steady_state[var_idxs] + data_in_levels = simulation(m.var[var_idxs],:,:simulate) + # data_in_levels = data .+ m.solution.non_stochastic_steady_state[var_idxs] plot_model_estimates(m, data, data_in_levels = false) plot_model_estimates(m, data, data_in_levels = false, verbose = true) diff --git a/test/test_standalone_function.jl b/test/test_standalone_function.jl index 1a2936228..7915cdf23 100644 --- a/test/test_standalone_function.jl +++ b/test/test_standalone_function.jl @@ -388,7 +388,7 @@ RBC_CME = nothing - data = simulate(RBC_CME, levels = true)[:,:,1] + data = simulate(RBC_CME)[:,:,1] observables = [:c,:k] @test isapprox(420.25039827148197,calculate_kalman_filter_loglikelihood(RBC_CME,data(observables),observables),rtol = 1e-5) From 3263d07bd6d1bcd6bd75acc271d192614d65fbe1 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 14:49:23 +0200 Subject: [PATCH 45/83] put precompilation back in --- src/MacroModelling.jl | 138 +++++++++++++++++++++--------------------- 1 file changed, 69 insertions(+), 69 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 723d9a7da..a1ad93ee5 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -3350,74 +3350,74 @@ end -# @setup_workload begin -# # Putting some things in `setup` can reduce the size of the -# # precompile file and potentially make loading faster. 
-# @model FS2000 begin -# dA[0] = exp(gam + z_e_a * e_a[x]) -# log(m[0]) = (1 - rho) * log(mst) + rho * log(m[-1]) + z_e_m * e_m[x] -# - P[0] / (c[1] * P[1] * m[0]) + bet * P[1] * (alp * exp( - alp * (gam + log(e[1]))) * k[0] ^ (alp - 1) * n[1] ^ (1 - alp) + (1 - del) * exp( - (gam + log(e[1])))) / (c[2] * P[2] * m[1])=0 -# W[0] = l[0] / n[0] -# - (psi / (1 - psi)) * (c[0] * P[0] / (1 - n[0])) + l[0] / n[0] = 0 -# R[0] = P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ ( - alp) / W[0] -# 1 / (c[0] * P[0]) - bet * P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) / (m[0] * l[0] * c[1] * P[1]) = 0 -# c[0] + k[0] = exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) + (1 - del) * exp( - (gam + z_e_a * e_a[x])) * k[-1] -# P[0] * c[0] = m[0] -# m[0] - 1 + d[0] = l[0] -# e[0] = exp(z_e_a * e_a[x]) -# y[0] = k[-1] ^ alp * n[0] ^ (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) -# gy_obs[0] = dA[0] * y[0] / y[-1] -# gp_obs[0] = (P[0] / P[-1]) * m[-1] / dA[0] -# log_gy_obs[0] = log(gy_obs[0]) -# log_gp_obs[0] = log(gp_obs[0]) -# end - -# @parameters FS2000 silent = true begin -# alp = 0.356 -# bet = 0.993 -# gam = 0.0085 -# mst = 1.0002 -# rho = 0.129 -# psi = 0.65 -# del = 0.01 -# z_e_a = 0.035449 -# z_e_m = 0.008862 -# end - -# ENV["GKSwstype"] = "nul" - -# @compile_workload begin -# # all calls in this block will be precompiled, regardless of whether -# # they belong to your package or not (on Julia 1.8 and higher) -# @model RBC begin -# 1 / c[0] = (0.95 / c[1]) * (Ξ± * exp(z[1]) * k[0]^(Ξ± - 1) + (1 - Ξ΄)) -# c[0] + k[0] = (1 - Ξ΄) * k[-1] + exp(z[0]) * k[-1]^Ξ± -# z[0] = 0.2 * z[-1] + 0.01 * eps_z[x] -# end - -# @parameters RBC silent = true precompile = true begin -# Ξ΄ = 0.02 -# Ξ± = 0.5 -# end - -# get_SS(FS2000) -# get_SS(FS2000, parameters = :alp => 0.36) -# get_solution(FS2000) -# get_solution(FS2000, parameters = :alp => 0.35) -# get_standard_deviation(FS2000) -# get_correlation(FS2000) -# get_autocorrelation(FS2000) -# get_variance_decomposition(FS2000) -# get_conditional_variance_decomposition(FS2000) -# get_irf(FS2000) -# # get_SSS(FS2000, silent = true) -# # get_SSS(FS2000, algorithm = :third_order, silent = true) - -# # import Plots, StatsPlots -# # plot_irf(FS2000) -# # plot_solution(FS2000,:k) # fix warning when there is no sensitivity and all values are the same. triggers: no strict ticks found... -# # plot_conditional_variance_decomposition(FS2000) -# end -# end +@setup_workload begin + # Putting some things in `setup` can reduce the size of the + # precompile file and potentially make loading faster. 
+ @model FS2000 begin + dA[0] = exp(gam + z_e_a * e_a[x]) + log(m[0]) = (1 - rho) * log(mst) + rho * log(m[-1]) + z_e_m * e_m[x] + - P[0] / (c[1] * P[1] * m[0]) + bet * P[1] * (alp * exp( - alp * (gam + log(e[1]))) * k[0] ^ (alp - 1) * n[1] ^ (1 - alp) + (1 - del) * exp( - (gam + log(e[1])))) / (c[2] * P[2] * m[1])=0 + W[0] = l[0] / n[0] + - (psi / (1 - psi)) * (c[0] * P[0] / (1 - n[0])) + l[0] / n[0] = 0 + R[0] = P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ ( - alp) / W[0] + 1 / (c[0] * P[0]) - bet * P[0] * (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) / (m[0] * l[0] * c[1] * P[1]) = 0 + c[0] + k[0] = exp( - alp * (gam + z_e_a * e_a[x])) * k[-1] ^ alp * n[0] ^ (1 - alp) + (1 - del) * exp( - (gam + z_e_a * e_a[x])) * k[-1] + P[0] * c[0] = m[0] + m[0] - 1 + d[0] = l[0] + e[0] = exp(z_e_a * e_a[x]) + y[0] = k[-1] ^ alp * n[0] ^ (1 - alp) * exp( - alp * (gam + z_e_a * e_a[x])) + gy_obs[0] = dA[0] * y[0] / y[-1] + gp_obs[0] = (P[0] / P[-1]) * m[-1] / dA[0] + log_gy_obs[0] = log(gy_obs[0]) + log_gp_obs[0] = log(gp_obs[0]) + end + + @parameters FS2000 silent = true begin + alp = 0.356 + bet = 0.993 + gam = 0.0085 + mst = 1.0002 + rho = 0.129 + psi = 0.65 + del = 0.01 + z_e_a = 0.035449 + z_e_m = 0.008862 + end + + ENV["GKSwstype"] = "nul" + + @compile_workload begin + # all calls in this block will be precompiled, regardless of whether + # they belong to your package or not (on Julia 1.8 and higher) + @model RBC begin + 1 / c[0] = (0.95 / c[1]) * (Ξ± * exp(z[1]) * k[0]^(Ξ± - 1) + (1 - Ξ΄)) + c[0] + k[0] = (1 - Ξ΄) * k[-1] + exp(z[0]) * k[-1]^Ξ± + z[0] = 0.2 * z[-1] + 0.01 * eps_z[x] + end + + @parameters RBC silent = true precompile = true begin + Ξ΄ = 0.02 + Ξ± = 0.5 + end + + get_SS(FS2000) + get_SS(FS2000, parameters = :alp => 0.36) + get_solution(FS2000) + get_solution(FS2000, parameters = :alp => 0.35) + get_standard_deviation(FS2000) + get_correlation(FS2000) + get_autocorrelation(FS2000) + get_variance_decomposition(FS2000) + get_conditional_variance_decomposition(FS2000) + get_irf(FS2000) + # get_SSS(FS2000, silent = true) + # get_SSS(FS2000, algorithm = :third_order, silent = true) + + # import Plots, StatsPlots + # plot_irf(FS2000) + # plot_solution(FS2000,:k) # fix warning when there is no sensitivity and all values are the same. triggers: no strict ticks found... + # plot_conditional_variance_decomposition(FS2000) + end +end end From cda1f3d2df8675c30c2d8b9df97d9d25d06090a3 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 15:53:09 +0200 Subject: [PATCH 46/83] fix test for levels and plotting --- src/plotting.jl | 77 +++++++++++++++++++++---------------- test/functionality_tests.jl | 6 +-- 2 files changed, 47 insertions(+), 36 deletions(-) diff --git a/src/plotting.jl b/src/plotting.jl index 06a632862..076125300 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -372,18 +372,25 @@ function plot_irf(𝓂::β„³; state_update, pruning = parse_algorithm_to_state_update(algorithm, 𝓂) - reference_steady_state, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) + NSSS, solution_error = 𝓂.solution.outdated_NSSS ? 
𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) + + full_SS = sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)) + full_SS[indexin(𝓂.aux,full_SS)] = map(x -> Symbol(replace(string(x), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")), 𝓂.aux) + + NSSS_labels = [sort(union(𝓂.exo_present,𝓂.var))...,𝓂.calibration_equations_parameters...] + + reference_steady_state = [s ∈ 𝓂.exo_present ? 0 : NSSS[indexin([s],NSSS_labels)...] for s in full_SS] if algorithm == :second_order - SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.second_order.stochastic_steady_state + SSS_delta = reference_steady_state - 𝓂.solution.perturbation.second_order.stochastic_steady_state elseif algorithm == :pruned_second_order - SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state + SSS_delta = reference_steady_state - 𝓂.solution.perturbation.pruned_second_order.stochastic_steady_state elseif algorithm == :third_order - SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.third_order.stochastic_steady_state + SSS_delta = reference_steady_state - 𝓂.solution.perturbation.third_order.stochastic_steady_state elseif algorithm == :pruned_third_order - SSS_delta = reference_steady_state[1:𝓂.timings.nVars] - 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state + SSS_delta = reference_steady_state - 𝓂.solution.perturbation.pruned_third_order.stochastic_steady_state else - SSS_delta = zeros(𝓂.timings.nVars) + SSS_delta = zeros(length(reference_steady_state)) end if algorithm == :second_order @@ -1331,35 +1338,39 @@ function plot_conditional_forecast(𝓂::β„³, for i in 1:length(var_idx) SS = reference_steady_state[i] if !(all(isapprox.(Y[i,:],0,atol = eps(Float32)))) || length(findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing)) > 0 - can_dual_axis = gr_back && all((Y[i,:] .+ SS) .> eps(Float32)) && (SS > eps(Float32)) - cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) - - push!(pp,begin - StatsPlots.plot(Y[i,:] .+ SS, - title = string(full_SS[var_idx[i]]), - ylabel = "Level", - label = "") - - if can_dual_axis - StatsPlots.plot!(StatsPlots.twinx(), - 100*((Y[i,:] .+ SS) ./ SS .- 1), - ylabel = LaTeXStrings.L"\% \Delta", - label = "") - end + if all((Y[i,:] .+ SS) .> eps(Float32)) & (SS > eps(Float32)) + cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) + if length(cond_idx) > 0 + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") + if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end + StatsPlots.hline!(gr_back ? [SS 0] : [SS],color = :black,label = "") + StatsPlots.scatter!(cond_idx, conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) + end) + else + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS,title = string(full_SS[var_idx[i]]),ylabel = "Level",label = "") + if gr_back StatsPlots.plot!(StatsPlots.twinx(),1:periods, 100*((Y[i,:] .+ SS) ./ SS .- 1), ylabel = LaTeXStrings.L"\% \Delta", label = "") end + StatsPlots.hline!(gr_back ? 
[SS 0] : [SS],color = :black,label = "") + end) + end + else + cond_idx = findall(vcat(conditions,shocks)[var_idx[i],:] .!= nothing) + if length(cond_idx) > 0 + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) + StatsPlots.hline!([SS], color = :black, label = "") + StatsPlots.scatter!(cond_idx, conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, label = "",marker = :star8, markercolor = :black) + end) + else + push!(pp,begin + StatsPlots.plot(1:periods, Y[i,:] .+ SS, title = string(full_SS[var_idx[i]]), label = "", ylabel = "Level")#, rightmargin = 17mm)#,label = reshape(String.(𝓂.timings.solution.algorithm),1,:) + StatsPlots.hline!([SS], color = :black, label = "") + end) + end - StatsPlots.hline!(can_dual_axis ? [SS 0] : [SS], - color = :black, - label = "") - - if length(cond_idx) > 0 - StatsPlots.scatter!(cond_idx, - conditions_in_levels ? vcat(conditions,shocks)[var_idx[i],cond_idx] : vcat(conditions,shocks)[var_idx[i],cond_idx] .+ SS, - label = "", - marker = :star8, - markercolor = :black) - end - end) + end if !(plot_count % plots_per_page == 0) plot_count += 1 diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index a0f6ea756..448ffe233 100644 --- a/test/functionality_tests.jl +++ b/test/functionality_tests.jl @@ -222,7 +222,7 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = simulation = simulate(m) data_in_levels = simulation(m.var[var_idxs],:,:simulate) - # data_in_levels = data# .+ m.solution.non_stochastic_steady_state[var_idxs] + data = data_in_levels .- m.solution.non_stochastic_steady_state[var_idxs] estim_vars1 = get_estimated_variables(m, data, data_in_levels = false, verbose = true) estim_vars2 = get_estimated_variables(m, data_in_levels, verbose = true) @@ -384,8 +384,8 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = simulation = simulate(m) data_in_levels = simulation(m.var[var_idxs],:,:simulate) - # data_in_levels = data .+ m.solution.non_stochastic_steady_state[var_idxs] - + data = data_in_levels .- m.solution.non_stochastic_steady_state[var_idxs] + plot_model_estimates(m, data, data_in_levels = false) plot_model_estimates(m, data, data_in_levels = false, verbose = true) plot_model_estimates(m, data, data_in_levels = false, verbose = true) From be3db0d147ca87b24c48f0db6c079d8e854900df Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 17:05:59 +0200 Subject: [PATCH 47/83] make irf test work --- test/test_standalone_function.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/test_standalone_function.jl b/test/test_standalone_function.jl index 7915cdf23..e02e50905 100644 --- a/test/test_standalone_function.jl +++ b/test/test_standalone_function.jl @@ -246,31 +246,31 @@ end end - iirrff = irf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + iirrff = irf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff[4,1,:],[ -0.00036685520477089503 0.0021720718769730014],rtol = eps(Float32)) - ggiirrff = girf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + ggiirrff = girf(first_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff[4,1,:],ggiirrff[4,1,:],rtol = eps(Float32)) SSS_delta = 
RBC_CME.solution.non_stochastic_steady_state[1:length(RBC_CME.var)] - RBC_CME.solution.perturbation.second_order.stochastic_steady_state - ggiirrff2 = girf(second_order_state_update, SSS_delta, zeros(T.nVars), T, draws = 1000,warmup_periods = 100) + ggiirrff2 = girf(second_order_state_update, SSS_delta, zeros(T.nVars), false, T, draws = 1000,warmup_periods = 100) @test isapprox(ggiirrff2[4,1,:],[-0.0003668849861768406 0.0021711333455274096],rtol = 1e-3) - iirrff2 = irf(second_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + iirrff2 = irf(second_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff2[4,1,:],[-0.0004547347878067665, 0.0020831426377533636],rtol = 1e-6) SSS_delta = RBC_CME.solution.non_stochastic_steady_state[1:length(RBC_CME.var)] - RBC_CME.solution.perturbation.third_order.stochastic_steady_state - ggiirrff3 = girf(third_order_state_update, SSS_delta, zeros(T.nVars), T,draws = 1000,warmup_periods = 100) + ggiirrff3 = girf(third_order_state_update, SSS_delta, zeros(T.nVars), false, T,draws = 1000,warmup_periods = 100) @test isapprox(ggiirrff3[4,1,:],[ -0.00036686142588429404 0.002171120660323429],rtol = 1e-3) - iirrff3 = irf(third_order_state_update, zeros(T.nVars), zeros(T.nVars), T) + iirrff3 = irf(third_order_state_update, zeros(T.nVars), zeros(T.nVars), false, T) @test isapprox(iirrff3[4,1,:],[-0.00045473149068020854, 0.002083198241302615], rtol = 1e-6) end From b5f1da7952cacc663fb55f6e51641bfb61341ae4 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 18:12:59 +0200 Subject: [PATCH 48/83] update ci --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4c3c3d427..90f317179 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: matrix: version: - '1.8' - - '^1.9.0-0' #^1-0 and ^1.9-0 are not recognised + - '1.9' #^1-0 and ^1.9-0 are not recognised # - 'nightly' # fails for zygote os: - ubuntu-latest @@ -26,10 +26,10 @@ jobs: include: - os: ubuntu-latest prefix: xvfb-run - - version: 'nightly' - os: ubuntu-latest - arch: x64 - allow_failure: true + # - version: 'nightly' + # os: ubuntu-latest + # arch: x64 + # allow_failure: true steps: - uses: actions/checkout@v3 - uses: julia-actions/setup-julia@latest From 6cab306caeac594bc63a6cb2710541fee2951be0 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 18:30:34 +0200 Subject: [PATCH 49/83] get_solution returns higher order solutions --- docs/src/unfinished_docs/todo.md | 9 +++--- src/get_functions.jl | 50 +++++++++++++++++++++++++++++--- test/functionality_tests.jl | 17 ++++++----- 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index c71761f69..7f73b1ac0 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -6,13 +6,10 @@ - [ ] for cond forecastind and kalman, get rid of observables input and use axis key of data input - [ ] for cond forecasting allow less shocks than conditions with a warning. should be svd then - [ ] have parser accept rss | (r[ss] - 1) * 400 = rss -- [ ] when doing calibration with optiimiser have better return values when he doesnt find a solution (probably NaN) -- [ ] add pruning +- [ ] when doing calibration with optimiser have better return values when he doesnt find a solution (probably NaN) - [ ] sampler returned negative std. 
investigate and come up with solution ensuring sampler can continue - [ ] include weakdeps: https://pkgdocs.julialang.org/dev/creating-packages/#Weak-dependencies - [ ] have get_std take variables as an input -- [ ] get_solution for higher order -- [ ] get solution higher order with multidimensional array (states, 1 and 2 partial derivatives variables names as dimensions in 2order case) - [ ] more informative errors when something goes wrong when writing a model - [ ] initial state accept keyed array - [ ] bring solution error into an object of the model so we dont have to pass it on as output @@ -44,7 +41,9 @@ - [ ] figure out combinations for inputs (parameters and variables in different formats for get_irf for example) - [ ] Find any SS by optimising over both SS guesses and parameter inputs - [ ] weed out SS solver and saved objects - + +- [x] get solution higher order with multidimensional array (states, 1 and 2 partial derivatives variables names as dimensions in 2order case) +- [x] add pruning - [x] add other outputs from estimation (smoothed, filter states and shocks) - [x] shorten plot_irf (take inspiration from model estimate) - [x] fix solution plot diff --git a/src/get_functions.jl b/src/get_functions.jl index b5addf1f0..66d07d416 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -1132,7 +1132,7 @@ function get_solution(𝓂::β„³; # write_parameters_input!(𝓂,parameters, verbose = verbose) - @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran] "This function only works for linear solutions. Choose a respective algorithm." + # @assert algorithm ∈ [:linear_time_iteration, :riccati, :first_order, :quadratic_iteration, :binder_pesaran] "This function only works for linear solutions. Choose a respective algorithm." 
solve!(𝓂, parameters = parameters, verbose = verbose, dynamics = true, algorithm = algorithm) @@ -1144,9 +1144,51 @@ function get_solution(𝓂::β„³; solution_matrix = 𝓂.solution.perturbation.quadratic_iteration.solution_matrix end - KeyedArray([𝓂.solution.non_stochastic_steady_state[1:length(𝓂.var)] solution_matrix]'; - Steady_state__States__Shocks = [:Steady_state; map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], - Variables = 𝓂.var) + if algorithm == :second_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.second_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + elseif algorithm == :pruned_second_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.pruned_second_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + elseif algorithm == :third_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.third_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3,4]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + States__ShocksΒ³ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + elseif algorithm == :pruned_third_order + return KeyedArray(permutedims(reshape(𝓂.solution.perturbation.pruned_third_order.solution_matrix, + 𝓂.timings.nVars, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo, + 𝓂.timings.nPast_not_future_and_mixed + 1 + 𝓂.timings.nExo), + [2,1,3,4]); + States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var, + States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + States__ShocksΒ³ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + else + return 
KeyedArray([𝓂.solution.non_stochastic_steady_state[1:length(𝓂.var)] solution_matrix]'; + Steady_state__States__Shocks = [:Steady_state; map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], + Variables = 𝓂.var) + end end diff --git a/test/functionality_tests.jl b/test/functionality_tests.jl index 448ffe233..b6a991635 100644 --- a/test/functionality_tests.jl +++ b/test/functionality_tests.jl @@ -21,13 +21,6 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = nsss = get_SS(m) if algorithm == :first_order - sols_nv = get_solution(m) - sols = get_solution(m, verbose = true) - new_sols1 = get_solution(m, verbose = true, parameters = m.parameter_values * 1.0001) - new_sols2 = get_solution(m, verbose = true, parameters = (m.parameters[1] => m.parameter_values[1] * 1.0001)) - new_sols3 = get_solution(m, verbose = true, parameters = Tuple(m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001)) - new_sols4 = get_solution(m, verbose = true, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] / 1.0001)) - old_sols = get_solution(m, verbose = true, parameters = old_par_vals) auto_corr_nv = get_autocorrelation(m) auto_corrr = get_autocorrelation(m, verbose = true) @@ -276,6 +269,16 @@ function functionality_test(m; algorithm = :first_order, plots = true, verbose = SSS = get_stochastic_steady_state(m, algorithm = algorithm) end + + # get_solution + sols_nv = get_solution(m, algorithm = algorithm) + sols = get_solution(m, algorithm = algorithm, verbose = true) + new_sols1 = get_solution(m, algorithm = algorithm, verbose = true, parameters = m.parameter_values * 1.0001) + new_sols2 = get_solution(m, algorithm = algorithm, verbose = true, parameters = (m.parameters[1] => m.parameter_values[1] * 1.0001)) + new_sols3 = get_solution(m, algorithm = algorithm, verbose = true, parameters = Tuple(m.parameters[1:2] .=> m.parameter_values[1:2] * 1.0001)) + new_sols4 = get_solution(m, algorithm = algorithm, verbose = true, parameters = (m.parameters[1:2] .=> m.parameter_values[1:2] / 1.0001)) + old_sols = get_solution(m, algorithm = algorithm, verbose = true, parameters = old_par_vals) + # irfs irfs_nv = get_irf(m, algorithm = algorithm) irfs = get_irf(m, verbose = true, algorithm = algorithm) From 6be90542579be7872f363e939f8cddf0daa303b9 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 18:36:23 +0200 Subject: [PATCH 50/83] improve docstring of get_solution --- src/get_functions.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/get_functions.jl b/src/get_functions.jl index 66d07d416..ddbb3f23f 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -1082,7 +1082,7 @@ get_ss = get_steady_state """ $(SIGNATURES) -Return the linearised solution and the non stochastic steady state (SS) of the model. +Return the solutoin of the model. In the linear case it returns the linearised solution and the non stochastic steady state (SS) of the model. In the nonlinear case (higher order perturbation) the function returns a multidimensional array with the endogenous variables as the second dimension and the state variables and shocks as the other dimensions. # Arguments - $MODEL @@ -1091,7 +1091,7 @@ Return the linearised solution and the non stochastic steady state (SS) of the m - `algorithm` [Default: `:first_order`, Type: `Symbol`]: algorithm to solve for the dynamics of the model. Only linear algorithms allowed. 
- $VERBOSE -The returned `KeyedArray` shows the SS, policy and transition functions of the model. The columns show the varibales including auxilliary endogenous and exogenous variables (due to leads and lags > 1). The rows are the SS, followed by the states, and exogenous shocks. +The returned `KeyedArray` shows as columns the endogenous variables inlcuding the auxilliary endogenous and exogenous variables (due to leads and lags > 1). The rows and other dimensions (depending on the chosen perturbation order) include the SS for the linear case only, followed by the states, and exogenous shocks. Subscripts following variable names indicate the timing (e.g. `variableβ‚β‚‹β‚β‚Ž` indicates the variable being in the past). Superscripts indicate leads or lags (e.g. `variableᴸ⁽²⁾` indicates the variable being in lead by two periods). If no super- or subscripts follow the variable name, the variable is in the present. # Examples ```jldoctest From afe72610c8cd021c66c2a0fdcad198335c918132 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 18:45:27 +0200 Subject: [PATCH 51/83] add weakdeps --- Project.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Project.toml b/Project.toml index f70f772ea..e0ba16019 100644 --- a/Project.toml +++ b/Project.toml @@ -32,6 +32,10 @@ Subscripts = "2b7f82d5-8785-4f63-971e-f18ddbeb808e" SymPy = "24249f21-da20-56a4-8eb1-6a02cf4ae2e6" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" +[weakdeps] +StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" +Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" + [compat] AxisKeys = "^0.2" BlockTriangularForm = "^0.1" @@ -54,9 +58,11 @@ Requires = "^1" RuntimeGeneratedFunctions = "^0.5" SpecialFunctions = "^2" SpeedMapping = "^0.3" +StatsPlots = "^0.15" Subscripts = "^0.1" SymPy = "^1" Symbolics = "^5" +Turing = "^0.25" julia = "1.8" [extras] From 9f494b420774643060854777733532d505fa6028 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 19:09:32 +0200 Subject: [PATCH 52/83] redo project.toml --- Project.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Project.toml b/Project.toml index e0ba16019..03b8cda19 100644 --- a/Project.toml +++ b/Project.toml @@ -32,10 +32,6 @@ Subscripts = "2b7f82d5-8785-4f63-971e-f18ddbeb808e" SymPy = "24249f21-da20-56a4-8eb1-6a02cf4ae2e6" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" -[weakdeps] -StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" -Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" - [compat] AxisKeys = "^0.2" BlockTriangularForm = "^0.1" @@ -82,3 +78,7 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [targets] test = ["Aqua", "CSV", "DataFrames", "DynamicPPL", "MCMCChains", "LineSearches", "Optim", "Test", "Turing", "FiniteDifferences", "Zygote", "Plots", "StatsPlots"] + +[weakdeps] +StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" +Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" \ No newline at end of file From 2bd1aec0859aab851a5e1840d5f772079c7c9a3d Mon Sep 17 00:00:00 2001 From: thorek1 Date: Tue, 9 May 2023 19:40:29 +0200 Subject: [PATCH 53/83] redo implicitdiff --- src/MacroModelling.jl | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index a1ad93ee5..1d3f6413b 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -2462,7 +2462,7 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = end -function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}; T::timings, 
explosive::Bool = false) #::AbstractMatrix{Real}, +function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}, solved::Bool; T::timings, explosive::Bool = false) #::AbstractMatrix{Real}, expand = @ignore_derivatives @views [β„’.diagm(ones(T.nVars))[T.future_not_past_and_mixed_idx,:], β„’.diagm(ones(T.nVars))[T.past_not_future_and_mixed_idx,:]] A = @views βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] * expand[1] @@ -2485,12 +2485,12 @@ function riccati_forward(βˆ‡β‚::Matrix{β„±.Dual{Z,S,N}}; T::timings = T, explos ps = mapreduce(β„±.partials, hcat, βˆ‡β‚)' # get f(vs) - val, solved = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) + val, solved = riccati_forward(βˆ‡Μ‚β‚; T = T, explosive = explosive) if solved # get J(f, vs) * ps (cheating). Write your custom rule here - B = β„±.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚) - A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val) + B = β„±.jacobian(x -> riccati_conditions(x, val, solved; T = T), βˆ‡Μ‚β‚) + A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x, solved; T = T), val) # B = Zygote.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚)[1] # A = Zygote.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val)[1] @@ -2511,17 +2511,15 @@ function riccati_forward(βˆ‡β‚::Matrix{β„±.Dual{Z,S,N}}; T::timings = T, explos end,size(val)), solved end +riccati_ = ImplicitFunction(riccati_forward, riccati_conditions) # riccati_AD = ImplicitFunction(riccati_forward, riccati_conditions) -riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive)[1], (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) +# riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive)[1], (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false)::Tuple{Matrix{S},Bool} where S <: Real # A = riccati_AD(βˆ‡β‚, T = T, explosive = explosive) - riccati = riccati_(βˆ‡β‚, T = T, explosive = explosive) - A = riccati(βˆ‡β‚) - - solved = @ignore_derivatives !(isapprox(sum(abs,A), 0, rtol = eps())) - + A, solved = riccati_(βˆ‡β‚; T = T, explosive = explosive) + if !solved return hcat(A, zeros(size(A,1),T.nExo)), solved end @@ -2661,7 +2659,7 @@ end -function calculate_third_order_solution(βˆ‡β‚::AbstractMatrix{<: Real}, #first order derivatives +function calculate_third_order_solution(βˆ‡β‚::AbstractMatrix{<: Real}, #first order derivatives βˆ‡β‚‚::SparseMatrixCSC{<: Real}, #second order derivatives βˆ‡β‚ƒ::SparseMatrixCSC{<: Real}, #third order derivatives 𝑺₁::AbstractMatrix{<: Real}, #first order solution From e099c8ca115a83069a3224c7530d4edde3c5e159 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 10 May 2023 00:05:39 +0200 Subject: [PATCH 54/83] test pruning --- test/test_higher_order.jl | 189 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 test/test_higher_order.jl diff --git a/test/test_higher_order.jl b/test/test_higher_order.jl new file mode 100644 index 000000000..3b5590a86 --- /dev/null +++ b/test/test_higher_order.jl @@ -0,0 +1,189 @@ +using MacroModelling + +Gali_2015_chapter_3_nonlinear = nothing +include("models/Gali_2015_chapter_3_nonlinear.jl") + + + +@model RBC_CME begin + y[0]=A[0]*k[-1]^alpha + 1/c[0]=beta*1/c[1]*(alpha*A[1]*k[0]^(alpha-1)+(1-delta)) + 1/c[0]=beta*1/c[1]*(R[0]/Pi[+1]) + R[0] * beta =(Pi[0]/Pibar)^phi_pi + 
A[0]*k[-1]^alpha=c[0]+k[0]-(1-delta*z_delta[0])*k[-1] + z_delta[0] = 1 - rho_z_delta + rho_z_delta * z_delta[-1] + std_z_delta * delta_eps[x] + A[0] = 1 - rhoz + rhoz * A[-1] + std_eps * eps_z[x] +end + +@parameters RBC_CME begin + alpha = .157 + beta = .999 + delta = .0226 + Pibar = 1.0008 + phi_pi = 1.5 + rhoz = .9 + std_eps = .0068 + rho_z_delta = .9 + std_z_delta = .005 +end + +# c is conditioned to deviate by 0.01 in period 1 and y is conditioned to deviate by 0.02 in period 3 +conditions = KeyedArray(Matrix{Union{Nothing,Float64}}(undef,2,2),Variables = [:c,:y], Periods = 1:2) +conditions[1,1] = .01 +conditions[2,2] = .02 + +# in period 2 second shock (eps_z) is conditioned to take a value of 0.05 +shocks = Matrix{Union{Nothing,Float64}}(undef,2,1) +shocks[1,1] = .05 + +plot_conditional_forecast(RBC_CME, conditions, shocks = shocks, conditions_in_levels = false) + + + + +# Gali_2015_chapter_3_nonlinear.solution.outdated_algorithms |>collect|>sort +# Gali_2015_chapter_3_nonlinear.solution.algorithms |>collect|>sort + + +irf2 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order) +irf2 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order, parameters = :std_nu => 1) +irf2p = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_second_order) +irf1 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :first_order) +irf3 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :third_order) +irf3 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_third_order) + + +𝓂 = Gali_2015_chapter_3_nonlinear +solution_matrix = 𝓂.solution.perturbation.third_order.solution_matrix +reshape(solution_matrix,23,8,8,8)[:,1,1,1] +solution_matrix[:,1] + +solution_matrix = 𝓂.solution.perturbation.first_order.solution_matrix +solution_mat = permutedims(reshape(solution_matrix,23,8,8,8),[2,1,3,4]); +permutedims(solution_mat,[2,1,3,4]); + + +KeyedArray(permutedims(reshape(solution_matrix,23,8,8,8),[2,1,3,4]); +States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], +Variables = 𝓂.var, +States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], +States__ShocksΒ³ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) + + + + +get_SSS(Gali_2015_chapter_3_nonlinear) +get_SSS(Gali_2015_chapter_3_nonlinear, parameters = :std_nu => 1) +get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_second_order, parameters = :std_nu => 1) +get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_third_order, parameters = :std_nu => 1) +get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :third_order, parameters = :std_nu => 1) + +get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_second_order, parameters = :std_nu => 1) + +Gali_2015_chapter_3_nonlinear.solution.perturbation.pruned_second_order.stochastic_steady_state + + +get_SSS(Caldara_et_al_2012) + +get_SSS(Caldara_et_al_2012, algorithm = :pruned_second_order) + +import StatsPlots +plot_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order, shocks = :eps_nu, variables = [:Y,:Pi,:R,:W_real]) +plot_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order, shocks = :eps_a, variables = [:Y,:Pi,:R,:W_real], parameters = :std_nu => 1) +get_SSS(Gali_2015_chapter_3_nonlinear) +plot_solution(Caldara_et_al_2012,:k,algorithm = 
[:first_order,:second_order, :pruned_second_order,:third_order, :pruned_third_order]) +plot_solution(Caldara_et_al_2012,:k,algorithm = [:first_order,:second_order, :third_order]) +plot_solution(Caldara_et_al_2012,:k,algorithm = [:first_order,:second_order, :pruned_third_order]) +plot_solution(Caldara_et_al_2012,:k,algorithm = [:first_order,:second_order, :third_order, :pruned_third_order]) +plot_solution(Caldara_et_al_2012,:k,algorithm = [:second_order, :third_order]) + +plot_solution(Caldara_et_al_2012,:k,algorithm = [:second_order]) + +get_SSS(Caldara_et_al_2012, algorithm = :third_order) + +get_SSS(Caldara_et_al_2012, algorithm = :pruned_third_order) + +import ComponentArrays as π’ž +𝓂 = Caldara_et_al_2012 +verbose = true +parameters = 𝓂.parameter_values + + + +SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) + +SS = SS_and_pars[1:end - length(𝓂.calibration_equations)] + +βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) + +𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + +βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) + +𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) + +𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] + +# state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), SS, 𝓂) + +import LinearAlgebra as β„’ +using SpeedMapping +tol = eps() + + +state = zero(SS) +pruned_state = zero(SS) +shock = zeros(𝓂.timings.nExo) + +aug_state .= [state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + +aug_pruned_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + +sol_pruned = speedmapping(state; + m! = (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_pruned_state, aug_pruned_state) / 2 + pruned_state .= 𝐒₁ * aug_pruned_state + end, +tol = tol, maps_limit = 10000) + +sol_pruned.minimizer + + +𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_pruned_state, aug_pruned_state) / 2 + +state = zero(SS) +pruned_state = zero(SS) + +aug_state .= [state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + +aug_pruned_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + +sol = speedmapping(state; + m! 
= (SSS, sss) -> begin + aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] + 1 + shock] + + SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 + end, +tol = tol, maps_limit = 10000) + +sol.minimizer + +isapprox(sol_pruned.minimizer,sol.minimizer, rtol = eps(Float32)) \ No newline at end of file From afa7b40b047983ea4e7ac6ace2a87fd04f2a8d34 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 10 May 2023 00:10:35 +0200 Subject: [PATCH 55/83] undo implicitdiff changes --- src/MacroModelling.jl | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 1d3f6413b..c08b79017 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -2461,8 +2461,7 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = return @view(A[T.reorder,:]), true end - -function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}, solved::Bool; T::timings, explosive::Bool = false) #::AbstractMatrix{Real}, +function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}; T::timings, explosive::Bool = false) expand = @ignore_derivatives @views [β„’.diagm(ones(T.nVars))[T.future_not_past_and_mixed_idx,:], β„’.diagm(ones(T.nVars))[T.past_not_future_and_mixed_idx,:]] A = @views βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] * expand[1] @@ -2484,15 +2483,12 @@ function riccati_forward(βˆ‡β‚::Matrix{β„±.Dual{Z,S,N}}; T::timings = T, explos # you can play with the dimension here, sometimes it makes sense to transpose ps = mapreduce(β„±.partials, hcat, βˆ‡β‚)' - # get f(vs) - val, solved = riccati_forward(βˆ‡Μ‚β‚; T = T, explosive = explosive) + val, solved = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) if solved # get J(f, vs) * ps (cheating). 
Write your custom rule here - B = β„±.jacobian(x -> riccati_conditions(x, val, solved; T = T), βˆ‡Μ‚β‚) - A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x, solved; T = T), val) - # B = Zygote.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚)[1] - # A = Zygote.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val)[1] + B = β„±.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚) + A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val) AΜ‚ = RF.lu(A, check = false) @@ -2511,15 +2507,15 @@ function riccati_forward(βˆ‡β‚::Matrix{β„±.Dual{Z,S,N}}; T::timings = T, explos end,size(val)), solved end -riccati_ = ImplicitFunction(riccati_forward, riccati_conditions) -# riccati_AD = ImplicitFunction(riccati_forward, riccati_conditions) - -# riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive)[1], (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) +riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive)[1], (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false)::Tuple{Matrix{S},Bool} where S <: Real # A = riccati_AD(βˆ‡β‚, T = T, explosive = explosive) - A, solved = riccati_(βˆ‡β‚; T = T, explosive = explosive) - + riccati = riccati_(βˆ‡β‚, T = T, explosive = explosive) + A = riccati(βˆ‡β‚) + + solved = @ignore_derivatives !(isapprox(sum(abs,A), 0, rtol = eps())) + if !solved return hcat(A, zeros(size(A,1),T.nExo)), solved end From ca1632f74816f797d6056bd1276b0eef22cce9df Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 10 May 2023 09:39:28 +0200 Subject: [PATCH 56/83] typo --- src/get_functions.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/get_functions.jl b/src/get_functions.jl index ddbb3f23f..7307a6c87 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -1082,7 +1082,7 @@ get_ss = get_steady_state """ $(SIGNATURES) -Return the solutoin of the model. In the linear case it returns the linearised solution and the non stochastic steady state (SS) of the model. In the nonlinear case (higher order perturbation) the function returns a multidimensional array with the endogenous variables as the second dimension and the state variables and shocks as the other dimensions. +Return the solution of the model. In the linear case it returns the linearised solution and the non stochastic steady state (SS) of the model. In the nonlinear case (higher order perturbation) the function returns a multidimensional array with the endogenous variables as the second dimension and the state variables and shocks as the other dimensions. # Arguments - $MODEL From ab69a0234558b29dd3725d6d42e3c2e0ea30aebe Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 10 May 2023 09:41:32 +0200 Subject: [PATCH 57/83] todos --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 7f73b1ac0..5b61cd32f 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -8,6 +8,7 @@ - [ ] have parser accept rss | (r[ss] - 1) * 400 = rss - [ ] when doing calibration with optimiser have better return values when he doesnt find a solution (probably NaN) - [ ] sampler returned negative std. 
investigate and come up with solution ensuring sampler can continue +- [ ] automatically adjust plots for different legend widhts and heights - [ ] include weakdeps: https://pkgdocs.julialang.org/dev/creating-packages/#Weak-dependencies - [ ] have get_std take variables as an input - [ ] more informative errors when something goes wrong when writing a model From cc8d21535d393418741ea92a0a5b24d2879d120b Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 10 May 2023 09:42:30 +0200 Subject: [PATCH 58/83] rm higher order --- test/test_higher_order.jl | 189 -------------------------------------- 1 file changed, 189 deletions(-) delete mode 100644 test/test_higher_order.jl diff --git a/test/test_higher_order.jl b/test/test_higher_order.jl deleted file mode 100644 index 3b5590a86..000000000 --- a/test/test_higher_order.jl +++ /dev/null @@ -1,189 +0,0 @@ -using MacroModelling - -Gali_2015_chapter_3_nonlinear = nothing -include("models/Gali_2015_chapter_3_nonlinear.jl") - - - -@model RBC_CME begin - y[0]=A[0]*k[-1]^alpha - 1/c[0]=beta*1/c[1]*(alpha*A[1]*k[0]^(alpha-1)+(1-delta)) - 1/c[0]=beta*1/c[1]*(R[0]/Pi[+1]) - R[0] * beta =(Pi[0]/Pibar)^phi_pi - A[0]*k[-1]^alpha=c[0]+k[0]-(1-delta*z_delta[0])*k[-1] - z_delta[0] = 1 - rho_z_delta + rho_z_delta * z_delta[-1] + std_z_delta * delta_eps[x] - A[0] = 1 - rhoz + rhoz * A[-1] + std_eps * eps_z[x] -end - -@parameters RBC_CME begin - alpha = .157 - beta = .999 - delta = .0226 - Pibar = 1.0008 - phi_pi = 1.5 - rhoz = .9 - std_eps = .0068 - rho_z_delta = .9 - std_z_delta = .005 -end - -# c is conditioned to deviate by 0.01 in period 1 and y is conditioned to deviate by 0.02 in period 3 -conditions = KeyedArray(Matrix{Union{Nothing,Float64}}(undef,2,2),Variables = [:c,:y], Periods = 1:2) -conditions[1,1] = .01 -conditions[2,2] = .02 - -# in period 2 second shock (eps_z) is conditioned to take a value of 0.05 -shocks = Matrix{Union{Nothing,Float64}}(undef,2,1) -shocks[1,1] = .05 - -plot_conditional_forecast(RBC_CME, conditions, shocks = shocks, conditions_in_levels = false) - - - - -# Gali_2015_chapter_3_nonlinear.solution.outdated_algorithms |>collect|>sort -# Gali_2015_chapter_3_nonlinear.solution.algorithms |>collect|>sort - - -irf2 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order) -irf2 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order, parameters = :std_nu => 1) -irf2p = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_second_order) -irf1 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :first_order) -irf3 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :third_order) -irf3 = get_irf(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_third_order) - - -𝓂 = Gali_2015_chapter_3_nonlinear -solution_matrix = 𝓂.solution.perturbation.third_order.solution_matrix -reshape(solution_matrix,23,8,8,8)[:,1,1,1] -solution_matrix[:,1] - -solution_matrix = 𝓂.solution.perturbation.first_order.solution_matrix -solution_mat = permutedims(reshape(solution_matrix,23,8,8,8),[2,1,3,4]); -permutedims(solution_mat,[2,1,3,4]); - - -KeyedArray(permutedims(reshape(solution_matrix,23,8,8,8),[2,1,3,4]); -States__ShocksΒΉ = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], -Variables = 𝓂.var, -States__ShocksΒ² = [map(x->Symbol(string(x) * "β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)], -States__ShocksΒ³ = [map(x->Symbol(string(x) * 
"β‚β‚‹β‚β‚Ž"),𝓂.timings.past_not_future_and_mixed); :Volatility;map(x->Symbol(string(x) * "β‚β‚“β‚Ž"),𝓂.exo)]) - - - - -get_SSS(Gali_2015_chapter_3_nonlinear) -get_SSS(Gali_2015_chapter_3_nonlinear, parameters = :std_nu => 1) -get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_second_order, parameters = :std_nu => 1) -get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_third_order, parameters = :std_nu => 1) -get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :third_order, parameters = :std_nu => 1) - -get_SSS(Gali_2015_chapter_3_nonlinear, algorithm = :pruned_second_order, parameters = :std_nu => 1) - -Gali_2015_chapter_3_nonlinear.solution.perturbation.pruned_second_order.stochastic_steady_state - - -get_SSS(Caldara_et_al_2012) - -get_SSS(Caldara_et_al_2012, algorithm = :pruned_second_order) - -import StatsPlots -plot_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order, shocks = :eps_nu, variables = [:Y,:Pi,:R,:W_real]) -plot_irf(Gali_2015_chapter_3_nonlinear, algorithm = :second_order, shocks = :eps_a, variables = [:Y,:Pi,:R,:W_real], parameters = :std_nu => 1) -get_SSS(Gali_2015_chapter_3_nonlinear) -plot_solution(Caldara_et_al_2012,:k,algorithm = [:first_order,:second_order, :pruned_second_order,:third_order, :pruned_third_order]) -plot_solution(Caldara_et_al_2012,:k,algorithm = [:first_order,:second_order, :third_order]) -plot_solution(Caldara_et_al_2012,:k,algorithm = [:first_order,:second_order, :pruned_third_order]) -plot_solution(Caldara_et_al_2012,:k,algorithm = [:first_order,:second_order, :third_order, :pruned_third_order]) -plot_solution(Caldara_et_al_2012,:k,algorithm = [:second_order, :third_order]) - -plot_solution(Caldara_et_al_2012,:k,algorithm = [:second_order]) - -get_SSS(Caldara_et_al_2012, algorithm = :third_order) - -get_SSS(Caldara_et_al_2012, algorithm = :pruned_third_order) - -import ComponentArrays as π’ž -𝓂 = Caldara_et_al_2012 -verbose = true -parameters = 𝓂.parameter_values - - - -SS_and_pars, solution_error = 𝓂.SS_solve_func(parameters, 𝓂, verbose) - -SS = SS_and_pars[1:end - length(𝓂.calibration_equations)] - -βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - -𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) - -βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) - -𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) - -𝐒₁ = [𝐒₁[:,1:𝓂.timings.nPast_not_future_and_mixed] zeros(𝓂.timings.nVars) 𝐒₁[:,𝓂.timings.nPast_not_future_and_mixed+1:end]] - -# state, converged = second_order_stochastic_steady_state_iterative_solution(π’ž.ComponentArray(; 𝐒₁, 𝐒₂), SS, 𝓂) - -import LinearAlgebra as β„’ -using SpeedMapping -tol = eps() - - -state = zero(SS) -pruned_state = zero(SS) -shock = zeros(𝓂.timings.nExo) - -aug_state .= [state[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - - -aug_pruned_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - - -sol_pruned = speedmapping(state; - m! 
= (SSS, sss) -> begin - aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - - - SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_pruned_state, aug_pruned_state) / 2 - pruned_state .= 𝐒₁ * aug_pruned_state - end, -tol = tol, maps_limit = 10000) - -sol_pruned.minimizer - - -𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_pruned_state, aug_pruned_state) / 2 - -state = zero(SS) -pruned_state = zero(SS) - -aug_state .= [state[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - - -aug_pruned_state = [pruned_state[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - -sol = speedmapping(state; - m! = (SSS, sss) -> begin - aug_state .= [sss[𝓂.timings.past_not_future_and_mixed_idx] - 1 - shock] - - SSS .= 𝐒₁ * aug_state + 𝐒₂ * β„’.kron(aug_state, aug_state) / 2 - end, -tol = tol, maps_limit = 10000) - -sol.minimizer - -isapprox(sol_pruned.minimizer,sol.minimizer, rtol = eps(Float32)) \ No newline at end of file From 267ea8231e96caf943c28f1954e2bcef9a1b3b2d Mon Sep 17 00:00:00 2001 From: thorek1 Date: Wed, 10 May 2023 09:45:49 +0200 Subject: [PATCH 59/83] bump version --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 03b8cda19..5791e8e29 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "MacroModelling" uuid = "687ffad2-3618-405e-ac50-e0f7b9c75e44" authors = ["Thore Kockerols "] -version = "0.1.22" +version = "0.1.23" [deps] AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5" From b8a837145dff12cfc48919cfa0220cc699cd3966 Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Wed, 10 May 2023 11:27:26 +0200 Subject: [PATCH 60/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 5b61cd32f..aca6c9489 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -10,6 +10,7 @@ - [ ] sampler returned negative std. investigate and come up with solution ensuring sampler can continue - [ ] automatically adjust plots for different legend widhts and heights - [ ] include weakdeps: https://pkgdocs.julialang.org/dev/creating-packages/#Weak-dependencies +- [ ] write to mod file for unicode characters. have them take what you would type: \alpha\bar - [ ] have get_std take variables as an input - [ ] more informative errors when something goes wrong when writing a model - [ ] initial state accept keyed array From 42b7fdf1d4118238f3ba97afe2a151245d93db9c Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Thu, 11 May 2023 00:46:48 +0200 Subject: [PATCH 61/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index aca6c9489..a20d348c3 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced growth path handling +- [ ] fix ss of pruned solution in plotsolution. seems detached - [ ] for cond forecastind and kalman, get rid of observables input and use axis key of data input - [ ] for cond forecasting allow less shocks than conditions with a warning. 
should be svd then - [ ] have parser accept rss | (r[ss] - 1) * 400 = rss From 0e7f06b4f5dc889299fe86cde5b52c4109ddbb30 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Thu, 11 May 2023 11:13:21 +0200 Subject: [PATCH 62/83] fixed plotting of pruned solution --- src/plotting.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/plotting.jl b/src/plotting.jl index 076125300..a850f3c1e 100644 --- a/src/plotting.jl +++ b/src/plotting.jl @@ -751,6 +751,8 @@ Plot the solution of the model (mapping of past states to present variables) aro The (non) stochastic steady state is plotted along with the mapping from the chosen past state to one present variable per plot. All other (non-chosen) states remain in the (non) stochastic steady state. +In the case of pruned solutions the "pruned" state has as a baseline the non stochastic steady state and the "actual" state refers to the stochastic steady state. The plot then shows the mapping from `Οƒ` standard deviations added to these two steady states and the present variables. Note that there is no unique mapping between the "pruned" and "actual" states. Furthermore, the mapping of the "actual" state is itself dependend on the "pruned" state so that the plots shown are just one realisation of inifite possible mappings. + # Arguments - $MODEL - `state` [Type: `Symbol`]: state variable to be shown on x-axis. @@ -841,16 +843,13 @@ function plot_solution(𝓂::β„³, else solve!(𝓂, verbose = verbose, algorithm = :first_order, dynamics = true, parameters = parameters) end - end - SS_and_std = get_moments(𝓂, derivatives = false, parameters = parameters, verbose = verbose) - full_NSSS = sort(union(𝓂.var,𝓂.aux,𝓂.exo_present)) full_NSSS[indexin(𝓂.aux,full_NSSS)] = map(x -> Symbol(replace(string(x), r"ᴸ⁽⁻?[⁰¹²³⁴⁡⁢⁷⁸⁹]+⁾" => "")), 𝓂.aux) full_SS = [s ∈ 𝓂.exo_present ? 0 : SS_and_std[1](s) for s in full_NSSS] @@ -871,6 +870,7 @@ function plot_solution(𝓂::β„³, legend_plot = StatsPlots.plot(framestyle = :none) + if :first_order ∈ algorithm StatsPlots.plot!(fill(0,1,1), framestyle = :none, @@ -985,9 +985,9 @@ function plot_solution(𝓂::β„³, has_impact = has_impact || sum(abs2,variable_second .- sum(variable_second)/length(variable_second))/(length(variable_second)-1) > eps() end - + if :pruned_second_order ∈ algorithm - variable_pruned_second = [𝓂.solution.perturbation.pruned_second_order.state_update(SSS2p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), SSS2p - full_SS .+ state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] + variable_pruned_second = [𝓂.solution.perturbation.pruned_second_order.state_update(SSS2p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] variable_pruned_second = [(abs(x) > eps() ? x : 0.0) + SS_and_std[1](kk) for x in variable_pruned_second] @@ -1003,7 +1003,7 @@ function plot_solution(𝓂::β„³, end if :pruned_third_order ∈ algorithm - variable_pruned_third = [𝓂.solution.perturbation.pruned_third_order.state_update(SSS3p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), SSS3p - full_SS .+ state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] + variable_pruned_third = [𝓂.solution.perturbation.pruned_third_order.state_update(SSS3p - full_SS .+ state_selector * x, zeros(𝓂.timings.nExo), state_selector * x)[1][indexin([k],𝓂.timings.var)][1] for x in state_range] variable_pruned_third = [(abs(x) > eps() ? 
x : 0.0) + SS_and_std[1](kk) for x in variable_pruned_third] From d237f4feb03a4d283a9cfaba507d1f37b4b37d3b Mon Sep 17 00:00:00 2001 From: thorek1 Date: Thu, 11 May 2023 11:22:23 +0200 Subject: [PATCH 63/83] update todo --- docs/src/unfinished_docs/todo.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index a20d348c3..6ec4cf60f 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,7 +3,7 @@ ## High priority - [ ] add balanced growth path handling -- [ ] fix ss of pruned solution in plotsolution. seems detached +- [ ] write docs for (non-linear) solution algorithms - [ ] for cond forecastind and kalman, get rid of observables input and use axis key of data input - [ ] for cond forecasting allow less shocks than conditions with a warning. should be svd then - [ ] have parser accept rss | (r[ss] - 1) * 400 = rss @@ -45,6 +45,7 @@ - [ ] Find any SS by optimising over both SS guesses and parameter inputs - [ ] weed out SS solver and saved objects +- [x] fix ss of pruned solution in plotsolution. seems detached - [x] get solution higher order with multidimensional array (states, 1 and 2 partial derivatives variables names as dimensions in 2order case) - [x] add pruning - [x] add other outputs from estimation (smoothed, filter states and shocks) From 162de94b5abd5cd211fbf44c6742d14324c98bc1 Mon Sep 17 00:00:00 2001 From: Thore Kockerols Date: Sun, 21 May 2023 23:38:25 +0200 Subject: [PATCH 64/83] Update todo.md --- docs/src/unfinished_docs/todo.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/unfinished_docs/todo.md b/docs/src/unfinished_docs/todo.md index 6ec4cf60f..5c89f2501 100644 --- a/docs/src/unfinished_docs/todo.md +++ b/docs/src/unfinished_docs/todo.md @@ -3,6 +3,7 @@ ## High priority - [ ] add balanced growth path handling +- [ ] add JOSS article (see Makie.jl) - [ ] write docs for (non-linear) solution algorithms - [ ] for cond forecastind and kalman, get rid of observables input and use axis key of data input - [ ] for cond forecasting allow less shocks than conditions with a warning. 
should be svd then From ba533adbb3e0b4b682930683656d4b4e392eb4fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Farkas?= Date: Thu, 25 May 2023 16:56:40 +0200 Subject: [PATCH 65/83] Add files via upload Implementation of the filter-free DSGE estimation in MacroModelling.jl --- test/test_filterfree.jl | 214 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100644 test/test_filterfree.jl diff --git a/test/test_filterfree.jl b/test/test_filterfree.jl new file mode 100644 index 000000000..87589d650 --- /dev/null +++ b/test/test_filterfree.jl @@ -0,0 +1,214 @@ +using MacroModelling +import Turing +import Turing: NUTS, sample, logpdf +# import Optim, LineSearches, Plots +using Random, CSV, DataFrames, MCMCChains, AxisKeys +import DynamicPPL: logjoint +import LinearAlgebra as β„’ +import ChainRulesCore: @ignore_derivatives, ignore_derivatives + +cd("C:/Users/fm007/Documents/GitHub/MacroModelling.jl/test") +include("models/FS2000.jl") + +FS2000 = m + +# load data +dat = CSV.read("data/FS2000_data.csv", DataFrame) +data = KeyedArray(Array(dat)',Variable = Symbol.("log_".*names(dat)),Time = 1:size(dat)[1]) +data = log.(data) + +# declare observables +observables = sort(Symbol.("log_".*names(dat))) + +# subset observables in data +data = data(observables,:) + +# declare parameters +alp = 0.356 +bet = 0.993 +gam = 0.0085 +mst = 1.0002 +rho = 0.129 +psi = 0.65 +del = 0.01 +z_e_a = 0.035449 +z_e_m = 0.008862 +parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m] +# filter data with parameters +filtered_errors = MacroModelling.get_estimated_shocks(FS2000, data; parameters= parameters) # filtered_states = get_estimated_variables(FS2000, data; parameters= parameters) + +# Define DSGE Turing model +Turing.@model function FS2000_filter_free_loglikelihood_function(data, model, observables) + + alp ~ Beta(0.356, 0.02, ΞΌΟƒ = true) + #bet ~ Beta(0.993, 0.002, ΞΌΟƒ = true) + #gam ~ Normal(0.0085, 0.003) + #mst ~ Normal(1.0002, 0.007) + rho ~ Beta(0.129, 0.223, ΞΌΟƒ = true) + #psi ~ Beta(0.65, 0.05, ΞΌΟƒ = true) + #del ~ Beta(0.01, 0.005, ΞΌΟƒ = true) + #z_e_a ~ InverseGamma(0.035449, Inf, ΞΌΟƒ = true) + #z_e_m ~ InverseGamma(0.008862, Inf, ΞΌΟƒ = true) + + #alp = 0.356 + bet = 0.993 + gam = 0.0085 + mst = 1.0002 + #rho = 0.129 + psi = 0.65 + del = 0.01 + z_e_a = 0.035449 + z_e_m = 0.008862 + + # Log likehood function inputs - + # I did not manage to delegate the sampling to another function - Would it be possible to call it in with an include() command? + shock_distribution = Turing.Normal() + algorithm = :first_order + parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m] + verbose::Bool = false + tol::AbstractFloat = eps() + filter = :filter_free + +# BEGINNING OF OBJECTIVE FUNCTION + + # draw intial conditions + x0 ~ Turing.filldist(shock_distribution,m.timings.nPast_not_future_and_mixed) # Initial conditions + + # draw errors + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks + + # reshape errors to vector + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + # Checks + @assert length(observables) == size(data)[1] "Data columns and number of observables are not identical. Make sure the data contains only the selected observables." + @assert length(observables) <= m.timings.nExo "Cannot estimate model with more observables than exogenous shocks. Have at least as many shocks as observable variables." 
+
+    @ignore_derivatives sort!(observables)
+    @ignore_derivatives solve!(m, verbose = verbose)
+
+    if isnothing(parameters)
+        parameters = m.parameter_values
+    else
+        ub = @ignore_derivatives fill(1e12 + rand(), length(m.parameters) + length(m.➕_vars))
+        lb = @ignore_derivatives - ub
+
+        for (i,v) in enumerate(m.bounded_vars)
+            if v ∈ m.parameters
+                @ignore_derivatives lb[i] = m.lower_bounds[i]
+                @ignore_derivatives ub[i] = m.upper_bounds[i]
+            end
+        end
+
+        if min(max(parameters,lb),ub) != parameters
+            return -Inf
+        end
+    end
+
+    SS_and_pars, solution_error = m.SS_solve_func(parameters, m, verbose)
+
+    if solution_error > tol || isnan(solution_error)
+        return -Inf
+    end
+
+    NSSS_labels = @ignore_derivatives [sort(union(m.exo_present,m.var))...,m.calibration_equations_parameters...]
+
+    obs_indices = @ignore_derivatives indexin(observables,NSSS_labels)
+
+    data_in_deviations = collect(data(observables)) .- SS_and_pars[obs_indices]
+
+    observables_and_states = @ignore_derivatives sort(union(m.timings.past_not_future_and_mixed_idx,indexin(observables,sort(union(m.aux,m.var,m.exo_present)))))
+
+    # solve DSGE with parameters
+    solution = get_solution(m, parameters, algorithm = algorithm)
+
+    # store solution
+    if algorithm == :first_order
+        𝐒₁ = solution[2]
+    else
+        𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end])
+    end
+
+    # Thore: we can probably skip this because it is computationally expensive and should drop out in sampling ... Mátyás: We cannot, as the initial-condition bias for the errors is important
+
+    # Option 1 - no initial condition sampling - biased errors but faster - no need to compute the LR covariance matrix
+    # x0 = zeros(m.timings.nPast_not_future_and_mixed)
+
+    # Option 2 - initial condition is sampled - unbiased errors - slow, as the LR covariance is needed.
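+    # The lines below implement Option 2: the standardised draw x0 is scaled by the
+    # model's long-run covariance (computed from the first-order solution and restricted
+    # to the predetermined states) to form the sampled initial condition, i.e. the
+    # "scale initial condition with ergodic variance" step used in the other test scripts.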
+ calculate_covariance_ = MacroModelling.calculate_covariance_AD(solution[2], T = m.timings, subset_indices = m.timings.past_not_future_and_mixed_idx) + long_run_covariance = calculate_covariance_(solution[2]) + initial_conditions =long_run_covariance * x0 # x0 + + # Declare states + state = zeros(typeof(Ο΅_draw[1]), m.timings.nVars, size(data, 2) ) + + # propagate the state space + if algorithm == :first_order + + aug_state = [initial_conditions + Ο΅[:,1]] + state[:,1] .= 𝐒₁ * aug_state + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + end + elseif algorithm == :second_order + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + elseif algorithm == :pruned_second_order + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + end + + # define data in deviations form SS + data_in_deviations = collect(data(observables)) .- SS_and_pars[obs_indices] + + # compute observation predictions - without ME + state_deviations = data_in_deviations - state[obs_indices,:] + # make_sure_state_equals_observable = sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(1e-4*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) *10^2 + make_sure_state_equals_observable = sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(1e-8*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) + # make_sure_state_equals_observable = -sum(abs2,state_deviations) * 1e30 +# END OF OBJECTIVE FUNCTION + + Turing.@addlogprob! make_sure_state_equals_observable#calculate_filterfree_loglikelihood(model, data(observables), observables; parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m]) +end + +FS2000_filterfree = FS2000_filter_free_loglikelihood_function(data, FS2000, observables) + +n_samples = 1000 + +samps = sample(FS2000_filterfree, NUTS(), n_samples, progress = true)#, init_params = sol) + +symbol_to_int(s) = parse(Int, string(s)[9:end-1]) +Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:m.timings.nExo*size(data,2)], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) +tmp = Turing.describe(Ο΅_chain) +Ο΅_mean = tmp[1][:, 2] +Ο΅_mean = reshape(Ο΅_mean, m.timings.nExo, Integer(size(Ο΅_mean,1)/m.timings.nExo)) +Ο΅_std = tmp[1][:, 3] +Ο΅_std = reshape(Ο΅_std, m.timings.nExo, Integer(size(Ο΅_std,1)/m.timings.nExo)) + +sum(abs,Ο΅_mean[1,end-20:end]-collect(filtered_errors[1,end-20:end]))<10^-4 From f8bbbb74dc1e14ae178c9b441dedabce92316e1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Farkas?= Date: Thu, 25 May 2023 17:09:51 +0200 Subject: [PATCH 66/83] Filter free estimation test using Monte Carlo The file is a test suite for filter-free DSGE estimation. It: -solves the RBC model using MacroModelling, -draws structural shocks from a t-distribution, and initial conditions -simulates the data, -Re-estimates the shocks using filter-free estimation. 
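A minimal, self-contained sketch of the same filter-free idea on a toy AR(1) state space
(for orientation only: the AR(1) model, the function name filter_free_ar1 and the penalty
variance Omega are illustrative assumptions, not MacroModelling or test-suite API). The
structural shocks enter as latent parameters in Turing and the model-implied states are
tied to the observed data through a tight Gaussian penalty instead of a Kalman filter:

import Turing, Random

Random.seed!(1)
T_obs = 50
rho_true, sigma_true = 0.9, 0.1
shocks_true = rand(Turing.TDist(4), T_obs)                  # t-distributed structural shocks
x = zeros(T_obs)
for t in 2:T_obs
    x[t] = rho_true * x[t-1] + sigma_true * shocks_true[t]  # simulate the state
end

Turing.@model function filter_free_ar1(data, Omega = 1e-8)
    rho    ~ Turing.Uniform(0.0, 0.99)
    sigma  ~ Turing.Uniform(0.001, 0.5)
    shocks ~ Turing.filldist(Turing.TDist(4), length(data))  # shocks sampled as latents
    state = zeros(eltype(shocks), length(data))
    for t in 2:length(data)
        state[t] = rho * state[t-1] + sigma * shocks[t]      # model-implied states
    end
    # tight Gaussian penalty tying the model states to the observed data (no Kalman filter)
    Turing.@addlogprob! sum(Turing.logpdf(Turing.Normal(0.0, sqrt(Omega)), data[t] - state[t]) for t in 1:length(data))
end

chain = Turing.sample(filter_free_ar1(x), Turing.NUTS(), 200, progress = false)

The penalty variance Omega plays the same role as the small identity-scaled covariance used
in the test files: shrinking it forces the model-implied states onto the observables, so the
posterior over the latent shocks concentrates on the sequence that reproduces the data.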
--- test/filter_free.jl | 169 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 test/filter_free.jl diff --git a/test/filter_free.jl b/test/filter_free.jl new file mode 100644 index 000000000..9dc7f9775 --- /dev/null +++ b/test/filter_free.jl @@ -0,0 +1,169 @@ + + +using MacroModelling +import Turing, StatsPlots , Plots, Random +import LinearAlgebra as β„’ + +@model RBC begin + K[0] = (1 - Ξ΄) * K[-1] + I[0] + Y[0] = Z[0] * K[-1]^Ξ± + Y[0] = C[0] + I[0] + 1 / C[0]^Ξ³ = Ξ² / C[1]^Ξ³ * (Ξ± * Y[1] / K[0] + (1 - Ξ΄)) + Z[0] = (1 - ρ) + ρ * Z[-1] + Οƒ * Ο΅[x] +end + + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.5 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. +end +solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) + +zsim = simulate(RBC) +zsim1 = hcat(zsim([:K,:Z],:,:)...) +zdata = β„’.reshape(zsim1,2,40) + +# z_rbc1 = hcat(zsim...) +# z_rbc1 = β„’.reshape(z_rbc1,size(RBC.var,1),40) + +# Simulate T observations from a random initial condition +m= RBC + +T = 20 +Random.seed!(12345) #Fix seed to reproduce data +Ο΅ = randn(T+1)' #Shocks are normal can be made anything e.g. student-t + +calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) +long_run_covariance = calculate_covariance_(solution[2]) + +Οƒ = 0.01 +Ξ± = 0.5 +Ξ² = 0.95 +ρ = 0.2 +Ξ΄ = 0.02 +Ξ³ = 1. + +SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) +Random.seed!(12345) #Fix seed to reproduce data +initial_conditions_dist = Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) #Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions +initial_conditions = β„’.diag(rand(initial_conditions_dist, m.timings.nPast_not_future_and_mixed)) +# long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * randn(m.timings.nPast_not_future_and_mixed) +state = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) +state_predictions = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) + +aug_state = [initial_conditions +1 +0] + +𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) +state[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 +state_predictions[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + +for t in 2:T+1 + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 +end + +observables_index = sort(indexin([:K, :Z], m.timings.var)) +data = state[observables_index,2:end] + +aug_state = [initial_conditions +1 +0] +for t in 2:T+1 + aug_state = [state_predictions[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + 0] + state_predictions[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 +end + +state_deviations = data[:,1:end] - state_predictions[observables_index,2:end] +sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) +## + + +Turing.@model function 
loglikelihood_scaling_function(m, data, observables) + #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + #Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) + #Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) + #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + #Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) + #Ξ³ ~ Turing.Normal(1, 0.05) + Οƒ = 0.01 + Ξ± = 0.5 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. + + solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) + if solution[end] != true + return Turing.@addlogprob! Inf + end + #initial_conditions ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions + + #xnought ~ Turing.filldist(Turing.Normal(0.,1.),m.timings.nPast_not_future_and_mixed) #Initial shocks + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + long_run_covariance = calculate_covariance_(solution[2]) + #initial_conditions = long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * xnought + # SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) + initial_conditions ~ Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions # Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + # Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + Ο΅_draw ~ Turing.filldist(Turing.Normal(0,1), m.timings.nExo * size(data, 2)) #Shocks are Normally - distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)+1) + + # state[m.timings.past_not_future_and_mixed_idx,1] .= initial_conditions + + aug_state = [initial_conditions + 1 + zeros( m.timings.nExo)] + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2)+1 + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t-1]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data[:,1:end] - state[observables_index,2:end] + #println(sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)] )) + + Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +loglikelihood_scaling = loglikelihood_scaling_function(RBC, data,[:K,:Z]) + +n_samples = 300 +n_adapts = 50 +Ξ΄ = 0.65 +alg = Turing.NUTS(n_adapts,Ξ΄) + +samps = Turing.sample(loglikelihood_scaling, alg, n_samples, progress = true)#, init_params = sol) + + + +#Plot true and estimated latents to see how well we backed them out +noise = Ο΅[:,2:end] + +symbol_to_int(s) = parse(Int, string(s)[9:end-1]) +Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:20], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) +tmp = Turing.describe(Ο΅_chain) +Ο΅_mean = tmp[1][:, 2] +Ο΅_std = tmp[1][:, 3] +Plots.plot(Ο΅_mean[1:end], ribbon=1.96 * Ο΅_std[1:end], label="Posterior mean", title = "First-Order Joint: Estimated Latents") +Plots.plot!(noise', label="True values") From 6aa91aa70987b5a614e4eb3f34151cad21b9f935 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Farkas?= Date: Thu, 25 May 2023 17:33:50 +0200 Subject: [PATCH 67/83] Update filter_free.jl - Update Fixed simulation exercise --- test/filter_free.jl | 139 ++++++++++++++++++++++++++------------------ 1 file changed, 81 insertions(+), 58 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index 9dc7f9775..1f6bd2b94 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -1,18 +1,14 @@ - - using MacroModelling import Turing, StatsPlots , Plots, Random import LinearAlgebra as β„’ @model RBC begin - K[0] = (1 - Ξ΄) * K[-1] + I[0] - Y[0] = Z[0] * K[-1]^Ξ± - Y[0] = C[0] + I[0] - 1 / C[0]^Ξ³ = Ξ² / C[1]^Ξ³ * (Ξ± * Y[1] / K[0] + (1 - Ξ΄)) - Z[0] = (1 - ρ) + ρ * Z[-1] + Οƒ * Ο΅[x] + 1 / (- k[0] + (1 - Ξ΄ ) * k[-1] + (exp(z[-1]) * k[-1]^Ξ±)) = (Ξ² / (- k[+1] + (1 - Ξ΄) * k[0] +(exp(z[0]) * k[0]^Ξ±))) * (Ξ±* exp(z[0]) * k[0] ^(Ξ± - 1) + (1 - Ξ΄)) ; + # 1 / c[0] - (Ξ² / c[1]) * (Ξ± * exp(z[1]) * k[1]^(Ξ± - 1) + (1 - Ξ΄)) =0 + # q[0] = exp(z[0]) * k[0]^Ξ± + z[0] = ρ * z[-1] - Οƒ* EPSz[x] end - @parameters RBC verbose = true begin Οƒ = 0.01 Ξ± = 0.5 @@ -23,72 +19,79 @@ end end solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) -zsim = simulate(RBC) -zsim1 = hcat(zsim([:K,:Z],:,:)...) -zdata = β„’.reshape(zsim1,2,40) - -# z_rbc1 = hcat(zsim...) -# z_rbc1 = β„’.reshape(z_rbc1,size(RBC.var,1),40) - -# Simulate T observations from a random initial condition -m= RBC +# draw from t scaled by approximate invariant variance) for the initial condition +m =RBC +calculate_covariance_ = MacroModelling.calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) +long_run_covariance = calculate_covariance_(solution[2]) -T = 20 +T =20 +ddof = 4 +shockdist = Turing.TDist(ddof) #Shocks are student-t Random.seed!(12345) #Fix seed to reproduce data -Ο΅ = randn(T+1)' #Shocks are normal can be made anything e.g. 
student-t +initial_conditions = long_run_covariance * rand(shockdist,m.timings.nPast_not_future_and_mixed) +#nitial_conditions_dist = Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) #Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions +#initial_conditions = β„’.diag.(rand(initial_conditions_dist, m.timings.nPast_not_future_and_mixed)) +# long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * randn(m.timings.nPast_not_future_and_mixed) -calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) -long_run_covariance = calculate_covariance_(solution[2]) +Random.seed!(12345) #Fix seed to reproduce data +# Generate noise sequence +noiseshocks = rand(shockdist,T) +noise = Matrix(noiseshocks') # the Ο΅ shocks are "noise" in DifferenceEquations for SciML compatibility -Οƒ = 0.01 -Ξ± = 0.5 -Ξ² = 0.95 -ρ = 0.2 -Ξ΄ = 0.02 -Ξ³ = 1. +#Ο΅ = [-0.369555723973723 0.47827032464044467 0.2567178329209457 -1.1127581634083954 1.779713752762057 -1.3694068387087652 0.4598600006094857 0.1319461357213755 0.21210992474923543 0.37965007742056217 -0.36234330914698276 0.04507575971259013 0.2562242956767027 -1.4425668844506196 -0.2559534237970267 -0.40742710317783837 1.5578503125015226 0.05971261026086091 -0.5590041386255554 -0.1841854411460526] +Ο΅ = noise -SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) -Random.seed!(12345) #Fix seed to reproduce data -initial_conditions_dist = Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) #Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions -initial_conditions = β„’.diag(rand(initial_conditions_dist, m.timings.nPast_not_future_and_mixed)) -# long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * randn(m.timings.nPast_not_future_and_mixed) +# Initialize states state = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) state_predictions = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) aug_state = [initial_conditions 1 -0] +Ο΅[:,1]] + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) state[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 state_predictions[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 -for t in 2:T+1 +for t in 2:T aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] 1 Ο΅[:,t]] state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 end -observables_index = sort(indexin([:K, :Z], m.timings.var)) -data = state[observables_index,2:end] +observables_index = sort(indexin([:k, :z], m.timings.var)) +data_sim = state[observables_index,1:end] aug_state = [initial_conditions 1 0] -for t in 2:T+1 +for t in 2:T aug_state = [state_predictions[m.timings.past_not_future_and_mixed_idx,t-1] 1 0] state_predictions[:,t] .= 𝐒₁ * aug_state + 
solution[3] * β„’.kron(aug_state, aug_state) / 2 end -state_deviations = data[:,1:end] - state_predictions[observables_index,2:end] -sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) -## +state_deviations = data_sim[:,1:end] - state_predictions[observables_index,1:end] +sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data_sim, 2)]) + +dataFV = [-0.02581238618841974 -0.024755946984579915 -0.0007518239655738511 -0.02582984321259188 -0.04567755888428696 0.021196857503906794 -0.0772465811707222 -0.008386388700111276 -0.02347363396607608 -0.033743271643453004 -0.04771401523417986 -0.0723137820802147 -0.052024995108031956 -0.04914479042856236 -0.0628064692912924 0.026322291179482583 0.05836273680164356 0.08777750705366681 -0.006357303764844118 -0.027859850762631953 0.0036979646377400615; -9.300233770305984e-6 0.0036936971929831686 -0.004043963807807812 -0.0033759710907710194 0.010452387415929751 -0.01570666004443462 0.010552736378200728 -0.0024880527304547108 -0.0018170719033046975 -0.002484513628153294 -0.004293403499836281 0.002764752391502571 0.00010219288117461296 -0.0025418043805321045 0.013917307968399776 0.005342995831650222 0.005142870198108429 -0.014549929085393539 -0.003507111919687318 0.0048886190023180905 0.0028195782119241446] +state_deviations_FV = dataFV[:,1:end] - state_predictions[observables_index,1:end] + +sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations_FV,1)))), state_deviations_FV[:,t]) for t in 1:size(data_sim, 2)]) + +sum([Turing.logpdf(Turing.MvNormal(zeros(size(data_sim)[1]),Matrix(0.0000001*β„’.I, size(data_sim)[1], size(data_sim)[1])), state_deviations_FV[:,t]) for t in 1:size(data_sim, 2)]) -Turing.@model function loglikelihood_scaling_function(m, data, observables) + +Plots.plot(data_sim[:,1:end]') +Plots.plot!(dataFV[:,2:end]') + + + +Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) #Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) #Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) @@ -96,6 +99,9 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables) #Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) #Ξ³ ~ Turing.Normal(1, 0.05) Οƒ = 0.01 + #Ξ± ~ Turing.Uniform(0.2, 0.8) + #Ξ² ~ Turing.Uniform(0.5, 0.99) + Ξ± = 0.5 Ξ² = 0.95 ρ = 0.2 @@ -106,47 +112,60 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables) if solution[end] != true return Turing.@addlogprob! 
Inf end - #initial_conditions ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions - - #xnought ~ Turing.filldist(Turing.Normal(0.,1.),m.timings.nPast_not_future_and_mixed) #Initial shocks + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) long_run_covariance = calculate_covariance_(solution[2]) - #initial_conditions = long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * xnought - # SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) - initial_conditions ~ Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions # Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions + + x0 ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions + Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + initial_conditions = long_run_covariance * x0 + + #xnought ~ Turing.filldist(Turing.Normal(0.,1.),m.timings.nPast_not_future_and_mixed) #Initial shocks + #calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + # long_run_covariance = calculate_covariance_(solution[2]) + # initial_conditions = long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * xnought + #SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) + # initial_conditions ~ Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions # Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) - # Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! - Ο΅_draw ~ Turing.filldist(Turing.Normal(0,1), m.timings.nExo * size(data, 2)) #Shocks are Normally - distributed! + Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + #Ο΅_draw ~ Turing.filldist(Turing.Normal(0,1), m.timings.nExo * size(data, 2)) #Shocks are Normally - distributed! 
Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) - state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)+1) + state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)) # state[m.timings.past_not_future_and_mixed_idx,1] .= initial_conditions aug_state = [initial_conditions 1 - zeros( m.timings.nExo)] + Ο΅[:,1]] state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 - for t in 2:size(data, 2)+1 + for t in 2:size(data, 2) aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] 1 - Ο΅[:,t-1]] + Ο΅[:,t]] state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 end observables_index = sort(indexin(observables, m.timings.var)) - state_deviations = data[:,1:end] - state[observables_index,2:end] - #println(sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)] )) + state_deviations = data[:,1:end] - state[observables_index,1:end] + + # println(sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(Ξ©*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)])) + + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(Ξ©*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) + - Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) end -loglikelihood_scaling = loglikelihood_scaling_function(RBC, data,[:K,:Z]) + +Ξ© = 0.0001 +loglikelihood_scaling = loglikelihood_scaling_function(RBC, data_sim,[:k,:z], Ξ©) n_samples = 300 n_adapts = 50 @@ -155,10 +174,13 @@ alg = Turing.NUTS(n_adapts,Ξ΄) samps = Turing.sample(loglikelihood_scaling, alg, n_samples, progress = true)#, init_params = sol) +Plots.plot(samps[["x0[1]"]]; colordim=:parameter, legend=true) + +#Plots.plot(samps[["Ξ²"]]; colordim=:parameter, legend=true) +#Plots.plot(samps[["Ξ±"]]; colordim=:parameter, legend=true) #Plot true and estimated latents to see how well we backed them out -noise = Ο΅[:,2:end] symbol_to_int(s) = parse(Int, string(s)[9:end-1]) Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:20], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) @@ -167,3 +189,4 @@ tmp = Turing.describe(Ο΅_chain) Ο΅_std = tmp[1][:, 3] Plots.plot(Ο΅_mean[1:end], ribbon=1.96 * Ο΅_std[1:end], label="Posterior mean", title = "First-Order Joint: Estimated Latents") Plots.plot!(noise', label="True values") + From bab2c724e281dbb6851e210272e6820e7d0e5aad Mon Sep 17 00:00:00 2001 From: thorek1 Date: Fri, 28 Apr 2023 14:05:31 +0200 Subject: [PATCH 68/83] solution works, need to set up filter --- Project.toml | 6 ++ src/MacroModelling.jl | 9 +- src/get_functions.jl | 4 + test/sampler_testing_for_higher_order.jl | 129 +++++++++++++++++++++++ 4 files changed, 145 insertions(+), 3 deletions(-) create mode 100644 test/sampler_testing_for_higher_order.jl diff --git a/Project.toml b/Project.toml index 5791e8e29..ab8870dab 100644 --- a/Project.toml +++ b/Project.toml @@ -11,14 +11,17 @@ ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" DynarePreprocessor_jll = "23afba7c-24e5-5ee2-bc2c-b42e07f0492a" 
+FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" ImplicitDifferentiation = "57b37032-215b-411a-8a7c-41a003a55207" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" LaTeXStrings = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f" +LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearMaps = "7a12625a-238d-50fd-b39a-03d52299707e" MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +Optim = "429524aa-4258-5aef-a3af-852621145aeb" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" RecursiveFactorization = "f2c3362d-daeb-58d1-803e-2bc74f2840b4" @@ -28,9 +31,12 @@ RuntimeGeneratedFunctions = "7e49a35a-f44a-4d26-94aa-eba1b4ca6b47" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412" +StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" Subscripts = "2b7f82d5-8785-4f63-971e-f18ddbeb808e" SymPy = "24249f21-da20-56a4-8eb1-6a02cf4ae2e6" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" +Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [compat] AxisKeys = "^0.2" diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index c08b79017..2e2e78c8a 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -74,8 +74,8 @@ export write_mod_file, write_dynare_file, write_to_dynare_file, write_to_dynare, export irf, girf # Remove comment for debugging -# export riccati_forward, block_solver, remove_redundant_SS_vars!, write_parameters_input!, parse_variables_input_to_index, undo_transformer , transformer, SSS_third_order_parameter_derivatives, SSS_second_order_parameter_derivatives, calculate_third_order_stochastic_steady_state, calculate_second_order_stochastic_steady_state, filter_and_smooth -# export create_symbols_eqs!, solve_steady_state!, write_functions_mapping!, solve!, parse_algorithm_to_state_update, block_solver, block_solver_AD, calculate_covariance, calculate_jacobian, calculate_first_order_solution, expand_steady_state, calculate_quadratic_iteration_solution, calculate_linear_time_iteration_solution, get_symbols, calculate_covariance_AD, parse_shocks_input_to_index +export riccati_forward, block_solver, remove_redundant_SS_vars!, write_parameters_input!, parse_variables_input_to_index, undo_transformer , transformer, SSS_third_order_parameter_derivatives, SSS_second_order_parameter_derivatives, calculate_third_order_stochastic_steady_state, calculate_second_order_stochastic_steady_state, filter_and_smooth +export create_symbols_eqs!, solve_steady_state!, write_functions_mapping!, solve!, parse_algorithm_to_state_update, block_solver, block_solver_AD, calculate_covariance, calculate_jacobian, calculate_first_order_solution, expand_steady_state, calculate_quadratic_iteration_solution, calculate_linear_time_iteration_solution, get_symbols, calculate_covariance_AD, parse_shocks_input_to_index # levenberg_marquardt @@ -1514,7 +1514,6 @@ function solve!(𝓂::β„³; any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) - # @assert solution_error < eps() "Could not find non stochastic steady steady." 
βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) @@ -2520,6 +2519,10 @@ function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive return hcat(A, zeros(size(A,1),T.nExo)), solved end + if !success + return hcat(A, zeros(T.nVars,T.nExo)), success + end + Jm = @view(β„’.diagm(ones(S,T.nVars))[T.past_not_future_and_mixed_idx,:]) βˆ‡β‚Š = @views βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] * β„’.diagm(ones(S,T.nVars))[T.future_not_past_and_mixed_idx,:] diff --git a/src/get_functions.jl b/src/get_functions.jl index 7307a6c87..2082d1542 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -1255,6 +1255,10 @@ function get_solution(𝓂::β„³, parameters::Vector{<: Real}; algorithm::Symbol return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, true elseif algorithm == :third_order + if !success + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.timings.nVars,2), spzeros(𝓂.timings.nVars,2), success + end + βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) 𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) diff --git a/test/sampler_testing_for_higher_order.jl b/test/sampler_testing_for_higher_order.jl new file mode 100644 index 000000000..21261bf6c --- /dev/null +++ b/test/sampler_testing_for_higher_order.jl @@ -0,0 +1,129 @@ +using MacroModelling +import Turing, StatsPlots + +@model RBC begin + K[0] = (1 - Ξ΄) * K[-1] + I[0] + Y[0] = Z[0] * K[-1]^Ξ± + Y[0] = C[0] + I[0] + 1 / C[0]^Ξ³ = Ξ² / C[1]^Ξ³ * (Ξ± * Y[1] / K[0] + (1 - Ξ΄)) + Z[0] = (1 - ρ) + ρ * Z[-1] + Οƒ * Ο΅[x] +end + + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.5 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1 +end + +get_SS(RBC) + +# plot_irf(RBC) + +get_solution(RBC) + + + +Turing.@model function loglikelihood_function(m) + Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) + Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) + ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) + Ξ³ ~ Turing.Normal(1, 0.05) + + Turing.@addlogprob! sum(get_solution(m,[Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³])[2]) / 1e8 +end + +# using LinearAlgebra + +# Z₁₁ = randn(10,10) +# Ẑ₁₁ = svd(Z₁₁) +# Ẑ₁₁ |>inv + +# Ẑ₁₁.S .|> inv +# Ẑ₁₁.Vt |> inv + +# (Ẑ₁₁.U * inv(diagm(Ẑ₁₁.S)) * Ẑ₁₁.Vt)' +# inv(Z₁₁) + +# Z₂₁ = randn(10,10) + +# D = Z₂₁ / Ẑ₁₁ +# D = Z₂₁ / Z₁₁ + + + +loglikelihood = loglikelihood_function(RBC) + + +n_samples = 10 + +Turing.setadbackend(:forwarddiff) + +# using Zygote +# Turing.setadbackend(:zygote) +samps = Turing.sample(loglikelihood, Turing.NUTS(), n_samples, progress = true)#, init_params = sol) + + + + +Turing.@model function loglikelihood_second_order_function(m) + Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) + Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) + ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) + Ξ³ ~ Turing.Normal(1, 0.05) + soll = get_solution(m,[Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) + println(soll[end]) + Turing.@addlogprob! 
sum(soll[3]) / 1e6 +end + + +loglikelihood_second_order = loglikelihood_second_order_function(RBC) + +samps = Turing.sample(loglikelihood_second_order, Turing.NUTS(), n_samples, progress = true)#, init_params = sol) + + + + + +Turing.@model function loglikelihood_scaling_function(m, data) + Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) + Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) + ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) + Ξ³ ~ Turing.Normal(1, 0.05) + + initial_conditions ~ Turing.filldist(Turing.TDist(4),m.timings.nVars) # Initial conditions + + solution = get_solution(m,[Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) + + if !solution[end] + return Turing.@addlogprob! Inf + end + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + x_iv = long_run_covariance * initial_conditions #scale initial condition with ergodic variance + + Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + Turing.@addlogprob! sum(x_iv) / 1e6 +end + +data = randn(1,10) + + +loglikelihood_scaling = loglikelihood_scaling_function(RBC, data) + +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol) + + From e9fcd6aa629c5bcd3505dcc57d7b205424ea6bd0 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Fri, 28 Apr 2023 17:28:06 +0200 Subject: [PATCH 69/83] skip aqua tests --- test/runtests.jl | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 94f67dc65..a6ea6f2e3 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -3,23 +3,23 @@ using MacroModelling using Random using AxisKeys, SparseArrays import StatsPlots, Turing # has to come before Aqua, otherwise exports are not recognised -using Aqua +# using Aqua # using JuliaFormatter include("functionality_tests.jl") -@testset verbose = true "Code quality (Aqua.jl)" begin - # Aqua.test_all(MacroModelling) - @testset "Compare Project.toml and test/Project.toml" Aqua.test_project_extras(MacroModelling) - @testset "Project.toml formatting" Aqua.test_project_toml_formatting(MacroModelling) - @testset "Stale dependencies" Aqua.test_stale_deps(MacroModelling) - @testset "Unbound type parameters" Aqua.test_unbound_args(MacroModelling) - @testset "Undefined exports" Aqua.test_undefined_exports(MacroModelling) - @testset "Piracy" Aqua.test_piracy(MacroModelling) - @testset "Method ambiguity" Aqua.test_ambiguities(MacroModelling, recursive = false) -end -GC.gc() +# @testset verbose = true "Code quality (Aqua.jl)" begin +# # Aqua.test_all(MacroModelling) +# @testset "Compare Project.toml and test/Project.toml" Aqua.test_project_extras(MacroModelling) +# @testset "Project.toml formatting" Aqua.test_project_toml_formatting(MacroModelling) +# @testset "Stale dependencies" Aqua.test_stale_deps(MacroModelling) +# @testset "Unbound type parameters" Aqua.test_unbound_args(MacroModelling) +# @testset "Undefined exports" Aqua.test_undefined_exports(MacroModelling) +# @testset "Piracy" Aqua.test_piracy(MacroModelling) +# @testset "Method ambiguity" Aqua.test_ambiguities(MacroModelling, recursive = false) +# end +# GC.gc() @testset verbose = true "FS2000" begin include("models/FS2000.jl") From e597da604e2af00f1284bc98cc91391cab9540c1 Mon Sep 17 00:00:00 2001 
From: thorek1 Date: Fri, 28 Apr 2023 19:11:29 +0200 Subject: [PATCH 70/83] runs --- test/sampler_testing_for_higher_order.jl | 75 ++++++++++++++++++++---- 1 file changed, 64 insertions(+), 11 deletions(-) diff --git a/test/sampler_testing_for_higher_order.jl b/test/sampler_testing_for_higher_order.jl index 21261bf6c..61add6b5d 100644 --- a/test/sampler_testing_for_higher_order.jl +++ b/test/sampler_testing_for_higher_order.jl @@ -1,5 +1,6 @@ using MacroModelling import Turing, StatsPlots +import LinearAlgebra as β„’ @model RBC begin K[0] = (1 - Ξ΄) * K[-1] + I[0] @@ -91,8 +92,9 @@ samps = Turing.sample(loglikelihood_second_order, Turing.NUTS(), n_samples, prog +solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) -Turing.@model function loglikelihood_scaling_function(m, data) +Turing.@model function loglikelihood_scaling_function(m, data, observables) Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) @@ -100,30 +102,81 @@ Turing.@model function loglikelihood_scaling_function(m, data) Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) Ξ³ ~ Turing.Normal(1, 0.05) - initial_conditions ~ Turing.filldist(Turing.TDist(4),m.timings.nVars) # Initial conditions + initial_conditions ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions - solution = get_solution(m,[Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) + solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) - if !solution[end] + if solution[end] != true return Turing.@addlogprob! Inf end - calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) - long_run_covariance = calculate_covariance_(solution[2]) + Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! - x_iv = long_run_covariance * initial_conditions #scale initial condition with ergodic variance + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) - Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)) + + # state[m.timings.past_not_future_and_mixed_idx,1] .= initial_conditions + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t-1]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end - Turing.@addlogprob! sum(x_iv) / 1e6 + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data[:,2:end] - state[observables_index,2:end] + + Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)-1]) end -data = randn(1,10) +data=[ 0.062638 0.053282 0.00118333 0.442814 0.300381 0.150443 0.228132 0.382626 -0.0122483 0.0848671 0.0196158 0.197779 0.782655 0.751345 0.911694 0.754197 0.493297 0.0265917 0.209705 0.0876804; +-0.0979824 0.0126432 -0.12628 0.161212 -0.109357 0.120232 0.0316766 0.0678017 -0.0371438 -0.162375 0.0574594 -0.0564989 -0.18021 0.0749526 0.132553 -0.135002 -0.0143846 -0.0770139 -0.0295755 -0.0943254] + + + +# AA = spzeros(10,10) +# AA[1:3,5:7] .= 1 + +# AA * Real[rand(10)...] -loglikelihood_scaling = loglikelihood_scaling_function(RBC, data) + +n_samples = 100 + +loglikelihood_scaling = loglikelihood_scaling_function(RBC, data,[:K,:Z]) samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol) +# m = RBC + + + +# solution = get_solution(m, m.parameter_values, algorithm = :second_order) + +# 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + +# t=2 + + +# state = zeros(Real,m.timings.nVars, size(data, 2)+1) +# Ο΅ = zeros( m.timings.nExo, size(data, 2)) +# aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] +# 1 +# Ο΅[:,t-1]] +# state[:,t] = 𝐒₁ * aug_state + solution[3] * Real[β„’.kron(aug_state, aug_state)...] / 2 + + +# observables = :K +# solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) +# solution[3] \ No newline at end of file From 11b51b5f6d978088f396b5e800fc3e3670abf34a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Farkas?= Date: Thu, 25 May 2023 16:56:40 +0200 Subject: [PATCH 71/83] Add files via upload Implementation of the filter-free DSGE estimation in MacroModelling.jl --- test/test_filterfree.jl | 214 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100644 test/test_filterfree.jl diff --git a/test/test_filterfree.jl b/test/test_filterfree.jl new file mode 100644 index 000000000..87589d650 --- /dev/null +++ b/test/test_filterfree.jl @@ -0,0 +1,214 @@ +using MacroModelling +import Turing +import Turing: NUTS, sample, logpdf +# import Optim, LineSearches, Plots +using Random, CSV, DataFrames, MCMCChains, AxisKeys +import DynamicPPL: logjoint +import LinearAlgebra as β„’ +import ChainRulesCore: @ignore_derivatives, ignore_derivatives + +cd("C:/Users/fm007/Documents/GitHub/MacroModelling.jl/test") +include("models/FS2000.jl") + +FS2000 = m + +# load data +dat = CSV.read("data/FS2000_data.csv", DataFrame) +data = KeyedArray(Array(dat)',Variable = Symbol.("log_".*names(dat)),Time = 1:size(dat)[1]) +data = log.(data) + +# declare observables +observables = sort(Symbol.("log_".*names(dat))) + +# subset observables in data +data = data(observables,:) + +# declare parameters +alp = 0.356 +bet = 0.993 +gam = 0.0085 +mst = 1.0002 +rho = 0.129 +psi = 0.65 +del = 0.01 +z_e_a = 0.035449 +z_e_m = 0.008862 +parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m] +# filter data with parameters +filtered_errors = MacroModelling.get_estimated_shocks(FS2000, data; parameters= parameters) # filtered_states = get_estimated_variables(FS2000, data; parameters= parameters) + +# Define DSGE Turing model +Turing.@model function FS2000_filter_free_loglikelihood_function(data, model, observables) + + alp ~ Beta(0.356, 0.02, ΞΌΟƒ = true) + #bet ~ Beta(0.993, 0.002, ΞΌΟƒ = true) + #gam ~ Normal(0.0085, 0.003) + 
#mst ~ Normal(1.0002, 0.007) + rho ~ Beta(0.129, 0.223, ΞΌΟƒ = true) + #psi ~ Beta(0.65, 0.05, ΞΌΟƒ = true) + #del ~ Beta(0.01, 0.005, ΞΌΟƒ = true) + #z_e_a ~ InverseGamma(0.035449, Inf, ΞΌΟƒ = true) + #z_e_m ~ InverseGamma(0.008862, Inf, ΞΌΟƒ = true) + + #alp = 0.356 + bet = 0.993 + gam = 0.0085 + mst = 1.0002 + #rho = 0.129 + psi = 0.65 + del = 0.01 + z_e_a = 0.035449 + z_e_m = 0.008862 + + # Log likehood function inputs - + # I did not manage to delegate the sampling to another function - Would it be possible to call it in with an include() command? + shock_distribution = Turing.Normal() + algorithm = :first_order + parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m] + verbose::Bool = false + tol::AbstractFloat = eps() + filter = :filter_free + +# BEGINNING OF OBJECTIVE FUNCTION + + # draw intial conditions + x0 ~ Turing.filldist(shock_distribution,m.timings.nPast_not_future_and_mixed) # Initial conditions + + # draw errors + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks + + # reshape errors to vector + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + # Checks + @assert length(observables) == size(data)[1] "Data columns and number of observables are not identical. Make sure the data contains only the selected observables." + @assert length(observables) <= m.timings.nExo "Cannot estimate model with more observables than exogenous shocks. Have at least as many shocks as observable variables." + + @ignore_derivatives sort!(observables) + @ignore_derivatives solve!(m, verbose = verbose) + + if isnothing(parameters) + parameters = m.parameter_values + else + ub = @ignore_derivatives fill(1e12 + rand(), length(m.parameters) + length(m.βž•_vars)) + lb = @ignore_derivatives - ub + + for (i,v) in enumerate(m.bounded_vars) + if v ∈ m.parameters + @ignore_derivatives lb[i] = m.lower_bounds[i] + @ignore_derivatives ub[i] = m.upper_bounds[i] + end + end + + if min(max(parameters,lb),ub) != parameters + return -Inf + end + end + + SS_and_pars, solution_error = m.SS_solve_func(parameters, m, verbose) + + if solution_error > tol || isnan(solution_error) + return -Inf + end + + NSSS_labels = @ignore_derivatives [sort(union(m.exo_present,m.var))...,m.calibration_equations_parameters...] + + obs_indices = @ignore_derivatives indexin(observables,NSSS_labels) + + data_in_deviations = collect(data(observables)) .- SS_and_pars[obs_indices] + + observables_and_states = @ignore_derivatives sort(union(m.timings.past_not_future_and_mixed_idx,indexin(observables,sort(union(m.aux,m.var,m.exo_present))))) + + # solve DSGE with parameters + solution = get_solution(m, parameters, algorithm = algorithm) + + # store solution + if algorithm == :first_order + 𝐒₁ = solution[2] + else + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + end + + # Thore: we can probably skip this because it is computationally expensive and should drop out in sampling ... MΓ‘tyΓ‘s: We cannot as the initial condition bias for the erros is important + + # Option 1 - no initial condition sampling - biased errors but faster - no need to compute LR covariance matrix + # x0 = zeros(m.timings.nPast_not_future_and_mixed) + + # Option 2 - initial condition is sampled - unbiased errors - slow as LR covariance is needed. 
+ calculate_covariance_ = MacroModelling.calculate_covariance_AD(solution[2], T = m.timings, subset_indices = m.timings.past_not_future_and_mixed_idx) + long_run_covariance = calculate_covariance_(solution[2]) + initial_conditions =long_run_covariance * x0 # x0 + + # Declare states + state = zeros(typeof(Ο΅_draw[1]), m.timings.nVars, size(data, 2) ) + + # propagate the state space + if algorithm == :first_order + + aug_state = [initial_conditions + Ο΅[:,1]] + state[:,1] .= 𝐒₁ * aug_state + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + end + elseif algorithm == :second_order + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + elseif algorithm == :pruned_second_order + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + end + + # define data in deviations form SS + data_in_deviations = collect(data(observables)) .- SS_and_pars[obs_indices] + + # compute observation predictions - without ME + state_deviations = data_in_deviations - state[obs_indices,:] + # make_sure_state_equals_observable = sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(1e-4*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) *10^2 + make_sure_state_equals_observable = sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(1e-8*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) + # make_sure_state_equals_observable = -sum(abs2,state_deviations) * 1e30 +# END OF OBJECTIVE FUNCTION + + Turing.@addlogprob! make_sure_state_equals_observable#calculate_filterfree_loglikelihood(model, data(observables), observables; parameters = [alp, bet, gam, mst, rho, psi, del, z_e_a, z_e_m]) +end + +FS2000_filterfree = FS2000_filter_free_loglikelihood_function(data, FS2000, observables) + +n_samples = 1000 + +samps = sample(FS2000_filterfree, NUTS(), n_samples, progress = true)#, init_params = sol) + +symbol_to_int(s) = parse(Int, string(s)[9:end-1]) +Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:m.timings.nExo*size(data,2)], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) +tmp = Turing.describe(Ο΅_chain) +Ο΅_mean = tmp[1][:, 2] +Ο΅_mean = reshape(Ο΅_mean, m.timings.nExo, Integer(size(Ο΅_mean,1)/m.timings.nExo)) +Ο΅_std = tmp[1][:, 3] +Ο΅_std = reshape(Ο΅_std, m.timings.nExo, Integer(size(Ο΅_std,1)/m.timings.nExo)) + +sum(abs,Ο΅_mean[1,end-20:end]-collect(filtered_errors[1,end-20:end]))<10^-4 From 55bdb24c6f59cbc1988cfa248d394f2cfbd01903 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Farkas?= Date: Thu, 25 May 2023 17:09:51 +0200 Subject: [PATCH 72/83] Filter free estimation test using Monte Carlo The file is a test suite for filter-free DSGE estimation. It: -solves the RBC model using MacroModelling, -draws structural shocks from a t-distribution, and initial conditions -simulates the data, -Re-estimates the shocks using filter-free estimation. 
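In outline, the filter-free approach samples the structural shocks and the initial state as latent variables alongside the parameters, propagates the solved state space forward, and ties the model-implied observables to the data through a very tight measurement density. A minimal, hedged sketch of that idea for a generic linear state space x[t] = A x[t-1] + B Ξ΅[t] (the names filter_free_sketch, A, B, obs and Ξ© are illustrative and are not the ones used in the file added below):

using Turing
import LinearAlgebra as LA

Turing.@model function filter_free_sketch(data, A, B, obs, Ξ©)
    T = size(data, 2)
    x0 ~ Turing.filldist(Turing.Normal(), size(A, 1))           # initial state as a latent draw
    Ξ΅_draw ~ Turing.filldist(Turing.TDist(4), size(B, 2) * T)   # structural shocks as latent draws
    Ξ΅ = reshape(Ξ΅_draw, size(B, 2), T)
    x = x0
    for t in 1:T
        x = A * x + B * Ξ΅[:, t]                                 # propagate the solved state space
        # a very tight measurement density pins the model-implied observables to the data
        Turing.@addlogprob! Turing.logpdf(Turing.MvNormal(Ξ© * LA.I(length(obs))), data[:, t] - x[obs])
    end
end

With A and B taken from a solved model (e.g. slices of a first-order solution as in the diff below), sampling would then look like Turing.sample(filter_free_sketch(data, A, B, obs, 1e-8), Turing.NUTS(), 1000).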
--- test/filter_free.jl | 169 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 test/filter_free.jl diff --git a/test/filter_free.jl b/test/filter_free.jl new file mode 100644 index 000000000..9dc7f9775 --- /dev/null +++ b/test/filter_free.jl @@ -0,0 +1,169 @@ + + +using MacroModelling +import Turing, StatsPlots , Plots, Random +import LinearAlgebra as β„’ + +@model RBC begin + K[0] = (1 - Ξ΄) * K[-1] + I[0] + Y[0] = Z[0] * K[-1]^Ξ± + Y[0] = C[0] + I[0] + 1 / C[0]^Ξ³ = Ξ² / C[1]^Ξ³ * (Ξ± * Y[1] / K[0] + (1 - Ξ΄)) + Z[0] = (1 - ρ) + ρ * Z[-1] + Οƒ * Ο΅[x] +end + + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.5 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. +end +solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) + +zsim = simulate(RBC) +zsim1 = hcat(zsim([:K,:Z],:,:)...) +zdata = β„’.reshape(zsim1,2,40) + +# z_rbc1 = hcat(zsim...) +# z_rbc1 = β„’.reshape(z_rbc1,size(RBC.var,1),40) + +# Simulate T observations from a random initial condition +m= RBC + +T = 20 +Random.seed!(12345) #Fix seed to reproduce data +Ο΅ = randn(T+1)' #Shocks are normal can be made anything e.g. student-t + +calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) +long_run_covariance = calculate_covariance_(solution[2]) + +Οƒ = 0.01 +Ξ± = 0.5 +Ξ² = 0.95 +ρ = 0.2 +Ξ΄ = 0.02 +Ξ³ = 1. + +SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) +Random.seed!(12345) #Fix seed to reproduce data +initial_conditions_dist = Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) #Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions +initial_conditions = β„’.diag(rand(initial_conditions_dist, m.timings.nPast_not_future_and_mixed)) +# long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * randn(m.timings.nPast_not_future_and_mixed) +state = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) +state_predictions = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) + +aug_state = [initial_conditions +1 +0] + +𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) +state[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 +state_predictions[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + +for t in 2:T+1 + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 +end + +observables_index = sort(indexin([:K, :Z], m.timings.var)) +data = state[observables_index,2:end] + +aug_state = [initial_conditions +1 +0] +for t in 2:T+1 + aug_state = [state_predictions[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + 0] + state_predictions[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 +end + +state_deviations = data[:,1:end] - state_predictions[observables_index,2:end] +sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) +## + + +Turing.@model function 
loglikelihood_scaling_function(m, data, observables) + #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + #Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) + #Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) + #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + #Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) + #Ξ³ ~ Turing.Normal(1, 0.05) + Οƒ = 0.01 + Ξ± = 0.5 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. + + solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) + if solution[end] != true + return Turing.@addlogprob! Inf + end + #initial_conditions ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions + + #xnought ~ Turing.filldist(Turing.Normal(0.,1.),m.timings.nPast_not_future_and_mixed) #Initial shocks + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + long_run_covariance = calculate_covariance_(solution[2]) + #initial_conditions = long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * xnought + # SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) + initial_conditions ~ Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions # Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + # Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + Ο΅_draw ~ Turing.filldist(Turing.Normal(0,1), m.timings.nExo * size(data, 2)) #Shocks are Normally - distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)+1) + + # state[m.timings.past_not_future_and_mixed_idx,1] .= initial_conditions + + aug_state = [initial_conditions + 1 + zeros( m.timings.nExo)] + state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2)+1 + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t-1]] + state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data[:,1:end] - state[observables_index,2:end] + #println(sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)] )) + + Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +loglikelihood_scaling = loglikelihood_scaling_function(RBC, data,[:K,:Z]) + +n_samples = 300 +n_adapts = 50 +Ξ΄ = 0.65 +alg = Turing.NUTS(n_adapts,Ξ΄) + +samps = Turing.sample(loglikelihood_scaling, alg, n_samples, progress = true)#, init_params = sol) + + + +#Plot true and estimated latents to see how well we backed them out +noise = Ο΅[:,2:end] + +symbol_to_int(s) = parse(Int, string(s)[9:end-1]) +Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:20], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) +tmp = Turing.describe(Ο΅_chain) +Ο΅_mean = tmp[1][:, 2] +Ο΅_std = tmp[1][:, 3] +Plots.plot(Ο΅_mean[1:end], ribbon=1.96 * Ο΅_std[1:end], label="Posterior mean", title = "First-Order Joint: Estimated Latents") +Plots.plot!(noise', label="True values") From 2d5252e59a012a41facda2d5de8f82209c1c169c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Farkas?= Date: Thu, 25 May 2023 17:33:50 +0200 Subject: [PATCH 73/83] Update filter_free.jl - Update Fixed simulation exercise --- test/filter_free.jl | 139 ++++++++++++++++++++++++++------------------ 1 file changed, 81 insertions(+), 58 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index 9dc7f9775..1f6bd2b94 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -1,18 +1,14 @@ - - using MacroModelling import Turing, StatsPlots , Plots, Random import LinearAlgebra as β„’ @model RBC begin - K[0] = (1 - Ξ΄) * K[-1] + I[0] - Y[0] = Z[0] * K[-1]^Ξ± - Y[0] = C[0] + I[0] - 1 / C[0]^Ξ³ = Ξ² / C[1]^Ξ³ * (Ξ± * Y[1] / K[0] + (1 - Ξ΄)) - Z[0] = (1 - ρ) + ρ * Z[-1] + Οƒ * Ο΅[x] + 1 / (- k[0] + (1 - Ξ΄ ) * k[-1] + (exp(z[-1]) * k[-1]^Ξ±)) = (Ξ² / (- k[+1] + (1 - Ξ΄) * k[0] +(exp(z[0]) * k[0]^Ξ±))) * (Ξ±* exp(z[0]) * k[0] ^(Ξ± - 1) + (1 - Ξ΄)) ; + # 1 / c[0] - (Ξ² / c[1]) * (Ξ± * exp(z[1]) * k[1]^(Ξ± - 1) + (1 - Ξ΄)) =0 + # q[0] = exp(z[0]) * k[0]^Ξ± + z[0] = ρ * z[-1] - Οƒ* EPSz[x] end - @parameters RBC verbose = true begin Οƒ = 0.01 Ξ± = 0.5 @@ -23,72 +19,79 @@ end end solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) -zsim = simulate(RBC) -zsim1 = hcat(zsim([:K,:Z],:,:)...) -zdata = β„’.reshape(zsim1,2,40) - -# z_rbc1 = hcat(zsim...) -# z_rbc1 = β„’.reshape(z_rbc1,size(RBC.var,1),40) - -# Simulate T observations from a random initial condition -m= RBC +# draw from t scaled by approximate invariant variance) for the initial condition +m =RBC +calculate_covariance_ = MacroModelling.calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) +long_run_covariance = calculate_covariance_(solution[2]) -T = 20 +T =20 +ddof = 4 +shockdist = Turing.TDist(ddof) #Shocks are student-t Random.seed!(12345) #Fix seed to reproduce data -Ο΅ = randn(T+1)' #Shocks are normal can be made anything e.g. 
student-t +initial_conditions = long_run_covariance * rand(shockdist,m.timings.nPast_not_future_and_mixed) +#nitial_conditions_dist = Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) #Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions +#initial_conditions = β„’.diag.(rand(initial_conditions_dist, m.timings.nPast_not_future_and_mixed)) +# long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * randn(m.timings.nPast_not_future_and_mixed) -calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) -long_run_covariance = calculate_covariance_(solution[2]) +Random.seed!(12345) #Fix seed to reproduce data +# Generate noise sequence +noiseshocks = rand(shockdist,T) +noise = Matrix(noiseshocks') # the Ο΅ shocks are "noise" in DifferenceEquations for SciML compatibility -Οƒ = 0.01 -Ξ± = 0.5 -Ξ² = 0.95 -ρ = 0.2 -Ξ΄ = 0.02 -Ξ³ = 1. +#Ο΅ = [-0.369555723973723 0.47827032464044467 0.2567178329209457 -1.1127581634083954 1.779713752762057 -1.3694068387087652 0.4598600006094857 0.1319461357213755 0.21210992474923543 0.37965007742056217 -0.36234330914698276 0.04507575971259013 0.2562242956767027 -1.4425668844506196 -0.2559534237970267 -0.40742710317783837 1.5578503125015226 0.05971261026086091 -0.5590041386255554 -0.1841854411460526] +Ο΅ = noise -SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) -Random.seed!(12345) #Fix seed to reproduce data -initial_conditions_dist = Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) #Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions -initial_conditions = β„’.diag(rand(initial_conditions_dist, m.timings.nPast_not_future_and_mixed)) -# long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * randn(m.timings.nPast_not_future_and_mixed) +# Initialize states state = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) state_predictions = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) aug_state = [initial_conditions 1 -0] +Ο΅[:,1]] + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) state[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 state_predictions[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 -for t in 2:T+1 +for t in 2:T aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] 1 Ο΅[:,t]] state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 end -observables_index = sort(indexin([:K, :Z], m.timings.var)) -data = state[observables_index,2:end] +observables_index = sort(indexin([:k, :z], m.timings.var)) +data_sim = state[observables_index,1:end] aug_state = [initial_conditions 1 0] -for t in 2:T+1 +for t in 2:T aug_state = [state_predictions[m.timings.past_not_future_and_mixed_idx,t-1] 1 0] state_predictions[:,t] .= 𝐒₁ * aug_state + 
solution[3] * β„’.kron(aug_state, aug_state) / 2 end -state_deviations = data[:,1:end] - state_predictions[observables_index,2:end] -sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) -## +state_deviations = data_sim[:,1:end] - state_predictions[observables_index,1:end] +sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data_sim, 2)]) + +dataFV = [-0.02581238618841974 -0.024755946984579915 -0.0007518239655738511 -0.02582984321259188 -0.04567755888428696 0.021196857503906794 -0.0772465811707222 -0.008386388700111276 -0.02347363396607608 -0.033743271643453004 -0.04771401523417986 -0.0723137820802147 -0.052024995108031956 -0.04914479042856236 -0.0628064692912924 0.026322291179482583 0.05836273680164356 0.08777750705366681 -0.006357303764844118 -0.027859850762631953 0.0036979646377400615; -9.300233770305984e-6 0.0036936971929831686 -0.004043963807807812 -0.0033759710907710194 0.010452387415929751 -0.01570666004443462 0.010552736378200728 -0.0024880527304547108 -0.0018170719033046975 -0.002484513628153294 -0.004293403499836281 0.002764752391502571 0.00010219288117461296 -0.0025418043805321045 0.013917307968399776 0.005342995831650222 0.005142870198108429 -0.014549929085393539 -0.003507111919687318 0.0048886190023180905 0.0028195782119241446] +state_deviations_FV = dataFV[:,1:end] - state_predictions[observables_index,1:end] + +sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations_FV,1)))), state_deviations_FV[:,t]) for t in 1:size(data_sim, 2)]) + +sum([Turing.logpdf(Turing.MvNormal(zeros(size(data_sim)[1]),Matrix(0.0000001*β„’.I, size(data_sim)[1], size(data_sim)[1])), state_deviations_FV[:,t]) for t in 1:size(data_sim, 2)]) -Turing.@model function loglikelihood_scaling_function(m, data, observables) + +Plots.plot(data_sim[:,1:end]') +Plots.plot!(dataFV[:,2:end]') + + + +Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) #Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) #Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) @@ -96,6 +99,9 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables) #Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) #Ξ³ ~ Turing.Normal(1, 0.05) Οƒ = 0.01 + #Ξ± ~ Turing.Uniform(0.2, 0.8) + #Ξ² ~ Turing.Uniform(0.5, 0.99) + Ξ± = 0.5 Ξ² = 0.95 ρ = 0.2 @@ -106,47 +112,60 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables) if solution[end] != true return Turing.@addlogprob! 
Inf end - #initial_conditions ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions - - #xnought ~ Turing.filldist(Turing.Normal(0.,1.),m.timings.nPast_not_future_and_mixed) #Initial shocks + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) long_run_covariance = calculate_covariance_(solution[2]) - #initial_conditions = long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * xnought - # SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) - initial_conditions ~ Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions # Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions + + x0 ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions + Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + initial_conditions = long_run_covariance * x0 + + #xnought ~ Turing.filldist(Turing.Normal(0.,1.),m.timings.nPast_not_future_and_mixed) #Initial shocks + #calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + # long_run_covariance = calculate_covariance_(solution[2]) + # initial_conditions = long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * xnought + #SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) + # initial_conditions ~ Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions # Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) - # Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! - Ο΅_draw ~ Turing.filldist(Turing.Normal(0,1), m.timings.nExo * size(data, 2)) #Shocks are Normally - distributed! + Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + #Ο΅_draw ~ Turing.filldist(Turing.Normal(0,1), m.timings.nExo * size(data, 2)) #Shocks are Normally - distributed! 
Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) - state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)+1) + state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)) # state[m.timings.past_not_future_and_mixed_idx,1] .= initial_conditions aug_state = [initial_conditions 1 - zeros( m.timings.nExo)] + Ο΅[:,1]] state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 - for t in 2:size(data, 2)+1 + for t in 2:size(data, 2) aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] 1 - Ο΅[:,t-1]] + Ο΅[:,t]] state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 end observables_index = sort(indexin(observables, m.timings.var)) - state_deviations = data[:,1:end] - state[observables_index,2:end] - #println(sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)] )) + state_deviations = data[:,1:end] - state[observables_index,1:end] + + # println(sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(Ξ©*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)])) + + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(Ξ©*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) + - Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) end -loglikelihood_scaling = loglikelihood_scaling_function(RBC, data,[:K,:Z]) + +Ξ© = 0.0001 +loglikelihood_scaling = loglikelihood_scaling_function(RBC, data_sim,[:k,:z], Ξ©) n_samples = 300 n_adapts = 50 @@ -155,10 +174,13 @@ alg = Turing.NUTS(n_adapts,Ξ΄) samps = Turing.sample(loglikelihood_scaling, alg, n_samples, progress = true)#, init_params = sol) +Plots.plot(samps[["x0[1]"]]; colordim=:parameter, legend=true) + +#Plots.plot(samps[["Ξ²"]]; colordim=:parameter, legend=true) +#Plots.plot(samps[["Ξ±"]]; colordim=:parameter, legend=true) #Plot true and estimated latents to see how well we backed them out -noise = Ο΅[:,2:end] symbol_to_int(s) = parse(Int, string(s)[9:end-1]) Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:20], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) @@ -167,3 +189,4 @@ tmp = Turing.describe(Ο΅_chain) Ο΅_std = tmp[1][:, 3] Plots.plot(Ο΅_mean[1:end], ribbon=1.96 * Ο΅_std[1:end], label="Posterior mean", title = "First-Order Joint: Estimated Latents") Plots.plot!(noise', label="True values") + From bb33900f4d2e4a8b43e3da74ad9c2daa41ed627e Mon Sep 17 00:00:00 2001 From: thorek1 Date: Fri, 26 May 2023 15:34:56 +0100 Subject: [PATCH 74/83] merge latest changes rom main branch --- Project.toml | 8 +++--- src/MacroModelling.jl | 67 ++++++++++++++++++++++--------------------- src/get_functions.jl | 34 +++++++++++----------- 3 files changed, 54 insertions(+), 55 deletions(-) diff --git a/Project.toml b/Project.toml index ab8870dab..3d11a0324 100644 --- a/Project.toml +++ b/Project.toml @@ -38,6 +38,10 @@ Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +[weakdeps] +StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" +Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" + [compat] AxisKeys = "^0.2" BlockTriangularForm = "^0.1" 
@@ -84,7 +88,3 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [targets] test = ["Aqua", "CSV", "DataFrames", "DynamicPPL", "MCMCChains", "LineSearches", "Optim", "Test", "Turing", "FiniteDifferences", "Zygote", "Plots", "StatsPlots"] - -[weakdeps] -StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" -Turing = "fce5fe82-541a-59a6-adf8-730c64b5f9a0" \ No newline at end of file diff --git a/src/MacroModelling.jl b/src/MacroModelling.jl index 7d1875a37..3f4d60ae7 100644 --- a/src/MacroModelling.jl +++ b/src/MacroModelling.jl @@ -1315,7 +1315,7 @@ function calculate_second_order_stochastic_steady_state(parameters::Vector{M}, βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) @@ -1447,7 +1447,7 @@ function calculate_third_order_stochastic_steady_state(parameters::Vector{M}, βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) @@ -1514,12 +1514,15 @@ function solve!(𝓂::β„³; any([:third_order,:pruned_third_order] .∈ (𝓂.solution.outdated_algorithms,))) SS_and_pars, solution_error = 𝓂.solution.outdated_NSSS ? 𝓂.SS_solve_func(𝓂.parameter_values, 𝓂, verbose) : (𝓂.solution.non_stochastic_steady_state, eps()) + # @assert solution_error < eps() "Could not find non stochastic steady steady." βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - sol_mat, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol_mat, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + @assert solved "Could not find stable first order solution." 
+ state_update₁ = function(state::Vector{Float64}, shock::Vector{Float64}) sol_mat * [state[𝓂.timings.past_not_future_and_mixed_idx]; shock] end 𝓂.solution.perturbation.first_order = perturbation_solution(sol_mat, state_update₁) @@ -2363,7 +2366,7 @@ end -function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = false) +function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = false)::Tuple{Matrix{Float64},Bool} βˆ‡β‚Š = @view βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] βˆ‡β‚€ = @view βˆ‡β‚[:,T.nFuture_not_past_and_mixed .+ range(1, T.nVars)] βˆ‡β‚‹ = @view βˆ‡β‚[:,T.nFuture_not_past_and_mixed + T.nVars .+ range(1, T.nPast_not_future_and_mixed)] @@ -2407,9 +2410,12 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Ẑ₁₁ = RF.lu(Z₁₁, check = false) if !β„’.issuccess(Ẑ₁₁) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(Ẑ₁₁) + Ẑ₁₁ = β„’.svd(Z₁₁, check = false) end + if !β„’.issuccess(Ẑ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false + end else eigenselect = abs.(schdcmp.Ξ² ./ schdcmp.Ξ±) .< 1 @@ -2424,16 +2430,16 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Ẑ₁₁ = RF.lu(Z₁₁, check = false) if !β„’.issuccess(Ẑ₁₁) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(Ẑ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false end end - + Ŝ₁₁ = RF.lu(S₁₁, check = false) if !β„’.issuccess(Ŝ₁₁) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(Ŝ₁₁) + return zeros(T.nVars,T.nPast_not_future_and_mixed), false end - + D = Z₂₁ / Ẑ₁₁ L = Z₁₁ * (Ŝ₁₁ \ T₁₁) / Ẑ₁₁ @@ -2443,11 +2449,11 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = Aβ‚Šα΅€ = @view Aβ‚Š[1:T.nPresent_only,:] AΜƒβ‚€α΅€ = @view Aβ‚€[1:T.nPresent_only, T.present_but_not_only_idx] Aβ‚‹α΅€ = @view Aβ‚‹[1:T.nPresent_only,:] - + AΜ„Μ‚β‚€α΅€ = RF.lu(AΜ„β‚€α΅€, check = false) if !β„’.issuccess(AΜ„Μ‚β‚€α΅€) - return zeros(T.nVars,T.nPast_not_future_and_mixed), β„’.issuccess(AΜ„Μ‚β‚€α΅€) + AΜ„Μ‚β‚€α΅€ = β„’.svd(collect(AΜ„β‚€α΅€)) end A = @views vcat(-(AΜ„Μ‚β‚€α΅€ \ (Aβ‚Šα΅€ * D * L + AΜƒβ‚€α΅€ * sol[T.dynamic_order,:] + Aβ‚‹α΅€)), sol) @@ -2455,8 +2461,7 @@ function riccati_forward(βˆ‡β‚::Matrix{Float64}; T::timings, explosive::Bool = return @view(A[T.reorder,:]), true end - -function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}; T::timings, explosive::Bool = false) #::AbstractMatrix{Real}, +function riccati_conditions(βˆ‡β‚::AbstractMatrix{<: Real}, sol_d::AbstractMatrix{<: Real}; T::timings, explosive::Bool = false) expand = @ignore_derivatives @views [β„’.diagm(ones(T.nVars))[T.future_not_past_and_mixed_idx,:], β„’.diagm(ones(T.nVars))[T.past_not_future_and_mixed_idx,:]] A = @views βˆ‡β‚[:,1:T.nFuture_not_past_and_mixed] * expand[1] @@ -2478,45 +2483,41 @@ function riccati_forward(βˆ‡β‚::Matrix{β„±.Dual{Z,S,N}}; T::timings = T, explos # you can play with the dimension here, sometimes it makes sense to transpose ps = mapreduce(β„±.partials, hcat, βˆ‡β‚)' - # get f(vs) - val, success = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) + val, solved = riccati_forward(βˆ‡Μ‚β‚;T = T, explosive = explosive) - if success + if solved # get J(f, vs) * ps (cheating). 
Write your custom rule here B = β„±.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚) A = β„±.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val) - # B = Zygote.jacobian(x -> riccati_conditions(x, val; T = T), βˆ‡Μ‚β‚)[1] - # A = Zygote.jacobian(x -> riccati_conditions(βˆ‡Μ‚β‚, x; T = T), val)[1] - + AΜ‚ = RF.lu(A, check = false) - + if !β„’.issuccess(AΜ‚) AΜ‚ = β„’.svd(A) end jvp = -(AΜ‚ \ B) * ps else - jvp = fill(0,length(val),length(βˆ‡β‚)) * ps + jvp = fill(0,length(val),length(βˆ‡Μ‚β‚)) * ps end # pack: SoA -> AoS return reshape(map(val, eachrow(jvp)) do v, p β„±.Dual{Z}(v, p...) # Z is the tag - end,size(val)), success + end,size(val)), solved end riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive)[1], (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) -riccati_(βˆ‡β‚;T, explosive) = ImplicitFunction(βˆ‡β‚ -> riccati_forward(βˆ‡β‚, T=T, explosive=explosive), (x,y)->riccati_conditions(x,y,T=T,explosive=explosive)) - -function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false) where S <: Real +function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive::Bool = false)::Tuple{Matrix{S},Bool} where S <: Real # A = riccati_AD(βˆ‡β‚, T = T, explosive = explosive) riccati = riccati_(βˆ‡β‚, T = T, explosive = explosive) - A, success = riccati(βˆ‡β‚) - # A = riccati_forward(βˆ‡β‚, T = T, explosive = explosive) + A = riccati(βˆ‡β‚) - if !success - return hcat(A, zeros(T.nVars,T.nExo)), success + solved = @ignore_derivatives !(isapprox(sum(abs,A), 0, rtol = eps())) + + if !solved + return hcat(A, zeros(size(A,1),T.nExo)), solved end Jm = @view(β„’.diagm(ones(S,T.nVars))[T.past_not_future_and_mixed_idx,:]) @@ -2527,7 +2528,7 @@ function calculate_first_order_solution(βˆ‡β‚::Matrix{S}; T::timings, explosive B = -((βˆ‡β‚Š * A * Jm + βˆ‡β‚€) \ βˆ‡β‚‘) - return hcat(A, B), success + return hcat(A, B), solved end @@ -3084,7 +3085,7 @@ function calculate_covariance(parameters::Vector{<: Real}, 𝓂::β„³; verbose::B βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) covar_raw = calculate_covariance_forward(sol,T = 𝓂.timings, subset_indices = collect(1:𝓂.timings.nVars)) @@ -3183,7 +3184,7 @@ function calculate_kalman_filter_loglikelihood(𝓂::β„³, data::AbstractArray{Fl βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) if !solved return -Inf @@ -3259,7 +3260,7 @@ function filter_and_smooth(𝓂::β„³, data_in_deviations::AbstractArray{Float64} βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) A = @views sol[:,1:𝓂.timings.nPast_not_future_and_mixed] * β„’.diagm(ones(𝓂.timings.nVars))[𝓂.timings.past_not_future_and_mixed_idx,:] diff --git a/src/get_functions.jl b/src/get_functions.jl index 8840a9a82..7307a6c87 100644 --- a/src/get_functions.jl +++ b/src/get_functions.jl @@ -654,7 +654,7 @@ function get_irf(𝓂::β„³, βˆ‡β‚ = calculate_jacobian(parameters, reference_steady_state, 𝓂) - sol_mat, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol_mat, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) state_update 
= function(state::Vector, shock::Vector) sol_mat * [state[𝓂.timings.past_not_future_and_mixed_idx]; shock] end @@ -1236,23 +1236,25 @@ function get_solution(𝓂::β„³, parameters::Vector{<: Real}; algorithm::Symbol βˆ‡β‚ = calculate_jacobian(parameters, SS_and_pars, 𝓂) - 𝐒₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝐒₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) - if algorithm == :second_order - if !success - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.timings.nVars,2), success + if !solved + if algorithm == :second_order + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.var,2), false + elseif algorithm == :third_order + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.var,2), spzeros(𝓂.var,2), false + else + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, false end + end + if algorithm == :second_order βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) 𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, success + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, true elseif algorithm == :third_order - if !success - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, spzeros(𝓂.timings.nVars,2), spzeros(𝓂.timings.nVars,2), success - end - βˆ‡β‚‚ = calculate_hessian(parameters, SS_and_pars, 𝓂) 𝐒₂ = calculate_second_order_solution(βˆ‡β‚, βˆ‡β‚‚, 𝐒₁; T = 𝓂.timings) @@ -1261,13 +1263,9 @@ function get_solution(𝓂::β„³, parameters::Vector{<: Real}; algorithm::Symbol 𝐒₃ = calculate_third_order_solution(βˆ‡β‚, βˆ‡β‚‚, βˆ‡β‚ƒ, 𝐒₁, 𝐒₂; T = 𝓂.timings) - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, 𝐒₃, success + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, 𝐒₂, 𝐒₃, true else - if !success - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, success - end - - return SS_and_pars[1:length(𝓂.var)], 𝐒₁, success + return SS_and_pars[1:length(𝓂.var)], 𝐒₁, true end end @@ -1363,7 +1361,7 @@ function get_conditional_variance_decomposition(𝓂::β„³; βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - 𝑺₁, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + 𝑺₁, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) A = @views 𝑺₁[:,1:𝓂.timings.nPast_not_future_and_mixed] * β„’.diagm(ones(𝓂.timings.nVars))[indexin(𝓂.timings.past_not_future_and_mixed_idx,1:𝓂.timings.nVars),:] @@ -1483,7 +1481,7 @@ function get_variance_decomposition(𝓂::β„³; βˆ‡β‚ = calculate_jacobian(𝓂.parameter_values, SS_and_pars, 𝓂) - sol, success = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) + sol, solved = calculate_first_order_solution(βˆ‡β‚; T = 𝓂.timings) variances_by_shock = reduce(hcat,[β„’.diag(calculate_covariance_forward(sol[:,[1:𝓂.timings.nPast_not_future_and_mixed..., 𝓂.timings.nPast_not_future_and_mixed+i]], T = 𝓂.timings, subset_indices = collect(1:𝓂.timings.nVars))) for i in 1:𝓂.timings.nExo]) From 82073b1fb32bf486d862119b5ce944d7f1e9070d Mon Sep 17 00:00:00 2001 From: thorek1 Date: Sat, 27 May 2023 00:26:50 +0100 Subject: [PATCH 75/83] recovers shocks and parameter values; no prior --- test/filter_free.jl | 178 +++++++++++++++----------------------------- 1 file changed, 58 insertions(+), 120 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index 1f6bd2b94..651d54cb3 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -1,5 +1,5 @@ using MacroModelling -import Turing, StatsPlots , Plots, Random +import Turing, StatsPlots, Random, Statistics import LinearAlgebra as β„’ @model RBC begin @@ -11,7 +11,7 @@ end @parameters RBC verbose = true begin Οƒ = 0.01 - Ξ± = 0.5 + Ξ± = 
0.25 Ξ² = 0.95 ρ = 0.2 Ξ΄ = 0.02 @@ -19,174 +19,112 @@ end end solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) -# draw from t scaled by approximate invariant variance) for the initial condition -m =RBC -calculate_covariance_ = MacroModelling.calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) -long_run_covariance = calculate_covariance_(solution[2]) - -T =20 -ddof = 4 -shockdist = Turing.TDist(ddof) #Shocks are student-t -Random.seed!(12345) #Fix seed to reproduce data -initial_conditions = long_run_covariance * rand(shockdist,m.timings.nPast_not_future_and_mixed) -#nitial_conditions_dist = Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) #Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions -#initial_conditions = β„’.diag.(rand(initial_conditions_dist, m.timings.nPast_not_future_and_mixed)) -# long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * randn(m.timings.nPast_not_future_and_mixed) - -Random.seed!(12345) #Fix seed to reproduce data -# Generate noise sequence -noiseshocks = rand(shockdist,T) -noise = Matrix(noiseshocks') # the Ο΅ shocks are "noise" in DifferenceEquations for SciML compatibility - -#Ο΅ = [-0.369555723973723 0.47827032464044467 0.2567178329209457 -1.1127581634083954 1.779713752762057 -1.3694068387087652 0.4598600006094857 0.1319461357213755 0.21210992474923543 0.37965007742056217 -0.36234330914698276 0.04507575971259013 0.2562242956767027 -1.4425668844506196 -0.2559534237970267 -0.40742710317783837 1.5578503125015226 0.05971261026086091 -0.5590041386255554 -0.1841854411460526] -Ο΅ = noise - -# Initialize states -state = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) -state_predictions = zeros(typeof(initial_conditions[1]),m.timings.nVars, T+1) - -aug_state = [initial_conditions -1 -Ο΅[:,1]] - - -𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) -state[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 -state_predictions[:,1] = 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 - -for t in 2:T - aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] - 1 - Ο΅[:,t]] - state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 -end - -observables_index = sort(indexin([:k, :z], m.timings.var)) -data_sim = state[observables_index,1:end] - -aug_state = [initial_conditions -1 -0] -for t in 2:T - aug_state = [state_predictions[m.timings.past_not_future_and_mixed_idx,t-1] - 1 - 0] - state_predictions[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 -end -state_deviations = data_sim[:,1:end] - state_predictions[observables_index,1:end] -sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data_sim, 2)]) +# draw shocks +periods = 10 +shocks = randn(1,periods) +shocks /= Statistics.std(shocks) # antithetic shocks +shocks .-= Statistics.mean(shocks) # antithetic shocks -dataFV = [-0.02581238618841974 -0.024755946984579915 -0.0007518239655738511 -0.02582984321259188 -0.04567755888428696 0.021196857503906794 -0.0772465811707222 
-0.008386388700111276 -0.02347363396607608 -0.033743271643453004 -0.04771401523417986 -0.0723137820802147 -0.052024995108031956 -0.04914479042856236 -0.0628064692912924 0.026322291179482583 0.05836273680164356 0.08777750705366681 -0.006357303764844118 -0.027859850762631953 0.0036979646377400615; -9.300233770305984e-6 0.0036936971929831686 -0.004043963807807812 -0.0033759710907710194 0.010452387415929751 -0.01570666004443462 0.010552736378200728 -0.0024880527304547108 -0.0018170719033046975 -0.002484513628153294 -0.004293403499836281 0.002764752391502571 0.00010219288117461296 -0.0025418043805321045 0.013917307968399776 0.005342995831650222 0.005142870198108429 -0.014549929085393539 -0.003507111919687318 0.0048886190023180905 0.0028195782119241446] -state_deviations_FV = dataFV[:,1:end] - state_predictions[observables_index,1:end] +# get simulation +simulated_data = get_irf(RBC,shocks = shocks, periods = 0)[:,:,1] |>collect -sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations_FV,1)))), state_deviations_FV[:,t]) for t in 1:size(data_sim, 2)]) -sum([Turing.logpdf(Turing.MvNormal(zeros(size(data_sim)[1]),Matrix(0.0000001*β„’.I, size(data_sim)[1], size(data_sim)[1])), state_deviations_FV[:,t]) for t in 1:size(data_sim, 2)]) - - -Plots.plot(data_sim[:,1:end]') -Plots.plot!(dataFV[:,2:end]') +StatsPlots.plot(simulated_data') +StatsPlots.plot(shocks') Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) - #Ξ± ~ MacroModelling.Beta(0.5, 0.1, ΞΌΟƒ = true) - #Ξ² ~ MacroModelling.Beta(0.95, 0.01, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) - #Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, ΞΌΟƒ = true) - #Ξ³ ~ Turing.Normal(1, 0.05) - Οƒ = 0.01 - #Ξ± ~ Turing.Uniform(0.2, 0.8) - #Ξ² ~ Turing.Uniform(0.5, 0.99) + # Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, 0.0, .1, ΞΌΟƒ = true) + # Ξ³ ~ Turing.Normal(1, 0.05) + # Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) + Ξ± ~ Turing.Uniform(0.1, 0.4) + Ξ² ~ Turing.Uniform(0.9, 0.9999) + # Ξ΄ ~ Turing.Uniform(0.0001, 0.05) + # Οƒ ~ Turing.Uniform(0.0, 0.1) + # ρ ~ Turing.Uniform(0.0, 1.0) + # Ξ³ ~ Turing.Uniform(0.0, 2.0) - Ξ± = 0.5 - Ξ² = 0.95 + Οƒ = 0.01 + # Ξ± = 0.5 + # Ξ² = 0.95 ρ = 0.2 Ξ΄ = 0.02 Ξ³ = 1. - solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) + # solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) + solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :first_order) + if solution[end] != true return Turing.@addlogprob! Inf end + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + Ο΅_draw ~ Turing.filldist(Turing.Normal(), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + long_run_covariance = calculate_covariance_(solution[2]) - x0 ~ Turing.filldist(Turing.TDist(4),m.timings.nPast_not_future_and_mixed) # Initial conditions - Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! 
- initial_conditions = long_run_covariance * x0 - - #xnought ~ Turing.filldist(Turing.Normal(0.,1.),m.timings.nPast_not_future_and_mixed) #Initial shocks - #calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) - # long_run_covariance = calculate_covariance_(solution[2]) - # initial_conditions = long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] * xnought - #SS = get_steady_state(m, parameters = (:Οƒ => Οƒ, :Ξ± => Ξ±, :Ξ² => Ξ², :ρ => ρ, :Ξ΄ => Ξ΄, :Ξ³ => Ξ³ ), algorithm = :second_order) - # initial_conditions ~ Turing.MvNormal(zeros(m.timings.nPast_not_future_and_mixed),long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions # Turing.MvNormal(SS.data.data[m.timings.past_not_future_and_mixed_idx,1],long_run_covariance[m.timings.past_not_future_and_mixed_idx,m.timings.past_not_future_and_mixed_idx] ) # Initial conditions + # initial_conditions = x0 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) - Ο΅_draw ~ Turing.filldist(Turing.TDist(4), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! - #Ο΅_draw ~ Turing.filldist(Turing.Normal(0,1), m.timings.nExo * size(data, 2)) #Shocks are Normally - distributed! - - Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + Ο΅_draw ~ Turing.filldist(Turing.Normal(), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! - state = zeros(typeof(initial_conditions[1]),m.timings.nVars, size(data, 2)) + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) - # state[m.timings.past_not_future_and_mixed_idx,1] .= initial_conditions + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) aug_state = [initial_conditions 1 Ο΅[:,1]] - state[:,1] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 for t in 2:size(data, 2) aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] 1 Ο΅[:,t]] - state[:,t] .= 𝐒₁ * aug_state + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 end observables_index = sort(indexin(observables, m.timings.var)) - state_deviations = data[:,1:end] - state[observables_index,1:end] + state_deviations = data - state[observables_index,:] - # println(sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(Ξ©*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)])) - - Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(zeros(size(data)[1]),Matrix(Ξ©*β„’.I, size(data)[1], size(data)[1])), state_deviations[:,t]) for t in 1:size(data, 2)]) - - - # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.Diagonal(ones(size(state_deviations,1)))), state_deviations[:,t]) for t in 1:size(data, 2)]) + Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) end -Ξ© = 0.0001 -loglikelihood_scaling = loglikelihood_scaling_function(RBC, data_sim,[:k,:z], Ξ©) - -n_samples = 300 -n_adapts = 50 -Ξ΄ = 0.65 -alg = Turing.NUTS(n_adapts,Ξ΄) +Ξ© = sqrt(eps()) +loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data, [:k,:z], Ξ©) -samps = Turing.sample(loglikelihood_scaling, alg, n_samples, progress = true)#, init_params = sol) +n_samples = 1000 -Plots.plot(samps[["x0[1]"]]; colordim=:parameter, legend=true) +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol) -#Plots.plot(samps[["Ξ²"]]; colordim=:parameter, legend=true) - -#Plots.plot(samps[["Ξ±"]]; colordim=:parameter, legend=true) +StatsPlots.plot(samps) #Plot true and estimated latents to see how well we backed them out +estimated_parameters = Turing.describe(samps)[1].nt.parameters +estimated_parameters_indices = indexin([Symbol("Ο΅_draw[$a]") for a in 1:periods], estimated_parameters) +estimated_means = Turing.describe(samps)[1].nt.mean +estimated_std = Turing.describe(samps)[1].nt.std + -symbol_to_int(s) = parse(Int, string(s)[9:end-1]) -Ο΅_chain = sort(samps[:, [Symbol("Ο΅_draw[$a]") for a in 1:20], 1], lt = (x,y) -> symbol_to_int(x) < symbol_to_int(y)) -tmp = Turing.describe(Ο΅_chain) -Ο΅_mean = tmp[1][:, 2] -Ο΅_std = tmp[1][:, 3] -Plots.plot(Ο΅_mean[1:end], ribbon=1.96 * Ο΅_std[1:end], label="Posterior mean", title = "First-Order Joint: Estimated Latents") -Plots.plot!(noise', label="True values") +StatsPlots.plot(estimated_means[estimated_parameters_indices], + ribbon = 1.96 * estimated_std[estimated_parameters_indices], + label = "Posterior mean", + title = "First-Order Joint: Estimated Latents") +StatsPlots.plot!(shocks', label = "True values") From 58b1837dd1932912a6b4a872163b28bbdb58399e Mon Sep 17 00:00:00 2001 From: thorek1 Date: Sat, 27 May 2023 00:30:05 +0100 Subject: [PATCH 76/83] more annotation --- test/filter_free.jl | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index 651d54cb3..228d2e54d 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -17,8 +17,6 @@ end Ξ΄ = 0.02 Ξ³ = 1. 
end -solution = get_solution(RBC, RBC.parameter_values, algorithm = :second_order) - # draw shocks periods = 10 @@ -29,13 +27,12 @@ shocks .-= Statistics.mean(shocks) # antithetic shocks # get simulation simulated_data = get_irf(RBC,shocks = shocks, periods = 0)[:,:,1] |>collect - - -StatsPlots.plot(simulated_data') +# plot simulation +plot_irf(RBC,shocks = shocks, periods = 0) StatsPlots.plot(shocks') - +# define loglikelihood model Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) From c67c5e6f1de870e2e41fed0ef9a227e25ee81cce Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 29 May 2023 13:04:46 +0100 Subject: [PATCH 77/83] delicate balance of shock llh and deviation loss --- test/filter_free.jl | 134 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 113 insertions(+), 21 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index 228d2e54d..6146a1441 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -25,15 +25,19 @@ shocks /= Statistics.std(shocks) # antithetic shocks shocks .-= Statistics.mean(shocks) # antithetic shocks # get simulation -simulated_data = get_irf(RBC,shocks = shocks, periods = 0)[:,:,1] |>collect +simulated_data = get_irf(RBC,shocks = shocks, periods = 0, levels = true)#(:k,:,:) |>collect # plot simulation plot_irf(RBC,shocks = shocks, periods = 0) StatsPlots.plot(shocks') +function Ο΅_loss(Ξ”; Ο΅ = .01, p = 2) + abs(Ξ”) > Ο΅ ? abs(Ξ”)^p : 0 +end + # define loglikelihood model -Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) +Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) @@ -41,31 +45,35 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) # Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, 0.0, .1, ΞΌΟƒ = true) # Ξ³ ~ Turing.Normal(1, 0.05) # Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) - Ξ± ~ Turing.Uniform(0.1, 0.4) - Ξ² ~ Turing.Uniform(0.9, 0.9999) + + Ξ± ~ Turing.Uniform(0.15, 0.45) + # Ξ² ~ Turing.Uniform(0.92, 0.9999) # Ξ΄ ~ Turing.Uniform(0.0001, 0.05) # Οƒ ~ Turing.Uniform(0.0, 0.1) # ρ ~ Turing.Uniform(0.0, 1.0) - # Ξ³ ~ Turing.Uniform(0.0, 2.0) + # Ξ³ ~ Turing.Uniform(0.5, 1.5) Οƒ = 0.01 - # Ξ± = 0.5 - # Ξ² = 0.95 + # Ξ± = 0.25 + Ξ² = 0.95 ρ = 0.2 Ξ΄ = 0.02 Ξ³ = 1. - # solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :second_order) - solution = get_solution(m, [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³], algorithm = :first_order) + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + shock_distribution = Turing.Normal() + + # Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + solution = get_solution(m, parameters, algorithm = algorithm) if solution[end] != true return Turing.@addlogprob! Inf end - + # draw_shocks(m) x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions - Ο΅_draw ~ Turing.filldist(Turing.Normal(), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! 
- calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) long_run_covariance = calculate_covariance_(solution[2]) @@ -75,15 +83,15 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) - Ο΅_draw ~ Turing.filldist(Turing.Normal(), m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) aug_state = [initial_conditions - 1 - Ο΅[:,1]] + 1 + Ο΅[:,1]] state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 @@ -96,19 +104,48 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables,Ξ©) end observables_index = sort(indexin(observables, m.timings.var)) - - state_deviations = data - state[observables_index,:] - Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) + state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + # println(sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)])) + # println(-sum(abs.(state_deviations).^5) / length(data) * 1e3) + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) + # Turing.@addlogprob! -sum(abs.(state_deviations .* 1e4).^4) / length(data) + Turing.@addlogprob! -sum(Ο΅_loss.(state_deviations)) / length(data) * 2e6 end - -Ξ© = sqrt(eps()) -loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data, [:k,:z], Ξ©) +Ξ© = 1e-8#eps() +loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:k,:,:Shock_matrix), [:k], Ξ©) # Kalman +loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free n_samples = 1000 +# solution = get_solution(RBC, RBC.parameter_values, algorithm = :first_order)[1] + +# simulated_data(:k,:,:Shock_matrix) .- solution[1][observables_index...] 
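+# note: the second loglikelihood_scaling assignment above overwrites the first, so the sampling
+# calls below use the filter-free version; Ξ© is passed into the model but at this point only
+# referenced in the commented-out MvNormal term, the active data penalty is the Ο΅_loss term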
+ samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol) +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) + +interval = -.01:.0001:.01 +interval = -1:.01:1 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(x->4*x^3,interval) +StatsPlots.plot!(x->3*x*abs(x),interval) +StatsPlots.plot!(x->2*x,interval) + +interval = -.01:.0001:.01 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + StatsPlots.plot(samps) @@ -125,3 +162,58 @@ StatsPlots.plot(estimated_means[estimated_parameters_indices], title = "First-Order Joint: Estimated Latents") StatsPlots.plot!(shocks', label = "True values") + + + + +# testing functions + +function calculate_filter_free_llh(m, parameters, data, observables; algorithm = :first_order, shock_distribution = Turing.Normal(), Ξ©::Float64 = sqrt(eps())) + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! Inf + end + + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] + + Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +function draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + return x0 +end From d2274ed6c00859dc195fddf977f3f825a85a7d29 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 29 May 2023 17:34:00 +0100 Subject: [PATCH 78/83] recovers parameters and shocks to reasonable acc --- test/filter_free.jl | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index 6146a1441..fae361dfb 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -1,5 +1,5 @@ using MacroModelling -import Turing, StatsPlots, Random, Statistics +import Turing, StatsPlots, Random, Statistics, DynamicHMC import LinearAlgebra as β„’ @model RBC begin @@ -53,8 +53,8 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) # ρ ~ Turing.Uniform(0.0, 1.0) # Ξ³ ~ Turing.Uniform(0.5, 1.5) - Οƒ = 0.01 # Ξ± = 0.25 + Οƒ = 0.01 Ξ² = 0.95 ρ = 0.2 Ξ΄ = 0.02 @@ -106,15 +106,25 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) observables_index = sort(indexin(observables, m.timings.var)) state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + # for (i,o) in enumerate(observables_index) + # if solution[1][o] != 0 + # state_deviations[i,:] /= solution[1][o] + # end + # end + # println(sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)])) # println(-sum(abs.(state_deviations).^5) / length(data) * 1e3) - # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) + + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t] .^ 3 .* Ξ©) for t in 1:size(data, 2)]) + # Turing.@addlogprob! -sum(abs.(state_deviations .* 1e4).^4) / length(data) - Turing.@addlogprob! -sum(Ο΅_loss.(state_deviations)) / length(data) * 2e6 + # Turing.@addlogprob! -sum(Ο΅_loss.(state_deviations)) / length(data) * 2e6 end -Ξ© = 1e-8#eps() -loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:k,:,:Shock_matrix), [:k], Ξ©) # Kalman +Ξ© = 1e5#eps() +# loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:k,:,:Shock_matrix), [:k], Ξ©) # Kalman loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free n_samples = 1000 From ff502a52b996ccbfa9ad169240bc52713e11f90b Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 29 May 2023 17:45:25 +0100 Subject: [PATCH 79/83] % dev. works as well --- test/filter_free.jl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index fae361dfb..e268744bd 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -107,23 +107,23 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] 
- # for (i,o) in enumerate(observables_index) - # if solution[1][o] != 0 - # state_deviations[i,:] /= solution[1][o] - # end - # end + for (i,o) in enumerate(observables_index) + if solution[1][o] != 0 && (all(state[o,:] .+ solution[1][o] .> 0) || all(state[o,:] .+ solution[1][o] .< 0)) + state_deviations[i,:] /= solution[1][o] + end + end # println(sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)])) # println(-sum(abs.(state_deviations).^5) / length(data) * 1e3) - # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) - Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t] .^ 3 .* Ξ©) for t in 1:size(data, 2)]) + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t] .* Ξ©) for t in 1:size(data, 2)]) + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t] .^ 3 .* Ξ©) for t in 1:size(data, 2)]) # Turing.@addlogprob! -sum(abs.(state_deviations .* 1e4).^4) / length(data) # Turing.@addlogprob! -sum(Ο΅_loss.(state_deviations)) / length(data) * 2e6 end -Ξ© = 1e5#eps() +Ξ© = 1e4#eps() # loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:k,:,:Shock_matrix), [:k], Ξ©) # Kalman loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free From 2e330b20d78f98c23397ddf5496e8002b4226cf8 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 29 May 2023 22:06:42 +0200 Subject: [PATCH 80/83] consistent results between kalman and filter free --- test/filter_free.jl | 99 ++++++++++++++++++++------------------------- 1 file changed, 43 insertions(+), 56 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index e268744bd..1bf5f7239 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -32,9 +32,9 @@ plot_irf(RBC,shocks = shocks, periods = 0) StatsPlots.plot(shocks') -function Ο΅_loss(Ξ”; Ο΅ = .01, p = 2) - abs(Ξ”) > Ο΅ ? abs(Ξ”)^p : 0 -end +# function Ο΅_loss(Ξ”; Ο΅ = .01, p = 2) +# abs(Ξ”) > Ο΅ ? abs(Ξ”)^p : 0 +# end # define loglikelihood model Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) @@ -47,84 +47,71 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) # Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) Ξ± ~ Turing.Uniform(0.15, 0.45) - # Ξ² ~ Turing.Uniform(0.92, 0.9999) - # Ξ΄ ~ Turing.Uniform(0.0001, 0.05) - # Οƒ ~ Turing.Uniform(0.0, 0.1) - # ρ ~ Turing.Uniform(0.0, 1.0) - # Ξ³ ~ Turing.Uniform(0.5, 1.5) + Ξ² ~ Turing.Uniform(0.92, 0.9999) + Ξ΄ ~ Turing.Uniform(0.0001, 0.1) + Οƒ ~ Turing.Uniform(0.0, 0.1) + ρ ~ Turing.Uniform(0.0, 1.0) + Ξ³ ~ Turing.Uniform(0.0, 1.5) # Ξ± = 0.25 - Οƒ = 0.01 - Ξ² = 0.95 - ρ = 0.2 - Ξ΄ = 0.02 - Ξ³ = 1. + # Ξ² = 0.95 + # Οƒ = 0.01 + # ρ = 0.2 + # Ξ΄ = 0.02 + # Ξ³ = 1. algorithm = :first_order parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] shock_distribution = Turing.Normal() - # Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) - solution = get_solution(m, parameters, algorithm = algorithm) + # solution = get_solution(m, parameters, algorithm = algorithm) - if solution[end] != true - return Turing.@addlogprob! 
Inf - end - # draw_shocks(m) - x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + # if solution[end] != true + # return Turing.@addlogprob! Inf + # end + # # draw_shocks(m) + # x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions - calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + # calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) - long_run_covariance = calculate_covariance_(solution[2]) + # long_run_covariance = calculate_covariance_(solution[2]) - initial_conditions = long_run_covariance * x0 - # initial_conditions = x0 + # initial_conditions = long_run_covariance * x0 + # # initial_conditions = x0 - 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + # 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) - Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + # Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) - Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + # Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) - state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + # state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) - aug_state = [initial_conditions - 1 - Ο΅[:,1]] + # aug_state = [initial_conditions + # 1 + # Ο΅[:,1]] - state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + # state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 - for t in 2:size(data, 2) - aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] - 1 - Ο΅[:,t]] + # for t in 2:size(data, 2) + # aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + # 1 + # Ο΅[:,t]] - state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 - end + # state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + # end - observables_index = sort(indexin(observables, m.timings.var)) + # observables_index = sort(indexin(observables, m.timings.var)) - state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] - - for (i,o) in enumerate(observables_index) - if solution[1][o] != 0 && (all(state[o,:] .+ solution[1][o] .> 0) || all(state[o,:] .+ solution[1][o] .< 0)) - state_deviations[i,:] /= solution[1][o] - end - end - - # println(sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)])) - # println(-sum(abs.(state_deviations).^5) / length(data) * 1e3) - - Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t] .* Ξ©) for t in 1:size(data, 2)]) - # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(β„’.I(size(data,1))), state_deviations[:,t] .^ 3 .* Ξ©) for t in 1:size(data, 2)]) + # state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] - # Turing.@addlogprob! -sum(abs.(state_deviations .* 1e4).^4) / length(data) - # Turing.@addlogprob! -sum(Ο΅_loss.(state_deviations)) / length(data) * 2e6 + # Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) end -Ξ© = 1e4#eps() -# loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:k,:,:Shock_matrix), [:k], Ξ©) # Kalman +Ξ© = 1e-3#eps() +loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©) # Kalman loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free n_samples = 1000 From 3dc808217f4813c81de682a434004ab0a9db8102 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Mon, 29 May 2023 23:12:32 +0200 Subject: [PATCH 81/83] comments --- test/filter_free.jl | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/test/filter_free.jl b/test/filter_free.jl index 1bf5f7239..546c728bf 100644 --- a/test/filter_free.jl +++ b/test/filter_free.jl @@ -1,5 +1,5 @@ using MacroModelling -import Turing, StatsPlots, Random, Statistics, DynamicHMC +import Turing, StatsPlots, Random, Statistics import LinearAlgebra as β„’ @model RBC begin @@ -110,7 +110,7 @@ Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) end -Ξ© = 1e-3#eps() +Ξ© = 1e-2#eps() loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©) # Kalman loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free @@ -119,9 +119,31 @@ n_samples = 1000 # solution = get_solution(RBC, RBC.parameter_values, algorithm = :first_order)[1] # simulated_data(:k,:,:Shock_matrix) .- solution[1][observables_index...] +# using AdvancedHMC +# D = 18 -samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol) -samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) +# n_samples, n_adapts = 1_000, 300 +# metric = DiagEuclideanMetric(D) +# hamiltonian = Hamiltonian(metric, logprob, +# ForwardDiff) + +# integrator = Leapfrog(find_good_stepsize(hamiltonian, +# initial_theta)) + +# proposal = NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator) +# adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), +# StepSizeAdaptor(0.8, integrator)) + +# samples, stats = sample(hamiltonian, proposal, initial_theta, n_samples, adaptor, n_adapts; progress=true) + +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol +# samps = Turing.sample(loglikelihood_scaling, Turing.HMCDA(1000,.65,.75;init_Ο΅ = .05), n_samples, progress = true)#, init_params = sol) +# samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) + + +# filter free generates good parameter estimates but does not necessarily nail the latent states and shocks +# std of MvNormal determines speed, convergence (too small no convergence), accuracy (smaller value will get more info on latent shocks and shock related parameters [shock size and persistence]) +# the logic behind it appears to be: ideally the data is perfectly matched so you want the MvNormal to have a very small std but then the sampler will focus on matching the data since thats where the gradients are very large. 
the gradients regarding parameter priors are too small to have any influence and the sampler gets stuck if parameters do change because the gradients to match the data dominate. interval = -.01:.0001:.01 interval = -1:.01:1 From 911b0124cbae5c7996cf3e43ee7aec609697ab89 Mon Sep 17 00:00:00 2001 From: thorek1 Date: Thu, 1 Jun 2023 00:06:08 +0200 Subject: [PATCH 82/83] test museinference --- Project.toml | 7 +- test/filter_free_muse_inference.jl | 252 +++++++++++++++++++++++++++++ 2 files changed, 258 insertions(+), 1 deletion(-) create mode 100644 test/filter_free_muse_inference.jl diff --git a/Project.toml b/Project.toml index 3d11a0324..406927d03 100644 --- a/Project.toml +++ b/Project.toml @@ -4,12 +4,16 @@ authors = ["Thore Kockerols "] version = "0.1.23" [deps] +AbstractDifferentiation = "c29ec348-61ec-40c8-8164-b8c60e9d9f3d" +AdvancedHMC = "0bf59076-c3b1-5ca4-86bd-e02cd72cde3d" AxisKeys = "94b1ba4f-4ee9-5380-92f1-94cde586c3c5" BlockTriangularForm = "adeb47b7-70bf-415a-bb24-c358563e873a" ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +DifferentiableStateSpaceModels = "beacd9db-9e5e-4956-9b09-459a4b2028df" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +DynamicHMC = "bbc10e6e-7c05-544b-b16e-64fede858acb" DynarePreprocessor_jll = "23afba7c-24e5-5ee2-bc2c-b42e07f0492a" FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" @@ -21,6 +25,7 @@ LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearMaps = "7a12625a-238d-50fd-b39a-03d52299707e" MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +MuseInference = "43b88160-90c7-4f71-933b-9d65205cd921" Optim = "429524aa-4258-5aef-a3af-852621145aeb" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" @@ -68,7 +73,7 @@ StatsPlots = "^0.15" Subscripts = "^0.1" SymPy = "^1" Symbolics = "^5" -Turing = "^0.25" +Turing = "^0.21" julia = "1.8" [extras] diff --git a/test/filter_free_muse_inference.jl b/test/filter_free_muse_inference.jl new file mode 100644 index 000000000..d4ab1fb19 --- /dev/null +++ b/test/filter_free_muse_inference.jl @@ -0,0 +1,252 @@ +using MacroModelling +import Turing, StatsPlots, Random, Statistics +import LinearAlgebra as β„’ + +@model RBC begin + 1 / (- k[0] + (1 - Ξ΄ ) * k[-1] + (exp(z[-1]) * k[-1]^Ξ±)) = (Ξ² / (- k[+1] + (1 - Ξ΄) * k[0] +(exp(z[0]) * k[0]^Ξ±))) * (Ξ±* exp(z[0]) * k[0] ^(Ξ± - 1) + (1 - Ξ΄)) ; + # 1 / c[0] - (Ξ² / c[1]) * (Ξ± * exp(z[1]) * k[1]^(Ξ± - 1) + (1 - Ξ΄)) =0 + # q[0] = exp(z[0]) * k[0]^Ξ± + z[0] = ρ * z[-1] - Οƒ* EPSz[x] +end + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.25 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. +end + +# draw shocks +periods = 100 +shocks = randn(1,periods) +shocks /= Statistics.std(shocks) # antithetic shocks +shocks .-= Statistics.mean(shocks) # antithetic shocks + +# get simulation +simulated_data = get_irf(RBC,shocks = shocks, periods = 0, levels = true)#(:k,:,:) |>collect + +# plot simulation +plot_irf(RBC,shocks = shocks, periods = 0) +StatsPlots.plot(shocks') + + +# function Ο΅_loss(Ξ”; Ο΅ = .01, p = 2) +# abs(Ξ”) > Ο΅ ? 
abs(Ξ”)^p : 0 +# end + +# define loglikelihood model +Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) + #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) + #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + Ξ΄ ~ MacroModelling.Normal(0.02, 0.05) + # Ξ³ ~ Turing.Normal(1, 0.05) + Οƒ ~ MacroModelling.Normal(0.01, 0.05) + + # Ξ± ~ Turing.Uniform(0.15, 0.45) + # Ξ² ~ Turing.Uniform(0.92, 0.9999) + # Ξ΄ ~ Turing.Uniform(0.0, 1.0) + # Οƒ ~ Turing.Uniform(0.0, 1.0) + # ρ ~ Turing.Uniform(0.0, 1.0) + # Ξ³ ~ Turing.Uniform(0.0, 1.5) + + Ξ± = 0.25 + Ξ² = 0.95 + # Ξ΄ = 0.02 + # Οƒ = 0.01 + ρ = 0.2 + Ξ³ = 1. + + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + shock_distribution = Turing.Normal() + + # Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! Inf + end + # draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + # state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + states ~ Turing.MvNormal(vec(state[observables_index,:] .+ solution[1][observables_index...]), Ξ© * β„’.I(length(state[observables_index,:]))) + + # Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +Ξ© = 1e-0#eps() +# loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©) # Kalman +loglikelihood_scaling = loglikelihood_scaling_function(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # Filter free + +n_samples = 1000 + +# solution = get_solution(RBC, RBC.parameter_values, algorithm = :first_order)[1] + +# simulated_data(:k,:,:Shock_matrix) .- solution[1][observables_index...] 
+# using AdvancedHMC +# D = 18 + +# n_samples, n_adapts = 1_000, 300 +# metric = DiagEuclideanMetric(D) +# hamiltonian = Hamiltonian(metric, logprob, +# ForwardDiff) + +# integrator = Leapfrog(find_good_stepsize(hamiltonian, +# initial_theta)) + +# proposal = NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator) +# adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), +# StepSizeAdaptor(0.8, integrator)) + +# samples, stats = sample(hamiltonian, proposal, initial_theta, n_samples, adaptor, n_adapts; progress=true) + +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol +# samps = Turing.sample(loglikelihood_scaling, Turing.HMCDA(1000,.65,.75;init_Ο΅ = .05), n_samples, progress = true)#, init_params = sol) +# samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) + + +using MuseInference, StatsPlots +import Turing +using AbstractDifferentiation + +model = loglikelihood_scaling | (states = vec(simulated_data(:k,:,:Shock_matrix)'),) +prob = MuseInference.TuringMuseProblem(model, params=(:Ξ΄, :Οƒ)) + +result = muse(prob, (Ξ΄=0.2, Οƒ=0.01,)) + +# get solution +result = muse(prob, (Ξ΄=0.5, Οƒ=0.1)) + +# filter free generates good parameter estimates but does not necessarily nail the latent states and shocks +# std of MvNormal determines speed, convergence (too small no convergence), accuracy (smaller value will get more info on latent shocks and shock related parameters [shock size and persistence]) +# the logic behind it appears to be: ideally the data is perfectly matched so you want the MvNormal to have a very small std but then the sampler will focus on matching the data since thats where the gradients are very large. the gradients regarding parameter priors are too small to have any influence and the sampler gets stuck if parameters do change because the gradients to match the data dominate. + +interval = -.01:.0001:.01 +interval = -1:.01:1 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(x->4*x^3,interval) +StatsPlots.plot!(x->3*x*abs(x),interval) +StatsPlots.plot!(x->2*x,interval) + +interval = -.01:.0001:.01 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(samps) + +#Plot true and estimated latents to see how well we backed them out +estimated_parameters = Turing.describe(samps)[1].nt.parameters +estimated_parameters_indices = indexin([Symbol("Ο΅_draw[$a]") for a in 1:periods], estimated_parameters) +estimated_means = Turing.describe(samps)[1].nt.mean +estimated_std = Turing.describe(samps)[1].nt.std + + +StatsPlots.plot(estimated_means[estimated_parameters_indices], + ribbon = 1.96 * estimated_std[estimated_parameters_indices], + label = "Posterior mean", + title = "First-Order Joint: Estimated Latents") +StatsPlots.plot!(shocks', label = "True values") + + + + + +# testing functions + +function calculate_filter_free_llh(m, parameters, data, observables; algorithm = :first_order, shock_distribution = Turing.Normal(), Ξ©::Float64 = sqrt(eps())) + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! 
Inf + end + + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] + + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +function draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + return x0 +end From 3f9a1b332d0bae2121ee23c75a0acc6efc261e22 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1ty=C3=A1s=20Farkas?= Date: Fri, 2 Jun 2023 16:36:25 +0200 Subject: [PATCH 83/83] Filter-free advantage with t-shocks Monte Carlo --- test/ff_ng.jl | 335 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 335 insertions(+) create mode 100644 test/ff_ng.jl diff --git a/test/ff_ng.jl b/test/ff_ng.jl new file mode 100644 index 000000000..3d00f4974 --- /dev/null +++ b/test/ff_ng.jl @@ -0,0 +1,335 @@ +using MacroModelling +import Turing, StatsPlots, Random, Statistics +import LinearAlgebra as β„’ + +using HypothesisTests, Distributions + +@model RBC begin + 1 / (- k[0] + (1 - Ξ΄ ) * k[-1] + (exp(z[-1]) * k[-1]^Ξ±)) = (Ξ² / (- k[+1] + (1 - Ξ΄) * k[0] +(exp(z[0]) * k[0]^Ξ±))) * (Ξ±* exp(z[0]) * k[0] ^(Ξ± - 1) + (1 - Ξ΄)) ; + # 1 / c[0] - (Ξ² / c[1]) * (Ξ± * exp(z[1]) * k[1]^(Ξ± - 1) + (1 - Ξ΄)) =0 + # q[0] = exp(z[0]) * k[0]^Ξ± + z[0] = ρ * z[-1] - Οƒ* EPSz[x] +end + +@parameters RBC verbose = true begin + Οƒ = 0.01 + Ξ± = 0.25 + Ξ² = 0.95 + ρ = 0.2 + Ξ΄ = 0.02 + Ξ³ = 1. 
+end + +# draw shocks +Random.seed!(1) +periods = 20 +shockdist = Turing.TDist(3) # Turing.Beta(10,1) # +shocks = rand(shockdist,1,periods) # shocks = randn(1,periods) + +#shocks /= Statistics.std(shocks) # antithetic shocks +#shocks .-= Statistics.mean(shocks) # antithetic shocks +# Test for non-normality + HypothesisTests.ExactOneSampleKSTest(shocks[1,:],Turing.Normal(0,1)) + StatsPlots.plot(Distributions.Normal(0,1), fill=(0, .5,:blue)) + StatsPlots.density!(shocks') +# get simulation +simulated_data = get_irf(RBC,shocks = shocks, periods = 0, levels = true)#(:k,:,:) |>collect + +# plot simulation +MacroModelling.plot_irf(RBC,shocks = shocks, periods = 0) +#StatsPlots.plot(shocks') +Ξ© = 10^(-5)# eps() +n_samples = 1000 + + +# define loglikelihood model - KF +Turing.@model function loglikelihood_scaling_function(m, data, observables, Ξ©) + #Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) + #ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + # Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, 0.0, .1, ΞΌΟƒ = true) + # Ξ³ ~ Turing.Normal(1, 0.05) + # Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) + + Ξ± ~ Turing.Uniform(0.15, 0.45) + Ξ² ~ Turing.Uniform(0.92, 0.9999) + Ξ΄ ~ Turing.Uniform(0.0001, 0.1) + Οƒ ~ Turing.Uniform(0.0, 0.1) + ρ ~ Turing.Uniform(0.0, 1.0) + Ξ³ ~ Turing.Uniform(0.0, 1.5) + + # Ξ± = 0.25 + # Ξ² = 0.95 + # Οƒ = 0.01 + # ρ = 0.2 + # Ξ΄ = 0.02 + # Ξ³ = 1. + + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + shock_distribution = Turing.Normal() + + Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + # solution = get_solution(m, parameters, algorithm = algorithm) + + # if solution[end] != true + # return Turing.@addlogprob! Inf + # end + # # draw_shocks(m) + # x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + # calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + # long_run_covariance = calculate_covariance_(solution[2]) + + # initial_conditions = long_run_covariance * x0 + # # initial_conditions = x0 + + # 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + # Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + + # Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + # state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + # aug_state = [initial_conditions + # 1 + # Ο΅[:,1]] + + # state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + # for t in 2:size(data, 2) + # aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + # 1 + # Ο΅[:,t]] + + # state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + # end + + # observables_index = sort(indexin(observables, m.timings.var)) + + # state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + # Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +loglikelihood_scaling = loglikelihood_scaling_function(RBC, simulated_data(:,:,:Shock_matrix), [:k], Ξ©) # Kalman +samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(), n_samples, progress = true)#, init_params = sol + + +StatsPlots.plot(samps) +kf_estimated_parameters = Turing.describe(samps)[1].nt.parameters +kf_estimated_means = Turing.describe(samps)[1].nt.mean +kf_estimated_std = Turing.describe(samps)[1].nt.std +kfmean= kf_estimated_means +kfstd = kf_estimated_std +Turing.@model function loglikelihood_scaling_function_ff(m, data, observables, Ξ©) #, kfmean, kfstd + + # Οƒ ~ MacroModelling.Beta(0.01, 0.02, ΞΌΟƒ = true) + # Ξ± ~ MacroModelling.Beta(0.25, 0.15, 0.1, .4, ΞΌΟƒ = true) + # Ξ² ~ MacroModelling.Beta(0.95, 0.05, .9, .9999, ΞΌΟƒ = true) + # ρ ~ MacroModelling.Beta(0.2, 0.1, ΞΌΟƒ = true) + # Ξ΄ ~ MacroModelling.Beta(0.02, 0.05, 0.0, .1, ΞΌΟƒ = true) + # Ξ³ ~ Turing.Normal(1, 0.05) + #Οƒ ~ MacroModelling.InverseGamma(0.01, 0.05, ΞΌΟƒ = true) + + Ξ± ~ Turing.Uniform(0.15, 0.45) + Ξ² ~ Turing.Uniform(0.92, 0.9999) + Ξ΄ ~ Turing.Uniform(0.0001, 0.1) + Οƒ ~ Turing.Uniform(0.0, 0.1) + ρ ~ Turing.Uniform(0.0, 1.0) + Ξ³ ~ Turing.Uniform(0.0, 1.5) + + #Ξ± ~ Turing.Uniform(kfmean[1]-2*kfstd[1], kfmean[1]+2*kfstd[1]) + #Ξ² ~ Turing.Uniform(kfmean[2]-2*kfstd[2], kfmean[2]+2*kfstd[2]) + #Ξ΄ ~ Turing.Uniform(kfmean[3]-2*kfstd[3], kfmean[3]+2*kfstd[3]) + #Οƒ ~ Turing.Uniform(0.0, kfmean[4]+2*kfstd[4]) + #ρ ~ Turing.Uniform(0.0, kfmean[5]+2*kfstd[5]) + #Ξ³ ~ Turing.Uniform(0.0, kfmean[6]+2*kfstd[6]) + + + # Ξ± = 0.25 + # Ξ² = 0.95 + # Οƒ = 0.01 + # ρ = 0.2 + # Ξ΄ = 0.02 + # Ξ³ = 1. + + algorithm = :first_order + parameters = [Οƒ, Ξ±, Ξ², ρ, Ξ΄, Ξ³] + # skewness + shock_distribution = Turing.TDist(3.0) + + # Turing.@addlogprob! calculate_kalman_filter_loglikelihood(m, data(observables), observables; parameters = parameters) + + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! Inf + end + # draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] .- solution[1][observables_index...] + + Turing.@addlogprob! 
sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +loglikelihood_scaling_ff = loglikelihood_scaling_function_ff(RBC, collect(simulated_data(:k,:,:Shock_matrix))', [:k], Ξ©) # ,kf_estimated_means, kf_estimated_std # Filter free + +n_samples = 1000 +samps_ff = Turing.sample(loglikelihood_scaling_ff, Turing.NUTS(), n_samples, progress = true)#, init_params = sol +StatsPlots.plot(samps_ff) + +ff_estimated_parameters = Turing.describe(samps_ff)[1].nt.parameters +ff_estimated_means = Turing.describe(samps_ff)[1].nt.mean +ff_estimated_std = Turing.describe(samps_ff)[1].nt.std + + +ff_bias= ( ff_estimated_means[1:6]- RBC.parameter_values[[2, 3, 5, 1, 4,6]]) +kf_bias= ( kf_estimated_means[1:6]- RBC.parameter_values[[2, 3, 5, 1, 4,6]]) + +ff_z = (ff_bias)./ff_estimated_std[1:6] +kf_z = ( kf_bias)./kf_estimated_std[1:6] + +grouplabel = repeat(["KF", "FF"], inner = 6) + +StatsPlots.groupedbar( repeat(kf_estimated_parameters, outer =2) , [kf_bias ff_bias], group = grouplabel, xlabel = "Structural Parameters Biases") +StatsPlots.groupedbar( repeat(kf_estimated_parameters, outer =2), [kf_z ff_z], group = grouplabel, xlabel = "Structural Parameter z-scores") +data = KeyedArray(Array(collect(simulated_data(:k,:,:Shock_matrix)))',row = [:k], col = 1:1:20) + + + +kf_filtered_shocks = MacroModelling.get_estimated_shocks(RBC, data, parameters = kf_estimated_means[[4, 1, 2, 5, 3,6]]) + + +ff_estimated_parameters_indices = indexin([Symbol("Ο΅_draw[$a]") for a in 1:periods], ff_estimated_parameters ) +StatsPlots.plot(ff_estimated_means[ff_estimated_parameters_indices], + ribbon = 1.96 * ff_estimated_std[ff_estimated_parameters_indices], + label = "Posterior mean", + title = "Joint: Estimated Latents") +StatsPlots.plot!(shocks', label = "True values") +StatsPlots.plot!(collect(kf_filtered_shocks'), label = "KF filtered shocks") + + +# samps = Turing.sample(loglikelihood_scaling, Turing.HMCDA(1000,.65,.75;init_Ο΅ = .05), n_samples, progress = true)#, init_params = sol) +# samps = Turing.sample(loglikelihood_scaling, Turing.NUTS(1000, .65; init_Ο΅ = .01), n_samples, progress = true)#, init_params = sol) + + +# filter free generates good parameter estimates but does not necessarily nail the latent states and shocks +# std of MvNormal determines speed, convergence (too small no convergence), accuracy (smaller value will get more info on latent shocks and shock related parameters [shock size and persistence]) +# the logic behind it appears to be: ideally the data is perfectly matched so you want the MvNormal to have a very small std but then the sampler will focus on matching the data since thats where the gradients are very large. the gradients regarding parameter priors are too small to have any influence and the sampler gets stuck if parameters do change because the gradients to match the data dominate. 
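+# a rough way to see this scale problem: for a fixed deviation Ξ” the Gaussian term contributes
+# about -Δ² / (2σ²) to the log posterior (Οƒ being the deviation scale set via Ξ©), so its gradient
+# Ξ” / σ² grows a hundredfold whenever Οƒ is shrunk by a factor of 10; e.g.
+# Turing.logpdf(Turing.Normal(0, 1e-2), 1e-3) β‰ˆ 3.7 while Turing.logpdf(Turing.Normal(0, 1e-4), 1e-3) β‰ˆ -41.7,
+# which is why a very tight deviation scale makes the sampler chase the data and ignore the parameter priors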
+ +interval = -.01:.0001:.01 +interval = -1:.01:1 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(x->4*x^3,interval) +StatsPlots.plot!(x->3*x*abs(x),interval) +StatsPlots.plot!(x->2*x,interval) + +interval = -.01:.0001:.01 + +StatsPlots.plot(x->abs(x)^4,interval) +StatsPlots.plot!(x->abs(x)^3,interval) +StatsPlots.plot!(x->abs(x)^2,interval) + + + +StatsPlots.plot(samps_ff) + +#Plot true and estimated latents to see how well we backed them out + + + + + +# testing functions + +function calculate_filter_free_llh(m, parameters, data, observables; algorithm = :first_order, shock_distribution = Turing.Normal(), Ξ©::Float64 = sqrt(eps())) + solution = get_solution(m, parameters, algorithm = algorithm) + + if solution[end] != true + return Turing.@addlogprob! Inf + end + + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + + calculate_covariance_ = calculate_covariance_AD(solution[2], T = m.timings, subset_indices = collect(1:m.timings.nVars)) + + long_run_covariance = calculate_covariance_(solution[2]) + + initial_conditions = long_run_covariance * x0 + # initial_conditions = x0 + + 𝐒₁ = hcat(solution[2][:,1:m.timings.nPast_not_future_and_mixed], zeros(m.timings.nVars), solution[2][:,m.timings.nPast_not_future_and_mixed+1:end]) + + Ο΅_draw ~ Turing.filldist(shock_distribution, m.timings.nExo * size(data, 2)) #Shocks are t-distributed! + + Ο΅ = reshape(Ο΅_draw, m.timings.nExo, size(data, 2)) + + state = zeros(typeof(initial_conditions[1]), m.timings.nVars, size(data, 2)) + + aug_state = [initial_conditions + 1 + Ο΅[:,1]] + + state[:,1] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + + for t in 2:size(data, 2) + aug_state = [state[m.timings.past_not_future_and_mixed_idx,t-1] + 1 + Ο΅[:,t]] + + state[:,t] .= 𝐒₁ * aug_state# + solution[3] * β„’.kron(aug_state, aug_state) / 2 + end + + observables_index = sort(indexin(observables, m.timings.var)) + + state_deviations = data - state[observables_index,:] + + Turing.@addlogprob! sum([Turing.logpdf(Turing.MvNormal(Ξ© * β„’.I(size(data,1))), state_deviations[:,t]) for t in 1:size(data, 2)]) +end + +function draw_shocks(m) + x0 ~ Turing.filldist(Turing.Normal(), m.timings.nPast_not_future_and_mixed) # Initial conditions + return x0 +end
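
The filter-free approach exercised throughout these test scripts reduces to one recursion: simulate the first-order solution forward from the sampled initial state and shocks, and score the distance between the simulated observables and the data. Below is a minimal self-contained sketch of that recursion, stripped of the Turing model and of the augmented-state bookkeeping; the transition matrix A, the shock loading B, the data, the shocks and the helper name filter_free_loglikelihood are illustrative placeholders, not the matrices returned by get_solution.

import LinearAlgebra as β„’
import Distributions

# x[t] = A * x[t-1] + B * Ξ΅[t]; the data is scored against the simulated observables
function filter_free_loglikelihood(A, B, data, Ο΅, observable_idx, x0, Ξ©)
    n_obs, T = size(data)
    x = copy(x0)
    llh = 0.0
    for t in 1:T
        x = A * x + B * Ο΅[:, t]                      # simulate the states forward from the drawn shocks
        deviation = data[:, t] - x[observable_idx]   # mismatch between data and simulated observables
        llh += Distributions.logpdf(Distributions.MvNormal(zeros(n_obs), Ξ© * β„’.I(n_obs)), deviation)
    end
    return llh
end

# toy dimensions: 2 states, 1 shock, 1 observable, 5 periods
A = [0.9 0.1; 0.0 0.2]
B = reshape([0.0, -0.01], 2, 1)
Ο΅ = randn(1, 5)
data = zeros(1, 5)      # placeholder data; in the tests above this is the simulated capital stock
filter_free_loglikelihood(A, B, data, Ο΅, [1], zeros(2), 1e-4)

In the Turing models above the same recursion runs on the get_solution output, with the initial state, the shocks and the structural parameters all sampled jointly, which is what turns the simulation into a likelihood the sampler can climb.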