This notebook generates results for "Using the Sequence-Space Jacobian to Solve and Estimate Heterogeneous-Agent Models" and saves the data to produce tables and figures.
The full notebook can take 6-10 hours to run; most of that time is spent in section 5. All sections are self-contained and can be run in isolation (once the imports are loaded, of course).
# Python packages
import time
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.io import loadmat
# Sequence jacobian package
from toolkit import jacobian as jac
from toolkit.solved_block import solved
from toolkit import estimation as ssj_est
from toolkit import nonlinear
# Models
from models import herbst_schorfheide as hs
from models import smets_wouters as sw
from models import krusell_smith as ks
from models import hank_1a
from models import hank_2a
# Auxiliary functions
from toolkit import aux_fn as aux
from toolkit import aux_speed as aux_jac
# Save output of models?
savedata = True
# MCMC chains
Nsim = 100_000 # number of simulations (200_000 for the paper)
Nburn = 50_000 # number of initial periods thrown away (50_000 for the paper)
Calibrate steady state and precompute Jacobian.
# Compute steady state
t = time.time()
ss = hs.calibrate_ss()
elapsed_ss0 = time.time() - t
t = time.time()
ss = hs.calibrate_ss()
elapsed_ss = time.time() - t
# Compute model Jacobian with pre-defined parameters
T = 300
unknowns = ['y', 'pi', 'R']
exogenous = ['epsR', 'g', 'z']
targets = ['yres', 'Rres', 'pires']
outputs = ['gdp', 'infl', 'interest']
block_list = [hs.euler, hs.nkpc, hs.taylor, hs.measurement]
t = time.time()
G = jac.get_G(block_list, exogenous, unknowns, targets, T, ss)
elapsed_G0 = time.time() - t
t = time.time()
G = jac.get_G(block_list, exogenous, unknowns, targets, T, ss)
elapsed_G = time.time() - t
# Show times
print(f'\nTime to compute steady state (burn-in): {round(1000 * elapsed_ss0, 2)}ms')
print(f'Time to compute model jacobian (burn-in): {round(1000 * elapsed_G0, 2)}ms')
print(f'\nTime to compute steady state: {round(1000 * elapsed_ss, 2)}ms')
print(f'Time to compute model jacobian: {round(1000 * elapsed_G, 2)}ms')
Estimate shock parameters. The data for the estimation was taken from the Matlab replication files available from the website for Herbst and Schorfheide's book.
# Load data
data = pd.read_csv('import_export/data/data_hs.csv', index_col=0, sep=';').values
# Parameters of estimation
shock_series = [('epsR', 0), ('g', 1), ('z', 1)]
params = ['rho_r', 'tau', 'kappa', 'psi1', 'psi2', 'data_r', 'data_pi', 'data_gamma']
x_guess = np.array([0.2, 0.2, 0.8, 0.2, 0.8, 0.8, 2, 0.8, 2, 1, 0.5, 3, 0.5])
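# ordering of x_guess: first the shock parameters implied by shock_series (std, then AR/MA coefficients per shock), then the model parameters listed in params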
priors = [('Invgamma_hs', 0.4, 4), ('Invgamma_hs', 1, 4), ('Uniform', 0, 1), ('Invgamma_hs', 0.5, 4),
('Uniform', 0, 1), ('Uniform', 0, 1), ('Gamma', 2, 0.5), ('Uniform', 0, 1), ('Gamma', 1.5, 0.25),
('Gamma', 0.5, 0.25), ('Gamma', 0.5, 0.5), ('Gamma', 7, 2), ('Normal', 0.4, 0.2)]
bounds = [(0.1, 0.4), (0.3, 0.99), (0.7, 0.999), (0.01, 0.5), (0.7, 0.99), (0.4, 0.9), (1, 5),
(0.2, 1), (1., 3), (0.1, 2), (0.2, 0.8), (2.5, 4), (0.04, 0.95)]
# Define function to demean the data using the last 3 parameters
def data_demean_f(x, data):
data_adj = np.zeros_like(data)
xdta = x[-data.shape[1]:]
data_adj[:, 0] = data[:, 0] - xdta[2]
data_adj[:, 1] = data[:, 1] - xdta[1]
data_adj[:, 2] = data[:, 2] - xdta[0] - xdta[1] - 4 * xdta[2]
return data_adj
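# For reference, this implements the Herbst-Schorfheide measurement equations (read off the code;
# the last three parameters are r^A, pi^A, gamma^Q): output growth is demeaned by gamma^Q,
# inflation by pi^A, and the interest rate by r^A + pi^A + 4*gamma^Q.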
# Store model information for estimation
jac_info = {'T': T, 'block_list': block_list, 'exogenous': exogenous, 'unknowns': unknowns, 'targets': targets,
'ss': ss}
# Estimate
t = time.time()
result, x_sd, nfev = aux.estimate(outputs, data, x_guess, shock_series, priors, T, params=params, jac_info=jac_info,
bounds=bounds, sd=False, data_demean_f=data_demean_f, tol=1e-12)
x = result.x
elapsed_est0 = time.time() - t
t = time.time()
result, x_sd, nfev = aux.estimate(outputs, data, x_guess, shock_series, priors, T, params=params, jac_info=jac_info,
bounds=bounds, sd=False, data_demean_f=data_demean_f, tol=1e-12)
x = result.x
elapsed_est = time.time() - t
# Show times
print(f'Time for estimation (burn-in): {round(elapsed_est0, 2)}s')
print(f'Time for estimation: {round(elapsed_est, 2)}s')
Compute impulse responses and compare with Dynare.
# Construct dict with shock impulse paths (MA representation of each shock process)
dshock = {}
dshock['epsR'] = ss['sigma_r'] * 0 ** np.arange(T)  # iid shock: nonzero in period 0 only
dshock['g'] = ss['sigma_g'] * ss['rho_g'] ** np.arange(T)  # AR(1): sigma * rho**t
dshock['z'] = ss['sigma_z'] * ss['rho_z'] ** np.arange(T)
Tshow = 50
# Build IRF of endogenous variables in python
irf = {}
for k in G:
irf[k] = {}
for l in exogenous:
if l in G[k]: irf[k][l] = G[k][l] @ dshock[l]
# Load irf from dynare
res_dynare = loadmat('import_export/dynare/herbst_schorfheide/irf_dynare.mat')
# Compute distance between dynare and python for all variables and all shocks
diff_irf = np.zeros(9)
series_python = [['y', 'g'], ['y', 'epsR'], ['y', 'z'], ['pi', 'g'], ['pi', 'epsR'], ['pi', 'z'], ['R', 'g'],
['R', 'epsR'], ['R', 'z']]
series_dynare = ['y_epsg', 'y_epsR', 'y_epsz', 'pi_epsg', 'pi_epsR', 'pi_epsz', 'R_epsg', 'R_epsR', 'R_epsz']
for i in range(9):
diff_irf[i] = np.sum(
np.abs(irf[series_python[i][0]][series_python[i][1]][:10] - res_dynare[series_dynare[i]][:10].T))
# Load irf of output from python
series_python = [['y', 'z'], ['y', 'epsR'], ['y', 'g']]
irf_python = np.zeros((3, Tshow))
for i in range(3):
irf_python[i] = irf[series_python[i][0]][series_python[i][1]][:Tshow]
# Load irf of output from dynare
series_dynare = ['y_epsz', 'y_epsR', 'y_epsg']
irf_dynare = np.zeros((3, Tshow))
for i in range(3):
irf_dynare[i] = res_dynare[series_dynare[i]][:Tshow].T
# Compute distance between them
irf_dist = np.abs(irf_python - irf_dynare)
# save data
if savedata: np.save('import_export/results/hs', [x, diff_irf, irf_python, irf_dist])
if savedata: np.save('import_export/results/hs_speed', [elapsed_ss, elapsed_G, elapsed_est])
Calibrate steady state and precompute Jacobian.
# Compute steady state
t = time.time()
ss, shocks = sw.calibrate_ss()
elapsed_ss0 = time.time() - t
t = time.time()
ss, shocks = sw.calibrate_ss()
elapsed_ss = time.time() - t
# Compute model jacobian
T = 300
unknowns = ['w', 'wf', 'n', 'nf', 'r', 'rf']
shocks_list = ['eps_a', 'eps_b', 'eps_g', 'eps_I', 'eps_i', 'eps_p', 'eps_w']
targets = ['goods_mkt', 'goods_mkt_f', 'fisher', 'w_res', 'mu_p_f', 'mu_w_f']
blocks = [sw.household, sw.household_f, sw.household_markup, sw.firm, sw.firm_f, sw.nkpc_p, sw.nkpc_w, sw.monetary,
sw.geneq, sw.geneq_f, sw.measurement]
t = time.time()
G = jac.get_G(blocks, shocks_list, unknowns, targets, T, ss)
elapsed_G0 = time.time() - t
t = time.time()
G = jac.get_G(blocks, shocks_list, unknowns, targets, T, ss)
elapsed_G = time.time() - t
# Show times
print(f'\nTime to compute steady state (burn-in): {round(1000 * elapsed_ss0, 2)}ms')
print(f'Time to compute model jacobian (burn-in): {round(1000 * elapsed_G0, 2)}ms')
print(f'\nTime to compute steady state: {round(1000 * elapsed_ss, 2)}ms')
print(f'Time to compute model jacobian: {round(1000 * elapsed_G, 2)}ms')
Estimate shock parameters. The data for the estimation was taken from the replication files for Smets and Wouters (2007) on the AER website.
# Load data
data = (pd.read_csv('import_export/data/data_sw.csv', index_col=0, sep=';').values)[74:, :]
# Remove intercept from series
data_adj = np.zeros_like(data)
data_adj[:, 0] = data[:, 0] - 0.509 # y
data_adj[:, 1] = data[:, 1] - 0.509 # I
data_adj[:, 2] = data[:, 2] - 0.509 # c
data_adj[:, 3] = data[:, 3] - 0.635 # pi
data_adj[:, 4] = data[:, 4] - 1.427 # i
data_adj[:, 5] = data[:, 5] - 1.743 # n
data_adj[:, 6] = data[:, 6] - 0.509 # w
# Define option for estimation
outputs = ['y_dta', 'I_dta', 'c_dta', 'pi_dta', 'i_dta', 'n_dta', 'w_dta']
shock_series = [('eps_a', 1), ('eps_b', 1), ('eps_g', 1), ('eps_I', 1), ('eps_i', 1), ('eps_p', 2), ('eps_w', 2)]
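# the second tuple entry gives the shock order: 1 = AR(1), 2 = ARMA(1,1) (the price and wage markup shocks)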
x_guess = [0.4, 0.8, 0.4, 0.8, 0.4, 0.8, 0.4, 0.8, 0.4, 0.8, 0.4, 0.8, 0.8, 0.4, 0.8, 0.8]
priors = [('Invgamma_hs', 0.1, 2), ('Beta', 0.5, 0.20), ('Invgamma_hs', 0.1, 2), ('Beta', 0.5, 0.20),
('Invgamma_hs', 0.1, 2), ('Beta', 0.5, 0.20),
('Invgamma_hs', 0.1, 2), ('Beta', 0.5, 0.20), ('Invgamma_hs', 0.1, 2), ('Beta', 0.5, 0.20),
('Invgamma_hs', 0.1, 2), ('Beta', 0.5, 0.20), ('Beta', 0.5, 0.20),
('Invgamma_hs', 0.1, 2), ('Beta', 0.5, 0.20), ('Beta', 0.5, 0.20)]
bounds = [(0.01, 3), (0.01, 0.9999), (0.025, 5), (0.01, 0.9999), (0.01, 3), (0.01, 0.9999), (0.01, 3), (0.01, 0.9999),
(0.01, 3), (0.01, 0.9999), (0.01, 3), (0.01, 0.9999), (0.01, 0.9999), (0.01, 3), (0.01, 0.9999), (0.01, 0.9999)]
# Estimate
t = time.time()
result, x_sd, nfev = aux.estimate(outputs, data_adj, x_guess, shock_series, priors, T, G=G, bounds=bounds)
elapsed_est0 = time.time() - t
t = time.time()
result, x_sd, nfev = aux.estimate(outputs, data_adj, x_guess, shock_series, priors, T, G=G, bounds=bounds)
elapsed_est = time.time() - t
x = result.x
# Show times
print(f'Time for estimation (burn-in): {round(elapsed_est0, 2)}s')
print(f'Time for estimation: {round(elapsed_est, 2)}s')
Compute impulse responses and compare with Dynare.
# Load irfs from dynare
irf_dynare = loadmat('import_export/dynare/smets_wouters/irf_dynare.mat')
# Define variables
series = ['y', 'c', 'I', 'n', 'pi', 'w', 'i']
series_dynare = [['y_ea', 'y_eb', 'y_eg', 'y_eqs', 'y_em', 'y_epinf', 'y_ew'],
['c_ea', 'c_eb', 'c_eg', 'c_eqs', 'c_em', 'c_epinf', 'c_ew', ],
['inve_ea', 'inve_eb', 'inve_eg', 'inve_eqs', 'inve_em', 'inve_epinf', 'inve_ew'],
['lab_ea', 'lab_eb', 'lab_eg', 'lab_eqs', 'lab_em', 'lab_epinf', 'lab_ew'],
['pinf_ea', 'pinf_eb', 'pinf_eg', 'pinf_eqs', 'pinf_em', 'pinf_epinf', 'pinf_ew'],
['w_ea', 'w_eb', 'w_eg', 'w_eqs', 'w_em', 'w_epinf', 'w_ew'],
['r_ea', 'r_eb', 'r_eg', 'r_eqs', 'r_em', 'r_epinf', 'r_ew']]
# Compute CIR for all series and all shocks
diff_irf = np.zeros((len(series), len(shocks_list)))
for i in range(len(series)):
for j in range(len(shocks_list)):
irf1 = (G[series[i]][shocks_list[j]] @ (
shocks[shocks_list[j]][0] * aux.arma_irf(np.array(shocks[shocks_list[j]][1]),
np.array(shocks[shocks_list[j]][2]), T)))[:50]
irf2 = irf_dynare[series_dynare[i][j]].T
diff_irf[i, j] = np.max(np.abs(irf2 - irf1))
# Compute irf for output
Tshow = 50
irf = np.zeros((len(shocks_list), Tshow))
for k in range(len(shocks_list)):
dshock = shocks[shocks_list[k]][0] * aux.arma_irf(np.array(shocks[shocks_list[k]][1]),
np.array(shocks[shocks_list[k]][2]), T)
irf[k] = (G['y'][shocks_list[k]] @ dshock)[:Tshow]
# Load irfs of output from dynare
res_dynare = loadmat('import_export/dynare/smets_wouters/irf_dynare.mat')
series_dynare = ['y_ea', 'y_eb', 'y_eg', 'y_eqs', 'y_em', 'y_epinf', 'y_ew']
irf_dynare = np.zeros((len(shocks_list), Tshow))
for k in range(len(series_dynare)):
irf_dynare[k] = res_dynare[series_dynare[k]][:Tshow].T
# Compute distance between them
irf_dist = np.abs(irf - irf_dynare)
# save data
if savedata: np.save('import_export/results/sw', [x, diff_irf, irf, irf_dist])
if savedata: np.save('import_export/results/sw_speed', [elapsed_ss, elapsed_G, elapsed_est])
Calibrate steady state and precompute Jacobian.
times = {}
# Steady state
t = time.time()
ss = ks.ks_ss()
elapsed_ss0 = time.time() - t
# Steady state
t = time.time()
ss = ks.ks_ss()
times['ss'] = time.time() - t
# Compute model jacobian G
T = 300
t = time.time()
G = jac.get_G(block_list=[ks.firm, ks.mkt_clearing, ks.household], exogenous=['Z'], unknowns=['K'],
targets=['asset_mkt'], T=T, ss=ss)
elapsed_G0 = time.time() - t
t = time.time()
G = jac.get_G(block_list=[ks.firm, ks.mkt_clearing, ks.household], exogenous=['Z'], unknowns=['K'],
targets=['asset_mkt'], T=T, ss=ss)
times['G'] = time.time() - t
# Show times
t1, t2 = times['ss'], times['G']
print(f'\nTime to compute steady state (burn-in): {round(elapsed_ss0, 2)}s')
print(f'Time to compute model jacobian (burn-in): {round(elapsed_G0, 2)}s')
print(f'\nTime to compute steady state: {round(t1, 2)}s')
print(f'Time to compute model jacobian: {round(t2, 2)}s')
Bayesian estimation
# Define series to estimate
series = ['y']
data = aux.get_normalized_data(ss, 'import_export/data/data_bayes.csv', series)
# Define option for estimation
shock_series = [('Z', 2)]  # specifies the shock hitting the economy and its order: 1 = AR(1), 2 = ARMA(1,1)
x_guess = [0.4, 0.5, 0.4]
priors = [('Invgamma', 0.4, 4), ('Beta', 0.5, 0.2), ('Beta', 0.5, 0.2)]
bounds = [(1e-2, 4), (1e-2, 0.98), (1e-2, 0.98)]
outputs = ['Y']
# Estimate
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
x = result.x
elapsed_est0 = time.time() - t
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
x = result.x
times['est'] = time.time() - t
# Show times
t1 = times['est']
print(f'Time for estimation (burn-in): {round(elapsed_est0, 2)}s')
print(f'Time for estimation: {round(t1, 2)}s')
Simulation at the posterior mode
T_simul = 100_000
t = time.time()
sim = aux.simulate(G, ['Z'], outputs, {'Z': x[0]}, {'Z': x[1]}, T, T_simul) # Note, just simulate AR(1) for now
times_sim0 = time.time() - t
t = time.time()
sim = aux.simulate(G, ['Z'], outputs, {'Z': x[0]}, {'Z': x[1]}, T, T_simul) # Note, just simulate AR(1) for now
times['sim'] = time.time() - t
# Show times
t1 = times['sim']
print(f'Time for simulation (burn-in): {round(1000 * times_sim0, 3)}ms')
print(f'Time for simulation: {round(1000 * t1, 3)}ms')
Get run time for likelihood evaluation
T_irf = T - 20
n_se,n_sh = len(outputs), len(shock_series)
meas_error = np.zeros(n_se)
sigmas, arcoefs, macoefs = aux.get_shocks_arma(x, shock_series)
# run them all once (burn-in)
As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
Sigma = ssj_est.all_covariances(As, sigmas)
llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
# Compute run time
res = %timeit -o -q As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
times['lk_step1'] = np.mean(res.timings)
res = %timeit -o -q Sigma = ssj_est.all_covariances(As, sigmas)
times['lk_step2'] = np.mean(res.timings)
res = %timeit -o -q llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
times['lk_step3'] = np.mean(res.timings)
times['lk'] = times['lk_step1'] + times['lk_step2'] + times['lk_step3']
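# The three steps compose into one likelihood evaluation; a minimal sketch,
# assuming the aux helpers keep the signatures used above:
def loglik_sketch(x):
    sigmas, arcoefs, macoefs = aux.get_shocks_arma(x, shock_series)
    As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)  # step 1: IRFs of observables
    Sigma = ssj_est.all_covariances(As, sigmas)  # step 2: autocovariances of observables
    return aux.step3_est(Sigma, data, sigma_o=meas_error)  # step 3: Gaussian log-likelihood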
Metropolis-Hastings
# Parameters
c = 2.5 # scale for the variance
x0 = x.copy()
# Define function that returns log-likelihood + log-prior
def lik_f(x):
return aux.loglik_f(x, data, outputs, shock_series, priors, T, G)
# Get mode and variance covariance matrix of the log likelihood at the mode
mode = x # mode of the parameters
Sigma = - np.linalg.inv(aux.hessian(lik_f, mode)[0]) # inverse variance-covariance matrix
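# A minimal sketch (an assumption, not the aux implementation) of one random-walk
# Metropolis step with proposal x' ~ N(x, c^2 * Sigma), accepted with probability
# min(1, exp(lik_f(x') - lik_f(x))) and rejected outside `bounds`:
def mh_step_sketch(x_curr, lik_curr, rng=np.random.default_rng(0)):
    x_prop = rng.multivariate_normal(x_curr, c ** 2 * Sigma)
    if all(lo <= v <= hi for v, (lo, hi) in zip(x_prop, bounds)):
        lik_prop = lik_f(x_prop)
        if np.log(rng.uniform()) < lik_prop - lik_curr:
            return x_prop, lik_prop, True  # accept
    return x_curr, lik_curr, False  # reject (or out of bounds)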
# MH algorithm
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds,
Nsim, Nburn, c)
times_mh0 = time.time() - t
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds,
Nsim, Nburn, c)
times['mh'] = time.time() - t
times['c'], times['rate'] = c, acceptancerate
# Compute recursive mean
rmean = np.zeros((para_avg.shape[0], para_sim.shape[1]))
for i in range(para_sim.shape[1]):
rmean[:, i] = np.mean(para_sim[:, :i + 1], 1)
# Compute kernel density
kde = sm.nonparametric.KDEUnivariate(para_sim[0])
kde.fit()
kernel_support = np.zeros((para_avg.shape[0], kde.support.shape[0]))
kernel_density = np.zeros((para_avg.shape[0], kde.support.shape[0]))
for i in range(para_avg.shape[0]):
kde = sm.nonparametric.KDEUnivariate(para_sim[i])
kde.fit()
kernel_support[i] = kde.support
kernel_density[i] = kde.density
# Show times
t1 = times['mh']
print(f'Time for Metropolis-Hastings algorithm (burn-in): {round(times_mh0, 2)}s')
print(f'Time for Metropolis-Hastings algorithm: {round(t1, 2)}s')
print(f'Acceptance rate: {round(acceptancerate, 2)}')
# save data
if savedata: np.save('import_export/results/ks', [ss, G])
if savedata: np.save('import_export/results/ks_est',
[x, para_avg, para_5, para_95, Nsim, acceptancerate, rmean, kernel_support, kernel_density, para_sim])
if savedata: np.save('import_export/results/ks_speed', [times])
Calibrate steady state and precompute Jacobian.
times = {}
# Steady state burn-in
t = time.time()
ss = ks.ks_ss(nS=50, nA=5000)
elapsed_ss0 = time.time() - t
# Steady state
t = time.time()
ss = ks.ks_ss(nS=50, nA=5000)
times['ss'] = time.time() - t
# Compute model jacobian G
T = 300
t = time.time()
G = jac.get_G(block_list=[ks.firm, ks.mkt_clearing, ks.household], exogenous=['Z'], unknowns=['K'],
targets=['asset_mkt'], T=T, ss=ss)
elapsed_G0 = time.time() - t
t = time.time()
G = jac.get_G(block_list=[ks.firm, ks.mkt_clearing, ks.household], exogenous=['Z'], unknowns=['K'],
targets=['asset_mkt'], T=T, ss=ss)
times['G'] = time.time() - t
# Show times
t1, t2 = times['ss'], times['G']
print(f'\nTime to compute steady state (burn-in): {round(elapsed_ss0, 2)}s')
print(f'Time to compute model jacobian (burn-in): {round(elapsed_G0, 2)}s')
print(f'\nTime to compute steady state: {round(t1, 2)}s')
print(f'Time to compute model jacobian: {round(t2, 2)}s')
Bayesian estimation
# Define series to estimate
series = ['y']
data = aux.get_normalized_data(ss, 'import_export/data/data_bayes.csv', series)
# Define option for estimation
shock_series = [('Z', 2)]  # specifies the shock hitting the economy and its order: 1 = AR(1), 2 = ARMA(1,1)
x_guess = [0.4, 0.5, 0.4]
priors = [('Invgamma', 0.4, 4), ('Beta', 0.5, 0.2), ('Beta', 0.5, 0.2)]
bounds = [(1e-2, 4), (1e-2, 0.98), (1e-2, 0.98)]
outputs = ['Y']
# Estimate
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
elapsed_est0 = time.time() - t
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
x = result.x
times['est'] = time.time() - t
# Show times
t1 = times['est']
print(f'Time for estimation (burn-in): {round(elapsed_est0, 2)}s')
print(f'Time for estimation: {round(t1, 2)}s')
Simulation at the posterior mode
T_simul = 100_000
t = time.time()
sim = aux.simulate(G, ['Z'], outputs, {'Z': x[0]}, {'Z': x[1]}, T, T_simul) # note, just simulate AR(1) for now
elapsed_sim0 = time.time() - t
t = time.time()
sim = aux.simulate(G, ['Z'], outputs, {'Z': x[0]}, {'Z': x[1]}, T, T_simul) # note, just simulate AR(1) for now
times['sim'] = time.time() - t
# Show times
t1 = times['sim']
print(f'Time for simulation (burn-in): {round(1000 * elapsed_sim0, 3)}ms')
print(f'Time for simulation: {round(1000 * t1, 3)}ms')
Get run time for likelihood evaluation
T_irf = T - 20
n_se,n_sh = len(outputs), len(shock_series)
meas_error = np.zeros(n_se)
sigmas, arcoefs, macoefs = aux.get_shocks_arma(x, shock_series)
# run them all once (burn-in)
As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
Sigma = ssj_est.all_covariances(As, sigmas)
llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
# Compute run time
res = %timeit -o -q As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
times['lk_step1'] = np.mean(res.timings)
res = %timeit -o -q Sigma = ssj_est.all_covariances(As, sigmas)
times['lk_step2'] = np.mean(res.timings)
res = %timeit -o -q llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
times['lk_step3'] = np.mean(res.timings)
times['lk'] = times['lk_step1'] + times['lk_step2'] + times['lk_step3']
Metropolis-Hastings
# Parameters
c = 2.5 # scale for the variance
x0 = x.copy()
# Define function that returns log-likelihood + log-prior
def lik_f(x):
return aux.loglik_f(x, data, outputs, shock_series, priors, T, G)
# Get mode and variance covariance matrix of the log likelihood at the mode
mode = x # mode of the parameters
Sigma = - np.linalg.inv(aux.hessian(lik_f, mode)[0]) # inverse variance-covariance matrix
# MH algorithm
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds, Nsim, Nburn, c)
elapsed_mh0 = time.time() - t
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds, Nsim, Nburn, c)
times['mh'] = time.time() - t
times['c'], times['rate'] = c, acceptancerate
# Compute recursive mean
rmean = np.zeros((para_avg.shape[0],para_sim.shape[1]))
for i in range(para_sim.shape[1]):
rmean[:,i] = np.mean(para_sim[:,:i+1],1)
# Compute kernel density
kde = sm.nonparametric.KDEUnivariate(para_sim[0])
kde.fit()
kernel_support = np.zeros((para_avg.shape[0],kde.support.shape[0]))
kernel_density = np.zeros((para_avg.shape[0],kde.support.shape[0]))
for i in range(para_avg.shape[0]):
kde = sm.nonparametric.KDEUnivariate(para_sim[i])
kde.fit()
kernel_support[i] = kde.support
kernel_density[i] = kde.density
# Show times
t1 = times['mh']
print(f'Time for Metropolis-Hastings algorithm (burn-in): {round(elapsed_mh0,2)}s')
print(f'Time for Metropolis-Hastings algorithm: {round(t1,2)}s')
print(f'Acceptance rate: {round(acceptancerate,2)}')
# save data
if savedata: np.save('import_export/results/hd_ks', [ss, G])
if savedata: np.save('import_export/results/hd_ks_est',
[x, para_avg, para_5, para_95, Nsim, acceptancerate, rmean, kernel_support, kernel_density, para_sim])
if savedata: np.save('import_export/results/hd_ks_speed', [times])
Calibrate steady state and precompute Jacobian.
times = {}
# Steady state burn-in
t = time.time()
ss = hank_1a.hank_ss()
elapsed_ss0 = time.time() - t
# Steady state
t = time.time()
ss = hank_1a.hank_ss()
times['ss'] = time.time() - t
# setup
T = 300
exogenous = ['G', 'markup', 'rstar']
unknowns = ['pi', 'w', 'Y']
targets = ['nkpc_res', 'asset_mkt', 'labor_mkt']
# Compute model jacobian G
block_list = [hank_1a.household_trans, hank_1a.firm, hank_1a.monetary, hank_1a.fiscal, hank_1a.nkpc, hank_1a.mkt_clearing]
t = time.time()
G = jac.get_G(block_list, exogenous, unknowns, targets, T, ss, save=True)
elapsed_G0 = time.time() - t
t = time.time()
G = jac.get_G(block_list, exogenous, unknowns, targets, T, ss, save=True)
times['G'] = time.time() - t
# Show times
t1, t2 = times['ss'], times['G']
print(f'\nTime to compute steady state (burn-in): {round(elapsed_ss0, 2)}s')
print(f'Time to compute model jacobian (burn-in): {round(elapsed_G0, 2)}s')
print(f'\nTime to compute steady state: {round(t1, 2)}s')
print(f'Time to compute model jacobian: {round(t2, 2)}s')
Bayesian estimation
# Define series to estimate
series = ['y', 'pi', 'i']
data = aux.get_normalized_data(ss,'import_export/data/data_bayes.csv', series)
# Define option for estimation
shock_series = [('rstar', 1), ('G', 1), ('markup', 1)]
x_guess = [0.4, 0.5] * len(shock_series)
priors = [('Invgamma', 0.4, 4), ('Beta', 0.5, 0.2)] * len(shock_series)
bounds = [(0.01, 4), (0.01, 0.98)] * len(shock_series)
outputs = ['Y', 'pi', 'i']
# Estimate
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
x = result.x
times_est0 = time.time() - t
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
x = result.x
times['est'] = time.time() - t
# Show times
t1 = times['est']
print(f'Time for estimation (burn-in): {round(times_est0,2)}s')
print(f'Time for estimation: {round(t1,2)}s')
Simulation at the posterior mode
T_simul = 100_000
t = time.time()
sim = aux.simulate(G, ['rstar', 'G', 'markup'], outputs,
{'rstar': x[0], 'G': x[2], 'markup': x[4]},
{'rstar': x[1], 'G': x[3], 'markup': x[5]}, T, T_simul) # note, just simulate AR(1) for now
elapsed_sim0 = time.time() - t
t = time.time()
sim = aux.simulate(G, ['rstar', 'G', 'markup'], outputs,
{'rstar': x[0], 'G': x[2], 'markup': x[4]},
{'rstar': x[1], 'G': x[3], 'markup': x[5]}, T, T_simul) # note, just simulate AR(1) for now
times['sim'] = time.time() - t
# Show times
t1 = times['sim']
print(f'Time for simulation (burn-in): {round(1000 * elapsed_sim0, 3)}ms')
print(f'Time for simulation: {round(1000 * t1, 3)}ms')
Get run time for likelihood evaluation
T_irf = T - 20
n_se,n_sh = len(outputs), len(shock_series)
meas_error = np.zeros(n_se)
sigmas, arcoefs, macoefs = aux.get_shocks_arma(x, shock_series)
# run them all once (burn-in)
As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
Sigma = ssj_est.all_covariances(As, sigmas)
llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
# Compute run time
res = %timeit -o -q As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
times['lk_step1'] = np.mean(res.timings)
res = %timeit -o -q Sigma = ssj_est.all_covariances(As, sigmas)
times['lk_step2'] = np.mean(res.timings)
res = %timeit -o -q llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
times['lk_step3'] = np.mean(res.timings)
times['lk'] = times['lk_step1'] + times['lk_step2'] + times['lk_step3']
Metropolis-Hastings
# Parameters
c = 1.1 # scale for the variance
x0 = x.copy()
# Define function that returns log-likelihood + log-prior
def lik_f(x):
return aux.loglik_f(x, data, outputs, shock_series, priors, T, G)
# Get mode and variance covariance matrix of the log likelihood at the mode
mode = x # mode of the parameters
Sigma = - np.linalg.inv(aux.hessian(lik_f, mode)[0]) # inverse variance-covariance matrix
# MH algorithm
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds,
Nsim, Nburn, c)
elapsed_mh0 = time.time() - t
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds,
Nsim, Nburn, c)
times['mh'] = time.time() - t
times['c'], times['rate'] = c, acceptancerate
# Compute recursive mean
rmean = np.zeros((para_avg.shape[0], para_sim.shape[1]))
for i in range(para_sim.shape[1]):
rmean[:, i] = np.mean(para_sim[:, :i + 1], 1)
# Compute kernel density
kde = sm.nonparametric.KDEUnivariate(para_sim[0])
kde.fit()
kernel_support = np.zeros((para_avg.shape[0], kde.support.shape[0]))
kernel_density = np.zeros((para_avg.shape[0], kde.support.shape[0]))
for i in range(para_avg.shape[0]):
kde = sm.nonparametric.KDEUnivariate(para_sim[i])
kde.fit()
kernel_support[i] = kde.support
kernel_density[i] = kde.density
# Show times
t1 = times['mh']
print(f'Time for Metropolis-Hastings algorithm (burn-in): {round(elapsed_mh0, 2)}s')
print(f'Time for Metropolis-Hastings algorithm: {round(t1, 2)}s')
print(f'Acceptance rate: {round(acceptancerate, 2)}')
# save data
if savedata: np.save('import_export/results/hank_1a_est',
[x, para_avg, para_5, para_95, Nsim, acceptancerate, rmean, kernel_support, kernel_density, para_sim])
if savedata: np.save('import_export/results/hank_1a_speed', [times])
Bayesian estimation
times = {}
# Steady state
t = time.time()
ss = hank_1a.hank_ss()
times['ss'] = time.time() - t
# setup
T = 300
exogenous = ['G', 'markup', 'rstar']
unknowns = ['pi', 'w', 'Y']
targets = ['nkpc_res', 'asset_mkt', 'labor_mkt']
# Define series to estimate
series = ['y', 'pi', 'i']
outputs = ['Y', 'pi', 'i']
data = aux.get_normalized_data(ss, 'import_export/data/data_bayes.csv', series)
# Parameters of estimation
shock_series = [('rstar', 1), ('G', 1), ('markup', 1)]
params = ['phi', 'phi_y', 'kappa']
x_guess = [0.4, 0.5] * len(shock_series) + [ss[k] for k in params]
bounds = [(0.01, 4), (0.01, 0.98)] * len(shock_series) + [(1, 3), (1e-3, 1), (1e-4, 0.5)]
priors = [('Invgamma', 0.4, 4), ('Beta', 0.5, 0.2)] * len(shock_series) + [('Gamma', 1.5, 0.25), ('Gamma', 0.5, 0.25),
('Gamma', 0.1, 0.05)]
# Compute jacobian of household block
J_ha = jac.get_G([hank_1a.household_trans], ['r', 'w', 'Div', 'Tax'], [], [], T, ss, save=True)
jac_info = {'T': T, 'J_ha': J_ha,
'block_list': [J_ha, hank_1a.firm, hank_1a.monetary, hank_1a.fiscal, hank_1a.nkpc, hank_1a.mkt_clearing],
'exogenous': exogenous, 'unknowns': unknowns, 'targets': targets, 'ss': ss}
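# Since J_ha is precomputed, each call to get_G with new values of phi, phi_y, kappa
# only re-differentiates the simple blocks, not the heterogeneous household problem.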
# Estimate
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, params=params,
jac_info=jac_info, bounds=bounds, sd=False)
x = result.x
times['est'] = time.time() - t
# Show times
t1, t2 = times['ss'], times['est']
print(f'\nTime to compute steady state: {round(t1, 2)}s')
print(f'Time for estimation: {round(t2, 2)}s')
Get run time for likelihood evaluation
n_se,n_sh = len(outputs), len(shock_series)
meas_error = np.zeros(n_se)
sigmas, arcoefs, macoefs = aux.get_shocks_arma(x, shock_series)
# run them all once
As = aux.step1_estfull(x,shock_series,outputs,T,params,jac_info)
Sigma = ssj_est.all_covariances(As, sigmas)
llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
# Compute run time
res = %timeit -o -q As = aux.step1_estfull(x,shock_series,outputs,T,params,jac_info)
times['lk_step1'] = np.mean(res.timings)
res = %timeit -o -q Sigma = ssj_est.all_covariances(As, sigmas)
times['lk_step2'] = np.mean(res.timings)
res = %timeit -o -q llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
times['lk_step3'] = np.mean(res.timings)
times['lk'] = times['lk_step1'] + times['lk_step2'] + times['lk_step3']
Metropolis-Hastings
# Parameters
c = 0.65 # scale for the variance
x0 = x.copy()
# Define function that returns log-likelihood + log-prior
def lik_f(x):
n_params = len(params)
x_params = x[-n_params:]
# write new parameters into ss
ss_here = jac_info['ss'].copy()
ss_here.update({param: x_params[j] for j, param in enumerate(params)})
# Compute model jacobian G
G = jac.get_G(jac_info['block_list'], exogenous=jac_info['exogenous'], unknowns=jac_info['unknowns'],
targets=jac_info['targets'], T=jac_info['T'], ss=ss_here, outputs=outputs)
# return likelihood
return aux.loglik_f(x, data, outputs, shock_series, priors, T, G)
# Get mode and variance covariance matrix of the log likelihood at the mode
mode = x # mode of the parameters
Sigma = - np.linalg.inv(aux.hessian(lik_f, mode)[0]) # inverse variance-covariance matrix
# MH algorithm
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds,
Nsim, Nburn, c)
times['mh'] = time.time() - t
times['c'], times['rate'] = c, acceptancerate
# Compute recursive mean
rmean = np.zeros((para_avg.shape[0], para_sim.shape[1]))
for i in range(para_sim.shape[1]):
rmean[:, i] = np.mean(para_sim[:, :i + 1], 1)
# Compute kernel density
kde = sm.nonparametric.KDEUnivariate(para_sim[0])
kde.fit()
kernel_support = np.zeros((para_avg.shape[0], kde.support.shape[0]))
kernel_density = np.zeros((para_avg.shape[0], kde.support.shape[0]))
for i in range(para_avg.shape[0]):
kde = sm.nonparametric.KDEUnivariate(para_sim[i])
kde.fit()
kernel_support[i] = kde.support
kernel_density[i] = kde.density
# Show times
t1 = times['mh']
print(f'Time for Metropolis-Hastings algorithm: {round(t1, 2)}s')
print(f'Acceptance rate: {round(acceptancerate, 2)}')
# save data
if savedata: np.save('import_export/results/hank_1afull_est',
[x, para_avg, para_5, para_95, Nsim, acceptancerate, rmean, kernel_support, kernel_density,
para_sim])
if savedata: np.save('import_export/results/hank_1afull_speed', [times])
Calibrate steady state and precompute Jacobian.
times = {}
# Steady state burn-in
t = time.time()
ss = hank_2a.hank_ss(noisy=False)
elapsed_ss0 = time.time() - t
# Steady state
t = time.time()
ss = hank_2a.hank_ss(noisy=False)
times['ss'] = time.time() - t
# Compute model jacobian
T = 300
exogenous = ['rstar', 'markup', 'G', 'beta', 'Z', 'rinv_shock', 'markup_w']
unknowns = ['r', 'w', 'Y']
targets = ['asset_mkt', 'fisher', 'wnkpc']
outputs = ['Y', 'pi', 'i', 'C', 'N', 'I', 'w']
t = time.time()
pricing = solved(block_list=[hank_2a.pricing], unknowns=['pi'], targets=['nkpc'])
arbitrage = solved(block_list=[hank_2a.arbitrage], unknowns=['p'], targets=['equity'])
production = solved(block_list=[hank_2a.labor, hank_2a.investment], unknowns=['Q', 'K'], targets=['inv', 'val'])
block_list = [hank_2a.household_inc, pricing, arbitrage, production, hank_2a.dividend, hank_2a.taylor, hank_2a.fiscal,
hank_2a.finance, hank_2a.wage, hank_2a.union, hank_2a.mkt_clearing]
G = jac.get_G(block_list, exogenous, unknowns, targets, T=T, ss=ss, outputs=outputs)
elapsed_G0 = time.time() - t
t = time.time()
pricing = solved(block_list=[hank_2a.pricing], unknowns=['pi'], targets=['nkpc'])
arbitrage = solved(block_list=[hank_2a.arbitrage], unknowns=['p'], targets=['equity'])
production = solved(block_list=[hank_2a.labor, hank_2a.investment], unknowns=['Q', 'K'], targets=['inv', 'val'])
block_list = [hank_2a.household_inc, pricing, arbitrage, production, hank_2a.dividend, hank_2a.taylor, hank_2a.fiscal,
hank_2a.finance, hank_2a.wage, hank_2a.union, hank_2a.mkt_clearing]
G = jac.get_G(block_list, exogenous, unknowns, targets, T=T, ss=ss, outputs=outputs)
times['G'] = time.time() - t
# Show times
t1, t2 = times['ss'], times['G']
print(f'\nTime to compute steady state (burn-in): {round(elapsed_ss0, 2)}s')
print(f'Time to compute model jacobian (burn-in): {round(elapsed_G0, 2)}s')
print(f'\nTime to compute steady state: {round(t1, 2)}s')
print(f'Time to compute model jacobian: {round(t2, 2)}s')
Nonlinear impulse response
rho_r = 0.6
drstar = -0.0125 * rho_r ** np.arange(T) # to get pp change in rate, multiply by 4 since model is quarterly
td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, rstar=ss['r'] + drstar, noisy=False)['C'] - ss['C']
td_linear = G['C']['rstar'] @ drstar
Transition to new steady state
# Compute new steady state
ssnew = hank_2a.hank_ss_Z(1.01, ss)
# Compute transition
td_newss = nonlinear.td_solve(ssnew, block_list, unknowns, targets, ss_initial=ss, Z=np.full(500, ssnew['Z']), noisy=False)
Bayesian estimation
# Define data
series = ['y', 'pi', 'i', 'c', 'n', 'I', 'w']
data = aux.get_normalized_data(ss, 'import_export/data/data_bayes.csv', series)
# Parameters of estimation
shock_series = [('Z', 1), ('rstar', 1), ('G', 1), ('beta', 1), ('rinv_shock', 1), ('markup', 1),
('markup_w', 1)]  # specifies each shock hitting the economy and its order: 1 = AR(1), 2 = ARMA(1,1)
x_guess = [0.4, 0.6] * len(shock_series)
priors = [('Invgamma', 0.4, 4), ('Beta', 0.5, 0.2)] * len(shock_series)
bounds = [(1e-2, 10), (1e-2, 0.98)] * len(shock_series)
# Estimation
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
x = result.x
times_est0 = time.time() - t
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, G=G, bounds=bounds)
x = result.x
times['est'] = time.time() - t
# Show times
t1 = times['est']
print(f'Time for estimation (burn-in): {round(times_est0, 2)}s')
print(f'Time for estimation: {round(t1, 2)}s')
Simulation at the posterior mode
T_simul = 100_000
t = time.time()
sim = aux.simulate(G, ['Z', 'rstar', 'G', 'beta', 'rinv_shock', 'markup', 'markup_w'], outputs,
{'Z': x[0], 'rstar': x[2], 'G': x[4], 'beta': x[6], 'rinv_shock': x[8], 'markup': x[10],
'markup_w': x[12]},
{'Z': x[1], 'rstar': x[3], 'G': x[5], 'beta': x[7], 'rinv_shock': x[9], 'markup': x[11],
'markup_w': x[13]}, T, T_simul) # note, just simulate AR(1) for now
times['sim'] = time.time() - t
# Show times
t1 = times['sim']
print(f'Time for simulation: {round(1000 * t1, 3)}ms')
Get run time for likelihood evaluation
T_irf = T - 20
n_se,n_sh = len(outputs), len(shock_series)
meas_error = np.zeros(n_se)
sigmas, arcoefs, macoefs = aux.get_shocks_arma(x, shock_series)
# run them all once
As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
Sigma = ssj_est.all_covariances(As, sigmas)
llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
# Compute run time
res = %timeit -o -q As = aux.step1_est(G, arcoefs, macoefs, shock_series, outputs, T, T_irf, n_se, n_sh)
times['lk_step1'] = np.mean(res.timings)
res = %timeit -o -q Sigma = ssj_est.all_covariances(As, sigmas)
times['lk_step2'] = np.mean(res.timings)
res = %timeit -o -q llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
times['lk_step3'] = np.mean(res.timings)
times['lk'] = times['lk_step1'] + times['lk_step2'] + times['lk_step3']
Metropolis-Hastings
# Parameters
c = 0.4 # scale for the variance
x0 = x.copy()
# Define function that returns log-likelihood + log-prior
def lik_f(x):
return aux.loglik_f(x, data, outputs, shock_series, priors, T, G)
# Get mode and variance covariance matrix of the log likelihood at the mode
mode = x # mode of the parameters
Sigma = - np.linalg.inv(aux.hessian(lik_f, mode)[0]) # inverse variance-covariance matrix
# MH algorithm
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds,
Nsim, Nburn, c)
times['mh'] = time.time() - t
times['c'], times['rate'] = c, acceptancerate
# Compute recursive mean
rmean = np.zeros((para_avg.shape[0], para_sim.shape[1]))
for i in range(para_sim.shape[1]):
rmean[:, i] = np.mean(para_sim[:, :i + 1], 1)
# Compute kernel density
kde = sm.nonparametric.KDEUnivariate(para_sim[0])
kde.fit()
kernel_support = np.zeros((para_avg.shape[0], kde.support.shape[0]))
kernel_density = np.zeros((para_avg.shape[0], kde.support.shape[0]))
for i in range(para_avg.shape[0]):
kde = sm.nonparametric.KDEUnivariate(para_sim[i])
kde.fit()
kernel_support[i] = kde.support
kernel_density[i] = kde.density
# Show times
t1 = times['mh']
print(f'Time for Metropolis-Hastings algorithm: {round(t1, 2)}s')
print(f'Acceptance rate: {round(acceptancerate, 2)}')
# save data
if savedata: np.save('import_export/results/hank_2a_transition', [td_nonlinear, td_linear, td_newss, ss])
if savedata: np.save('import_export/results/hank_2a_est',
[x, para_avg, para_5, para_95, Nsim, acceptancerate, rmean, kernel_support, kernel_density,
para_sim])
if savedata: np.save('import_export/results/hank_2a_speed', [times])
Bayesian estimation
times = {}
# Steady state
t = time.time()
ss = hank_2a.hank_ss(noisy=False)
times['ss'] = time.time() - t
# Define data
series = ['y', 'pi', 'i', 'c', 'n', 'I', 'w']
outputs = ['Y', 'pi', 'i', 'C', 'N', 'I', 'w']
data = aux.get_normalized_data(ss, 'import_export/data/data_bayes.csv', series)
# Options for estimation
params = ['phi', 'phi_y', 'kappap', 'kappaw', 'epsI']
shock_series = [('Z', 1), ('rstar', 1), ('G', 1), ('beta', 1), ('rinv_shock', 1), ('markup', 1),
('markup_w', 1)]  # specifies each shock hitting the economy and its order: 1 = AR(1), 2 = ARMA(1,1)
priors = [('Invgamma', 0.4, 4), ('Beta', 0.5, 0.2)] * len(shock_series) + [('Gamma', 1.5, 0.25), ('Gamma', 0.5, 0.25),
('Gamma', 0.1, 0.05), ('Gamma', 0.1, 0.05),
('Gamma', 4, 2)]
bounds = [(1e-2, 10), (1e-2, 0.99)] * len(shock_series) + [(1, 3), (1e-3, 4), (1e-4, 0.7), (1e-4, 0.7), (1e-3, 5)]
x_guess = [0.4, 0.6] * len(shock_series) + [ss[k] for k in params]
T = 300
# Compute jacobian of household block
J_ha = jac.get_G([hank_2a.household_inc], ['ra', 'rb', 'beta', 'w', 'N', 'tax'], [], [], T, ss)
# Store GE information
unknowns = ['r', 'w', 'Y']
targets = ['asset_mkt', 'fisher', 'wnkpc']
exogenous = ['rstar', 'markup', 'G', 'beta', 'Z', 'rinv_shock', 'markup_w']
pricing = solved(block_list=[hank_2a.pricing], unknowns=['pi'], targets=['nkpc'])
arbitrage = solved(block_list=[hank_2a.arbitrage], unknowns=['p'], targets=['equity'])
production = solved(block_list=[hank_2a.labor, hank_2a.investment], unknowns=['Q', 'K'], targets=['inv', 'val'])
block_list = [J_ha, pricing, arbitrage, production, hank_2a.dividend, hank_2a.taylor, hank_2a.fiscal, hank_2a.finance,
hank_2a.wage, hank_2a.union, hank_2a.mkt_clearing]
jac_info = {'T': T, 'block_list': block_list, 'exogenous': exogenous, 'unknowns': unknowns, 'targets': targets, 'ss': ss}
# Estimate
t = time.time()
result, x_sd, times['nfev'] = aux.estimate(outputs, data, x_guess, shock_series, priors, T, params=params,
jac_info=jac_info, bounds=bounds, sd=False)
x = result.x
times['est'] = time.time() - t
# Show times
t1, t2 = times['ss'], times['est']
print(f'\nTime to compute steady state: {round(t1, 2)}s')
print(f'Time for estimation: {round(t2, 2)}s')
Get run time for likelihood evaluation
n_se,n_sh = len(outputs), len(shock_series)
meas_error = np.zeros(n_se)
sigmas, arcoefs, macoefs = aux.get_shocks_arma(x, shock_series)
# run them all once
As = aux.step1_estfull(x,shock_series,outputs,T,params,jac_info)
Sigma = ssj_est.all_covariances(As, sigmas)
llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
# Compute run time
res = %timeit -o -q As = aux.step1_estfull(x,shock_series,outputs,T,params,jac_info)
times['lk_step1'] = np.mean(res.timings)
res = %timeit -o -q Sigma = ssj_est.all_covariances(As, sigmas)
times['lk_step2'] = np.mean(res.timings)
res = %timeit -o -q llh = aux.step3_est(Sigma, data, sigma_o=meas_error)
times['lk_step3'] = np.mean(res.timings)
times['lk'] = times['lk_step1'] + times['lk_step2'] + times['lk_step3']
Metropolis-Hastings
# Parameters
c = 0.10 # scale for the variance
x0 = x.copy()
# Define function that returns log-likelihood + log-prior
def lik_f(x):
n_params = len(params)
x_params = x[-n_params:]
# write new parameters into ss
ss_here = jac_info['ss'].copy()
ss_here.update({param: x_params[j] for j, param in enumerate(params)})
# Compute model jacobian G
G = jac.get_G(jac_info['block_list'], exogenous=jac_info['exogenous'], unknowns=jac_info['unknowns'],
targets=jac_info['targets'], T=jac_info['T'], ss=ss_here, outputs=outputs)
# return likelihood
return aux.loglik_f(x, data, outputs, shock_series, priors, T, G)
# Get mode and variance covariance matrix of the log likelihood at the mode
mode = x # mode of the parameters
Sigma = - np.linalg.inv(aux.hessian(lik_f, mode)[0]) # inverse variance-covariance matrix
# MH algorithm
t = time.time()
para_sim, para_avg, para_5, para_95, logposterior, acceptancerate = aux.metropolis_hastings(lik_f, mode, Sigma, bounds,
Nsim, Nburn, c)
times['mh'] = time.time() - t
times['c'], times['rate'] = c, acceptancerate
# Compute recursive mean
rmean = np.zeros((para_avg.shape[0], para_sim.shape[1]))
for i in range(para_sim.shape[1]):
rmean[:, i] = np.mean(para_sim[:, :i + 1], 1)
# Compute kernel density
kde = sm.nonparametric.KDEUnivariate(para_sim[0])
kde.fit()
kernel_support = np.zeros((para_avg.shape[0], kde.support.shape[0]))
kernel_density = np.zeros((para_avg.shape[0], kde.support.shape[0]))
for i in range(para_avg.shape[0]):
kde = sm.nonparametric.KDEUnivariate(para_sim[i])
kde.fit()
kernel_support[i] = kde.support
kernel_density[i] = kde.density
# Show times
t1 = times['mh']
print(f'Time for Metropolis-Hastings algorithm: {round(t1, 2)}s')
print(f'Acceptance rate: {round(acceptancerate, 2)}')
# save data
if savedata: np.save('import_export/results/hank_2afull_est',
[x, para_avg, para_5, para_95, Nsim, acceptancerate, rmean, kernel_support, kernel_density,
para_sim])
if savedata: np.save('import_export/results/hank_2afull_speed', [times])
# parameters
T_list = np.array([100, 125, 150, 175, 200, 300, 400, 500, 1000])
Ttrunc = 100
sigma = 0.01
# Compute steady states
ss_ks = ks.ks_ss()
ss_ha = hank_1a.hank_ss()
ss_ha2 = hank_2a.hank_ss(noisy=False)
def truncation_f(rho):
# Compute model for different grid sizes
irf_list_ks = np.zeros((Ttrunc, len(T_list)))
irf_list_hank = np.zeros((Ttrunc, len(T_list)))
irf_list_hank2 = np.zeros((Ttrunc, len(T_list)))
for i in range(len(T_list)):
# Krusell Smith
G = jac.get_G(block_list=[ks.firm, ks.mkt_clearing, ks.household], exogenous=['Z'], unknowns=['K'],
targets=['asset_mkt'], T=T_list[i], ss=ss_ks)
dZ = ss_ks['Z'] * sigma * rho ** np.arange(T_list[i])
irf_list_ks[:, i] = (G['Y']['Z'] @ dZ / ss_ks['Y'])[:Ttrunc]
# one asset hank
G = jac.get_G([hank_1a.household_trans, hank_1a.firm, hank_1a.monetary, hank_1a.fiscal, hank_1a.nkpc,
hank_1a.mkt_clearing],
exogenous=['Z'], unknowns=['pi', 'w', 'Y'], targets=['nkpc_res', 'asset_mkt', 'labor_mkt'],
T=T_list[i], ss=ss_ha)
dZ = ss_ha['Z'] * sigma * rho ** np.arange(T_list[i])
irf_list_hank[:, i] = (G['Y']['Z'] @ dZ / ss_ha['Y'])[:Ttrunc]
# two asset hank
pricing = solved(block_list=[hank_2a.pricing], unknowns=['pi'], targets=['nkpc'])
arbitrage = solved(block_list=[hank_2a.arbitrage], unknowns=['p'], targets=['equity'])
production = solved(block_list=[hank_2a.labor, hank_2a.investment], unknowns=['Q', 'K'], targets=['inv', 'val'])
block_list = [hank_2a.household_inc, pricing, arbitrage, production, hank_2a.dividend, hank_2a.taylor,
hank_2a.fiscal, hank_2a.finance, hank_2a.wage, hank_2a.union, hank_2a.mkt_clearing]
G = jac.get_G(block_list, exogenous=['rstar', 'markup', 'G', 'beta', 'Z', 'rinv_shock', 'markup_w'],
unknowns=['r', 'w', 'Y'], targets=['asset_mkt', 'fisher', 'wnkpc'], T=T_list[i], ss=ss_ha2,
outputs=['Y'])
dZ = ss_ha2['Z'] * sigma * rho ** np.arange(T_list[i])
irf_list_hank2[:, i] = (G['Y']['Z'] @ dZ / ss_ha2['Y'])[:Ttrunc]
# Compute mean squared error
diff_ks = irf_list_ks - np.tile(irf_list_ks[:, -1], (len(T_list), 1)).T
diff_hank = irf_list_hank - np.tile(irf_list_hank[:, -1], (len(T_list), 1)).T
diff_hank2 = irf_list_hank2 - np.tile(irf_list_hank2[:, -1], (len(T_list), 1)).T
rmse_ks = 1 / sigma * np.sqrt(np.sum(diff_ks ** 2, axis=0) / diff_ks.shape[0]) # Report in percent
rmse_hank = 1 / sigma * np.sqrt(np.sum(diff_hank ** 2, axis=0) / diff_hank.shape[0]) # Report in percent
rmse_hank2 = 1 / sigma * np.sqrt(np.sum(diff_hank2 ** 2, axis=0) / diff_hank2.shape[0]) # Report in percent
return rmse_ks, rmse_hank, rmse_hank2
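# Note: the RMSE is computed against the T = 1000 column (the last entry of T_list)
# as the benchmark, and scaled by 1/sigma so it reads in percent of the shock size.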
# Compute error for rho = 0.9 and rho = 0.99
rmse_ks_rho90, rmse_hank_rho90, rmse_hank2_rho90 = truncation_f(0.9)
rmse_ks_rho99, rmse_hank_rho99, rmse_hank2_rho99 = truncation_f(0.99)
# save data
if savedata: np.save('import_export/results/truncation',
[T_list, rmse_ks_rho90, rmse_hank_rho90, rmse_hank2_rho90, rmse_ks_rho99, rmse_hank_rho99,
rmse_hank2_rho99])
# Steady state
ss_ks = ks.ks_ss()
ss_ha = hank_1a.hank_ss()
ss_ha2 = hank_2a.hank_ss(noisy=False)
# Parameters for irf
cols = np.array([5, 25, 50, 100])
T = 150
# Compute jacobian with fake news algorithm
J_ks = jac.get_G([ks.household], ['r', 'w'], [], [], T, ss_ks)
J_ha = jac.get_G([hank_1a.household_trans], ['r', 'w', 'Div', 'Tax'], [], [], T, ss_ha)
J_ha2 = jac.get_G([hank_2a.household_inc], ['ra', 'rb', 'beta', 'w', 'N', 'tax'], [], [], T, ss_ha2)
# Compute jacobian directly
J_ks_direct = aux_jac.get_J_direct(ks.household, ['r', 'w'], ['C'], ss_ks, T, cols)
J_ha_direct = aux_jac.get_J_direct(hank_1a.household_trans, ['r', 'w', 'Div', 'Tax'], ['C'], ss_ha, T, cols)
J_ha2_direct = aux_jac.get_J_direct(hank_2a.household_inc, ['ra', 'rb', 'beta', 'w', 'N', 'tax'], ['C'], ss_ha2, T, cols)
# save data
if savedata: np.save('import_export/results/fake_news',
[cols, J_ks, J_ks_direct, J_ha, J_ha_direct, J_ha2, J_ha2_direct])
times = {'direct': {0: {}, 1: {}, 2: {}}, 'fake': {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}}
Krusell-Smith
$n_x=2, n_y=2, n_g=3500.$
# Steady state
ss_ks = ks.ks_ss()
T = 300
## Fake-news algorithm
# Call all the steps once
shock_dict = {'r': {'r': 1}, 'w': {'w': 1}}
ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_pol_pi, a_space = aux_jac.step_fake_0(ks.backward_iterate, ss_ks)
curlyYs, curlyDs = aux_jac.step_fake_1(ks.backward_iterate, {'r': {'r': 1}, 'w': {'w': 1}}, ss_ks, ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_space, T)
curlyPs = aux_jac.step_fake_2(ss_ks, outcome_list, ssy_list, a_pol_i, a_pol_pi, T)
F = aux_jac.step_fake_3(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
J = aux_jac.step_fake_4(F, shock_dict, outcome_list)
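# For reference (fake-news algorithm of Auclert et al.): step 1 yields the date-0
# responses curlyY and distribution shifts curlyD, step 2 the expectation vectors
# curlyP, step 3 the fake-news matrix F with F[0, s] = curlyY[s] and
# F[t, s] = curlyP[t-1] @ curlyD[s] for t >= 1, and step 4 the Jacobian via the
# recursion J[t, s] = F[t, s] + J[t-1, s-1].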
# Step 1
res = %timeit -o -q curlyYs, curlyDs = aux_jac.step_fake_1(ks.backward_iterate, {'r': {'r': 1}, 'w': {'w': 1}}, ss_ks, ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_space, T)
times['fake'][1]['ks'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q curlyPs = aux_jac.step_fake_2(ss_ks, outcome_list, ssy_list, a_pol_i, a_pol_pi, T)
times['fake'][2]['ks'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q F = aux_jac.step_fake_3(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
times['fake'][3]['ks'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q J = aux_jac.step_fake_4(F, shock_dict, outcome_list)
times['fake'][4]['ks'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['fake'][i]['ks'])
times['fake'][0]['ks'] = avg
## Direct method
# Call all the steps once
Va_path, a_path, c_path, D_path = aux_jac.ks_backward(ss_ks, ks.backward_iterate, w=np.full(T, ss_ks['w']) + 1E-4*(np.arange(T) == 3))
col1 = aux_jac.ks_forward(ss_ks, T, Va_path, a_path, c_path, D_path)
# Step 1
res = %timeit -o -q Va_path, a_path, c_path, D_path = aux_jac.ks_backward(ss_ks, ks.backward_iterate, w=np.full(T, ss_ks['w']) + 1E-4*(np.arange(T) == 3))
times['direct'][1]['ks'] = np.mean(res.timings) * 600
# multiply by 600 because there are 600 jacobians to compute (2 inputs x 300 periods)
# Step 2
res = %timeit -o -q col1 = aux_jac.ks_forward(ss_ks, T, Va_path, a_path, c_path, D_path)
times['direct'][2]['ks'] = np.mean(res.timings) * 600
# multiply by 600 because there are 600 jacobians to compute (2 inputs x 300 periods)
# Compute total
avg = 0
for i in range(1,3): avg += np.mean(times['direct'][i]['ks'])
times['direct'][0]['ks'] = avg
High-dimensional Krusell-Smith
$n_x=2, n_y=2, n_g=250000.$
# Steady state
ss_ks = ks.ks_ss(nS=50, nA=5000)
T = 300
## Fake-news algorithm
# Call all the steps once
shock_dict = {'r': {'r': 1}, 'w': {'w': 1}}
ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_pol_pi, a_space = aux_jac.step_fake_0(ks.backward_iterate, ss_ks)
curlyYs, curlyDs = aux_jac.step_fake_1(ks.backward_iterate, {'r': {'r': 1}, 'w': {'w': 1}}, ss_ks, ssinput_dict, ssy_list,
outcome_list, V_name, a_pol_i, a_space, T)
curlyPs = aux_jac.step_fake_2(ss_ks, outcome_list, ssy_list, a_pol_i, a_pol_pi, T)
F = aux_jac.step_fake_3(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
J = aux_jac.step_fake_4(F, shock_dict, outcome_list)
# Step 1
res = %timeit -o -q curlyYs, curlyDs = aux_jac.step_fake_1(ks.backward_iterate, {'r': {'r': 1}, 'w': {'w': 1}}, ss_ks, ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_space, T)
times['fake'][1]['hd_ks'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q curlyPs = aux_jac.step_fake_2(ss_ks, outcome_list, ssy_list, a_pol_i, a_pol_pi, T)
times['fake'][2]['hd_ks'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q F = aux_jac.step_fake_3(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
times['fake'][3]['hd_ks'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q J = aux_jac.step_fake_4(F, shock_dict, outcome_list)
times['fake'][4]['hd_ks'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['fake'][i]['hd_ks'])
times['fake'][0]['hd_ks'] = avg
## Direct method
# Call all the steps once
Va_path, a_path, c_path, D_path = aux_jac.ks_backward(ss_ks, ks.backward_iterate, w=np.full(T, ss_ks['w']) + 1E-4*(np.arange(T) == 3))
col1 = aux_jac.ks_forward(ss_ks, T, Va_path, a_path, c_path, D_path)
# Step 1
res = %timeit -o -q Va_path, a_path, c_path, D_path = aux_jac.ks_backward(ss_ks, ks.backward_iterate, w=np.full(T, ss_ks['w']) + 1E-4*(np.arange(T) == 3))
times['direct'][1]['hd_ks'] = np.mean(res.timings) * 600
# multiply by 600 because there are 600 jacobians to compute (2 inputs x 300 periods)
# Step 2
res = %timeit -o -q col1 = aux_jac.ks_forward(ss_ks, T, Va_path, a_path, c_path, D_path)
times['direct'][2]['hd_ks'] = np.mean(res.timings) * 600
# multiply by 600 because there are 600 jacobians to compute (2 inputs x 300 periods)
# Compute total
avg = 0
for i in range(1,3): avg += np.mean(times['direct'][i]['hd_ks'])
times['direct'][0]['hd_ks'] = avg
One-asset HANK
$n_x=4, n_y=4, n_g=3500.$
# Steady state
ss_ha = hank_1a.hank_ss()
T = 300
## Fake-news algorithm
# Call all the steps once
T_div = ss_ha['div_rule'] / np.sum(ss_ha['pi_e'] * ss_ha['div_rule'])
T_tax = -ss_ha['tax_rule'] / np.sum(ss_ha['pi_e'] * ss_ha['tax_rule'])
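# T_div and T_tax are incidence rules mapping dividend and tax changes into transfer
# shocks on the income grid, normalized so their population-weighted means are +1 and -1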
shock_dict = {'r': {'r': 1}, 'w': {'w': 1}, 'Div': {'T': T_div}, 'Tax': {'T': T_tax}}
ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_pol_pi, a_space = aux_jac.step_fake_0(hank_1a.backward_iterate, ss_ha)
curlyYs, curlyDs = aux_jac.step_fake_1(hank_1a.backward_iterate, shock_dict, ss_ha, ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_space, T)
curlyPs = aux_jac.step_fake_2(ss_ha, outcome_list, ssy_list, a_pol_i, a_pol_pi, T)
F = aux_jac.step_fake_3(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
J = aux_jac.step_fake_4(F, shock_dict, outcome_list)
# Step 1
res = %timeit -o -q curlyYs, curlyDs = aux_jac.step_fake_1(hank_1a.backward_iterate, shock_dict, ss_ha, ssinput_dict, ssy_list, outcome_list, V_name, a_pol_i, a_space, T)
times['fake'][1]['ha'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q curlyPs = aux_jac.step_fake_2(ss_ha, outcome_list, ssy_list, a_pol_i, a_pol_pi, T)
times['fake'][2]['ha'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q F = aux_jac.step_fake_3(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
times['fake'][3]['ha'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q J = aux_jac.step_fake_4(F, shock_dict, outcome_list)
times['fake'][4]['ha'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['fake'][i]['ha'])
times['fake'][0]['ha'] = avg
## Direct method
# Call all the steps once
Va_path, a_path, c_path, n_path, ns_path, D_path = aux_jac.ha_backward(ss_ha, hank_1a.backward_iterate, w=np.full(T, ss_ha['w']) + 1E-4*(np.arange(T)==3))
col1 = aux_jac.ha_forward(ss_ha, T, Va_path, a_path, c_path, n_path, ns_path, D_path)
# Step 1
res = %timeit -o -q Va_path, a_path, c_path, n_path, ns_path, D_path = aux_jac.ha_backward(ss_ha, hank_1a.backward_iterate, w=np.full(T, ss_ha['w']) + 1E-4*(np.arange(T)==3))
times['direct'][1]['ha'] = np.mean(res.timings) * 1200
# multiply by 1200 because there are 1200 jacobians to compute (4 inputs x 300 periods)
# Step 2
res = %timeit -o -q col1 = aux_jac.ha_forward(ss_ha, T, Va_path, a_path, c_path, n_path, ns_path, D_path)
times['direct'][2]['ha'] = np.mean(res.timings) * 1200
# multiply by 1200 because there are 1200 jacobians to compute (4 inputs x 300 periods)
# Compute total
avg = 0
for i in range(1,3): avg += np.mean(times['direct'][i]['ha'])
times['direct'][0]['ha'] = avg
Two-asset HANK
$n_x=5, n_y=4, n_g=10500.$
# Compute steady state
ss_ha2 = hank_2a.hank_ss(noisy=False)
T = 300
## Fake-news algorithm
# Call all the steps once
shock_dict = {'ra': {'ra': 1},
'rb': {'rb': 1},
'tax': {'z_grid': -ss_ha2['w']*ss_ha2['e_grid']},
'w': {'z_grid': (1-ss_ha2['tax'])*ss_ha2['e_grid']},
'N': {'z_grid': (1-ss_ha2['tax'])*ss_ha2['w']*ss_ha2['e_grid']}}
ssinput_dict, ssy_list, outcome_list, V_list, a_i, a_pi, b_i, b_pi, a_space, b_space = aux_jac.step_fake_0_2d(hank_2a.backward_iterate, ss_ha2)
curlyYs, curlyDs = aux_jac.step_fake_1_2d(hank_2a.backward_iterate, shock_dict, ss_ha2, ssinput_dict, ssy_list, outcome_list, V_list, a_i, a_pi, b_i, b_pi, a_space, b_space, T)
curlyPs = aux_jac.step_fake_2_2d(ss_ha2, outcome_list, ssy_list, a_i, b_i, a_pi, b_pi, T)
F = aux_jac.step_fake_3_2d(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
J = aux_jac.step_fake_4_2d(F, shock_dict, outcome_list)
# Step 1
res = %timeit -o -q curlyYs, curlyDs = aux_jac.step_fake_1_2d(hank_2a.backward_iterate, shock_dict, ss_ha2, ssinput_dict, ssy_list, outcome_list, V_list, a_i, a_pi, b_i, b_pi, a_space, b_space, T)
times['fake'][1]['ha2'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q curlyPs = aux_jac.step_fake_2_2d(ss_ha2, outcome_list, ssy_list, a_i, b_i, a_pi, b_pi, T)
times['fake'][2]['ha2'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q F = aux_jac.step_fake_3_2d(shock_dict, outcome_list, curlyYs, curlyDs, curlyPs)
times['fake'][3]['ha2'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q J = aux_jac.step_fake_4_2d(F, shock_dict, outcome_list)
times['fake'][4]['ha2'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['fake'][i]['ha2'])
times['fake'][0]['ha2'] = avg
## Direct method
# Call all the steps once
Va_t, Vb_t, a_t, b_t, c_t, m_t, D_treated, D_control = aux_jac.ha2_backward(ss_ha2, hank_2a.backward_iterate, ra=np.full(T, ss_ha2['ra']) + 1E-4*(np.arange(T)==3))
col3 = aux_jac.ha2_forward(ss_ha2, T, Va_t, Vb_t, a_t, b_t, c_t, m_t, D_treated, D_control)
# Step 1
res = %timeit -o -q Va_t, Vb_t, a_t, b_t, c_t, m_t, D_treated, D_control = aux_jac.ha2_backward(ss_ha2, hank_2a.backward_iterate, ra=np.full(T, ss_ha2['ra']) + 1E-4*(np.arange(T)==3))
times['direct'][1]['ha2'] = np.mean(res.timings) * 1500
# multiply by 1500 because there are 1500 jacobians to compute (5 inputs x 300 periods)
# Step 2
res = %timeit -o -q col3 = aux_jac.ha2_forward(ss_ha2, T, Va_t, Vb_t, a_t, b_t, c_t, m_t, D_treated, D_control)
times['direct'][2]['ha2'] = np.mean(res.timings) * 1500/2
# dividing by two because this method duplicates effort
# Compute total
avg = 0
for i in range(1,3): avg += np.mean(times['direct'][i]['ha2'])
times['direct'][0]['ha2'] = avg
if savedata: np.save('import_export/results/fake_news_speed', [times])
T = 300
times = {'DAG': {0: {}, 1: {}, 2: {}, 3: {}}, 'no_DAG': {0: {}, 1: {}, 2: {}, 3: {}}}
Krusell-Smith
# Steady state
ss = ks.ks_ss()
# Compute jacobian of household block
J_ha = jac.get_G([ks.household], ['r', 'w'], [], [], T, ss)
## Using DAG
# define model
exogenous = ['Z']
unknowns = ['K']
targets = ['asset_mkt']
outputs = ['K', 'w', 'r', 'C', 'Y']
block_list = [J_ha, ks.firm, ks.mkt_clearing]
# run once
curlyJs, required = aux_jac.step0_preliminaries(block_list, exogenous, unknowns, ss)
J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
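# For orientation: step 2 is the one dense linear-algebra step. It is assumed to solve the
# packed system H_U dU = -H_Z dZ for all shock columns at once, i.e. G_U = -H_U^{-1} H_Z.
# A self-contained toy version with stand-in matrices (not the toolkit's packed objects;
# names are suffixed _toy so the timed variables above are left untouched):
nu, nz, Tt = 2, 1, 50                          # toy sizes: unknowns, shocks, horizon
rng = np.random.default_rng(0)
H_U_toy = np.eye(nu*Tt) + 0.05*rng.standard_normal((nu*Tt, nu*Tt))  # well-conditioned stand-in
H_Z_toy = rng.standard_normal((nu*Tt, nz*Tt))
G_U_toy = -np.linalg.solve(H_U_toy, H_Z_toy)   # dU = G_U_toy @ dZ zeroes the targets to first order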
# Step 1
res = %timeit -o -q J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
times['DAG'][1]['ks'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
times['DAG'][2]['ks'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
times['DAG'][3]['ks'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,4): avg += np.mean(times['DAG'][i]['ks'])
times['DAG'][0]['ks'] = avg
## No DAG
# define model
unknowns = ['Y', 'r', 'w', 'K']
targets = ['res_Y', 'res_r', 'res_w', 'asset_mkt']
block_list = [J_ha, ks.ks_nodag]
# run once
curlyJs, required = aux_jac.step0_preliminaries(block_list, exogenous, unknowns, ss)
J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
# Step 1
res = %timeit -o -q J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
times['no_DAG'][1]['ks'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
times['no_DAG'][2]['ks'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
times['no_DAG'][3]['ks'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,4): avg += np.mean(times['no_DAG'][i]['ks'])
times['no_DAG'][0]['ks'] = avg
One-asset HANK
# Steady state
ss = hank_1a.hank_ss()
# Compute jacobian of household block
J_ha = jac.get_G([hank_1a.household_trans], ['r', 'w', 'Div', 'Tax'], [], [], T, ss, save=True)
## Using DAG
# define model
exogenous = ['G', 'markup', 'rstar']
unknowns = ['pi', 'w', 'Y']
targets = ['nkpc_res', 'asset_mkt', 'labor_mkt']
outputs = ['pi', 'Tax', 'Y', 'Div', 'r', 'i', 'N', 'w', 'A', 'NS']
block_list = [J_ha, hank_1a.firm, hank_1a.monetary, hank_1a.fiscal, hank_1a.nkpc, hank_1a.mkt_clearing]
# run once
curlyJs, required = aux_jac.step0_preliminaries(block_list, exogenous, unknowns, ss)
J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
# Step 1
res = %timeit -o -q J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
times['DAG'][1]['ha'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
times['DAG'][2]['ha'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
times['DAG'][3]['ha'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,4): avg += np.mean(times['DAG'][i]['ha'])
times['DAG'][0]['ha'] = avg
## No DAG
# define model
unknowns = ['Y', 'L', 'w', 'pi', 'Div', 'Tax', 'r']
targets = ['res_L', 'res_Div', 'res_r', 'res_Tax', 'nkpc_res', 'asset_mkt', 'labor_mkt']
block_list = [J_ha, hank_1a.ha1_nodag]
# run once
curlyJs, required = aux_jac.step0_preliminaries(block_list, exogenous, unknowns, ss)
J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
# Step 1
res = %timeit -o -q J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
times['no_DAG'][1]['ha'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
times['no_DAG'][2]['ha'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
times['no_DAG'][3]['ha'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,4): avg += np.mean(times['no_DAG'][i]['ha'])
times['no_DAG'][0]['ha'] = avg
Two-asset HANK
# Steady state
ss = hank_2a.hank_ss(noisy=False)
# Compute jacobian of household block
J_ha = jac.get_G([hank_2a.household_inc], ['ra', 'rb', 'beta', 'w', 'N', 'tax'], [], [], T, ss)
## Using DAG
# define model
exogenous = ['rstar', 'markup', 'G', 'beta', 'Z', 'rinv_shock', 'markup_w']
unknowns = ['r', 'w', 'Y']
targets = ['asset_mkt', 'fisher', 'wnkpc']
G_pricing = jac.get_G([hank_2a.pricing], ['mc', 'r', 'Y'], ['pi'], ['nkpc'], T, ss)
G_arbitrage = jac.get_G([hank_2a.arbitrage], ['div', 'r'], ['p'], ['equity'], T, ss)
G_production = jac.get_G([hank_2a.labor, hank_2a.investment], ['Y', 'w', 'r', 'Z'], ['Q', 'K'], ['inv', 'val'], T, ss)
block_list = [J_ha, G_pricing, G_arbitrage, G_production, hank_2a.dividend, hank_2a.taylor, hank_2a.fiscal, hank_2a.finance, hank_2a.wage, hank_2a.union, hank_2a.mkt_clearing]
outputs = None
# run once
curlyJs, required = aux_jac.step0_preliminaries(block_list, exogenous, unknowns, ss)
J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
# Step 1
res = %timeit -o -q J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
times['DAG'][1]['ha2'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
times['DAG'][2]['ha2'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
times['DAG'][3]['ha2'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,4): avg += np.mean(times['DAG'][i]['ha2'])
times['DAG'][0]['ha2'] = avg
## No DAG
# define model
unknowns = ['piw', 'psiw', 'rb', 'ra', 'tax', 'i', 'psip', 'I', 'Q', 'w', 'N', 'K', 'div', 'p', 'pi', 'mc', 'r', 'Y']
targets = ['nkpc', 'equity', 'res_N', 'res_mc', 'inv', 'val', 'res_psip', 'res_I', 'res_div', 'res_i', 'res_tax', 'res_rb',
'res_ra', 'fisher', 'res_piw', 'res_psiw', 'wnkpc', 'asset_mkt']
block_list = [J_ha, hank_2a.ha2_nodag]
# run once
curlyJs, required = aux_jac.step0_preliminaries(block_list, exogenous, unknowns, ss)
J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
# Step 1
res = %timeit -o -q J_curlyH = aux_jac.step1_forward_H(curlyJs, exogenous, unknowns, targets, required)
times['no_DAG'][1]['ha2'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q G_U = aux_jac.step2_solve_GU(J_curlyH, exogenous, unknowns, targets, T)
times['no_DAG'][2]['ha2'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q G = aux_jac.step3_forward_G(G_U, curlyJs, exogenous, targets, outputs, required, unknowns)
times['no_DAG'][3]['ha2'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,4): avg += np.mean(times['no_DAG'][i]['ha2'])
times['no_DAG'][0]['ha2'] = avg
if savedata: np.save('import_export/results/G_speed',[times])
Time the computation of impulse responses to a single shock path, again with and without the DAG structure.
T = 300
times = {'DAG': {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}, 'no_DAG': {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}}
Krusell-Smith
# Steady state
ss = ks.ks_ss()
# Compute jacobian of household block
J_ha = jac.get_G([ks.household], ['r', 'w'], [], [], T, ss)
## Using DAG
# define model
exogenous = ['Z']
unknowns = ['K']
targets = ['asset_mkt']
outputs = ['K', 'w', 'r', 'C', 'Y']
block_list = [J_ha, ks.firm, ks.mkt_clearing]
dZ = {'Z': 0.9**np.arange(T)}
# run once
curlyJs, required = aux_jac.step0_irf_preliminaries(block_list, dZ, unknowns, ss)
H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
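# For a single known shock path, the full G is never needed: step 3 is assumed to solve one
# linear system H_U dU = -(J_curlyZ @ dZ), rather than one system per shock column.
# A toy version with stand-in objects, in the same spirit as the G-speed section:
Tt = 50
rng = np.random.default_rng(0)
H_U_toy = np.eye(Tt) + 0.05*rng.standard_normal((Tt, Tt))  # stand-in packed H_U
dZ_toy = 0.9**np.arange(Tt)                                # AR(1)-style impulse, as in dZ above
rhs_toy = rng.standard_normal((Tt, Tt)) @ dZ_toy           # stand-in for J_curlyZ_dZ
dU_toy = -np.linalg.solve(H_U_toy, rhs_toy)                # unknowns' impulse responses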
# Step 1
res = %timeit -o -q H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
times['DAG'][1]['ks'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
times['DAG'][2]['ks'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
times['DAG'][3]['ks'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
times['DAG'][4]['ks'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['DAG'][i]['ks'])
times['DAG'][0]['ks'] = avg
## No DAG
# define model
unknowns = ['Y', 'r', 'w', 'K']
targets = ['res_Y', 'res_r', 'res_w', 'asset_mkt']
block_list = [J_ha, ks.ks_nodag]
# run once
curlyJs, required = aux_jac.step0_irf_preliminaries(block_list, dZ, unknowns, ss)
H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
# Step 1
res = %timeit -o -q H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
times['no_DAG'][1]['ks'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
times['no_DAG'][2]['ks'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
times['no_DAG'][3]['ks'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
times['no_DAG'][4]['ks'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['no_DAG'][i]['ks'])
times['no_DAG'][0]['ks'] = avg
One-asset HANK
# Steady state
ss = hank_1a.hank_ss()
# Compute jacobian of household block
J_ha = jac.get_G([hank_1a.household_trans], ['r', 'w', 'Div', 'Tax'], [], [], T, ss, save=True)
## Using DAG
# define model
exogenous = ['G', 'markup', 'rstar']
unknowns = ['pi', 'w', 'Y']
targets = ['nkpc_res', 'asset_mkt', 'labor_mkt']
outputs = ['pi', 'Tax', 'Y', 'Div', 'r', 'i', 'N', 'w', 'A', 'NS']
block_list = [J_ha, hank_1a.firm, hank_1a.monetary, hank_1a.fiscal, hank_1a.nkpc, hank_1a.mkt_clearing]
dZ = {'Z': 0.9**np.arange(T)}
# run once
curlyJs, required = aux_jac.step0_irf_preliminaries(block_list, dZ, unknowns, ss)
H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
# Step 1
res = %timeit -o -q H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
times['DAG'][1]['ha'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
times['DAG'][2]['ha'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
times['DAG'][3]['ha'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
times['DAG'][4]['ha'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['DAG'][i]['ha'])
times['DAG'][0]['ha'] = avg
## No DAG
# define model
unknowns = ['Y', 'L', 'w', 'pi', 'Div', 'Tax', 'r']
targets = ['res_L', 'res_Div', 'res_r', 'res_Tax', 'nkpc_res', 'asset_mkt', 'labor_mkt']
block_list = [J_ha, hank_1a.ha1_nodag]
# run once
curlyJs, required = aux_jac.step0_irf_preliminaries(block_list, dZ, unknowns, ss)
H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
# Step 1
res = %timeit -o -q H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
times['no_DAG'][1]['ha'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
times['no_DAG'][2]['ha'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
times['no_DAG'][3]['ha'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
times['no_DAG'][4]['ha'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['no_DAG'][i]['ha'])
times['no_DAG'][0]['ha'] = avg
Two-asset HANK
# Steady state
ss = hank_2a.hank_ss(noisy=False)
# Compute jacobian of household block
J_ha = jac.get_G([hank_2a.household_inc], ['ra', 'rb', 'beta', 'w', 'N', 'tax'], [], [], T, ss)
## Using DAG
# define model
exogenous = ['rstar', 'markup', 'G', 'beta', 'Z', 'rinv_shock', 'markup_w']
unknowns = ['r', 'w', 'Y']
targets = ['asset_mkt', 'fisher', 'wnkpc']
G_pricing = jac.get_G([hank_2a.pricing], ['mc', 'r', 'Y'], ['pi'], ['nkpc'], T, ss)
G_arbitrage = jac.get_G([hank_2a.arbitrage], ['div', 'r'], ['p'], ['equity'], T, ss)
G_production = jac.get_G([hank_2a.labor, hank_2a.investment], ['Y', 'w', 'r', 'Z'], ['Q', 'K'], ['inv', 'val'], T, ss)
block_list = [J_ha, G_pricing, G_arbitrage, G_production, hank_2a.dividend, hank_2a.taylor, hank_2a.fiscal, hank_2a.finance, hank_2a.wage, hank_2a.union, hank_2a.mkt_clearing]
outputs = None
dZ = {'Z': 0.9**np.arange(T)}
# run once
curlyJs, required = aux_jac.step0_irf_preliminaries(block_list, dZ, unknowns, ss)
H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
# Step 1
res = %timeit -o -q H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
times['DAG'][1]['ha2'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
times['DAG'][2]['ha2'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
times['DAG'][3]['ha2'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
times['DAG'][4]['ha2'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['DAG'][i]['ha2'])
times['DAG'][0]['ha2'] = avg
## No DAG
# define model
unknowns = ['piw', 'psiw', 'rb', 'ra', 'tax', 'i', 'psip', 'I', 'Q', 'w', 'N', 'K', 'div', 'p', 'pi', 'mc', 'r', 'Y']
targets = ['nkpc', 'equity', 'res_N', 'res_mc', 'inv', 'val', 'res_psip', 'res_I', 'res_div', 'res_i', 'res_tax', 'res_rb',
'res_ra', 'fisher', 'res_piw', 'res_psiw', 'wnkpc', 'asset_mkt']
block_list = [J_ha, hank_2a.ha2_nodag]
# run once
curlyJs, required = aux_jac.step0_irf_preliminaries(block_list, dZ, unknowns, ss)
H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
# Step 1
res = %timeit -o -q H_U_unpacked = aux_jac.step1_irf_forward_HU(curlyJs, unknowns, targets, required)
times['no_DAG'][1]['ha2'] = np.mean(res.timings)
# Step 2
res = %timeit -o -q J_curlyZ_dZ = aux_jac.step2_irf_forward_Z(curlyJs, dZ, targets, outputs, required)
times['no_DAG'][2]['ha2'] = np.mean(res.timings)
# Step 3
res = %timeit -o -q dU = aux_jac.step3_irf_solve_dU(H_U_unpacked, J_curlyZ_dZ, unknowns, targets, T)
times['no_DAG'][3]['ha2'] = np.mean(res.timings)
# Step 4
res = %timeit -o -q dX = aux_jac.step4_irf_forward_U_combine(curlyJs, dU, J_curlyZ_dZ, outputs, required, T)
times['no_DAG'][4]['ha2'] = np.mean(res.timings)
# Compute total
avg = 0
for i in range(1,5): avg += np.mean(times['no_DAG'][i]['ha2'])
times['no_DAG'][0]['ha2'] = avg
if savedata: np.save('import_export/results/irf_speed',[times])
Time the nonlinear transition-path solver td_solve on each model, using a 1% TFP shock that decays at rate 0.8.
times = {}
T = 300
dZ = 0.01*0.8**np.arange(T)
# Krusell-Smith
ss = ks.ks_ss()
unknowns = ['K']
targets = ['asset_mkt']
block_list = [ks.firm, ks.mkt_clearing, ks.household]
G = jac.get_G(block_list=block_list, exogenous=unknowns, unknowns=[], targets=[], outputs=targets, T=T, ss=ss)  # Jacobians of targets w.r.t. unknowns
H_U = jac.pack_jacobians(G, inputs=unknowns, outputs=targets, T=T)  # stack them into one big matrix for td_solve
td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
res = %timeit -o -q td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
times['ks'] = np.mean(res.timings)
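# td_solve is assumed to be a quasi-Newton iteration on the stacked path of unknowns:
# hold the steady-state packed Jacobian H_U fixed and update U <- U - H_U^{-1} H(U, Z)
# until the nonlinear target residuals vanish. A minimal sketch of that scheme:
def quasi_newton_path(H, U0, H_U_packed, tol=1E-8, maxit=30):
    # H: hypothetical map from a stacked unknowns path to stacked target residuals
    U = U0.copy()
    for _ in range(maxit):
        err = H(U)                              # nonlinear residuals along the whole path
        if np.max(np.abs(err)) < tol:
            return U
        U -= np.linalg.solve(H_U_packed, err)   # Newton step with fixed steady-state Jacobian
    raise RuntimeError('quasi-Newton did not converge')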
# Krusell-Smith with a high-dimensional (HD) grid
ss = ks.ks_ss(nS=50, nA=5000)
unknowns = ['K']
targets = ['asset_mkt']
block_list = [ks.firm, ks.mkt_clearing, ks.household]
G = jac.get_G(block_list=block_list, exogenous=unknowns, unknowns=[], targets=[], outputs=targets, T=T, ss=ss)
H_U = jac.pack_jacobians(G, inputs=unknowns, outputs=targets, T=T)
td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
res = %timeit -o -q td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
times['hd_ks'] = np.mean(res.timings)
# 1-asset HANK
ss = hank_1a.hank_ss()
unknowns = ['pi', 'w', 'Y']
targets = ['nkpc_res', 'asset_mkt', 'labor_mkt']
block_list = [hank_1a.household_trans, hank_1a.firm, hank_1a.monetary, hank_1a.fiscal, hank_1a.nkpc, hank_1a.mkt_clearing]
G = jac.get_G(block_list=block_list, exogenous=unknowns, unknowns=[], targets=[], outputs=targets, T=T, ss=ss)
H_U = jac.pack_jacobians(G, inputs=unknowns, outputs=targets, T=T)
td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
res = %timeit -o -q td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
times['ha'] = np.mean(res.timings)
# 2-asset HANK
ss = hank_2a.hank_ss(noisy=False)
unknowns = ['r', 'w', 'Y', 'K', 'Q', 'pi', 'p']
targets = ['asset_mkt', 'fisher', 'wnkpc', 'inv', 'val', 'equity', 'nkpc']
block_list = [hank_2a.household_inc, hank_2a.pricing, hank_2a.arbitrage, hank_2a.labor, hank_2a.investment, hank_2a.dividend, hank_2a.taylor, hank_2a.fiscal, hank_2a.finance, hank_2a.wage, hank_2a.union, hank_2a.mkt_clearing]
G = jac.get_G(block_list=block_list, exogenous=unknowns, unknowns=[], targets=[], outputs=targets, T=T, ss=ss)
H_U = jac.pack_jacobians(G, inputs=unknowns, outputs=targets, T=T)
td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
res = %timeit -o -q td_nonlinear = nonlinear.td_solve(ss, block_list, unknowns, targets, H_U=H_U, Z=ss['Z']*(1+dZ), noisy=False)
times['ha2'] = np.mean(res.timings)
if savedata: np.save('import_export/results/nonlinear_speed', [times])