From 963f0c05b4e47932b24c860c3384a116114fd82e Mon Sep 17 00:00:00 2001 From: llorracc Date: Thu, 23 Dec 2021 13:41:46 -0500 Subject: [PATCH 1/9] Clean up and rationalize --- HARK/ConsumptionSaving/ConsIndShockModel.py | 58 +++++++++++++++++---- 1 file changed, 47 insertions(+), 11 deletions(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 0defb840b..933ff73ef 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -1794,6 +1794,14 @@ def transition(self): # Calculate new states: normalized market resources and permanent income level pLvlNow = pLvlPrev*self.shocks['PermShk'] # Updated permanent income level + # Asymptotically it can't hurt to impose true restrictions + # (at least if the GICRaw holds) + pLvlNowMean = 1.0 + if self.normalize_levels == True: + pLvlNowMean = np.mean(pLvlNow) + + pLvlNow = pLvlNow / pLvlNowMean # Does nothing if normalize_levels != True + # Updated aggregate permanent productivity level PlvlAggNow = self.state_prev['PlvlAgg']*self.PermShkAggNow # "Effective" interest factor on normalized assets @@ -2037,6 +2045,8 @@ def check_conditions(self, verbose=None): "vFuncBool": False, # Whether to calculate the value function during solution "CubicBool": False, # Use cubic spline interpolation when True, linear interpolation when False "neutral_measure": False, # Use permanent income neutral measure (see Harmenberg 2021) during simulations when True. + "normalize_shocks": False, # In sims, normalize mean of collection of shocks to population mean + "normalize_levels": False, # In sims, normalize mean of a level variable (like permanent income) to the population mean } ) @@ -2192,25 +2202,44 @@ def get_shocks(self): # Get random draws of income shocks from the discrete distribution IncShks = IncShkDstnNow.draw(N) - PermShkNow[these] = ( - IncShks[0, :] * PermGroFacNow - ) # permanent "shock" includes expected growth - TranShkNow[these] = IncShks[1, :] + # In the limit, it cannot hurt to impose "true" restrictions, + # like the fact that the mean value of the shocks should be one + PermShkMeanNow, TranShkMeanNow = 1.0, 1.0 # Dividing by 1 changes nothing + if self.normalize_shocks == True: + PermShkMeanNow = np.mean(IncShks[0]) + TranShkMeanNow = np.mean(IncShks[1]) + + PermShkNow[these] = (# permanent "shock" includes expected growth + (IncShks[0, :] / PermShkMeanNow) * PermGroFacNow + ) + TranShkNow[these] = ( + (IncShks[1, :] / TranShkMeanNow) + ) # That procedure used the *last* period in the sequence for newborns, but that's not right # Redraw shocks for newborns, using the *first* period in the sequence. Approximation. 
N = np.sum(newborn) if N > 0: these = newborn - IncShkDstnNow = self.IncShkDstn[0] # set current income distribution + IncShkDstnNow = self.IncShkDstn[0] # set current shock distribution PermGroFacNow = self.PermGroFac[0] # and permanent growth factor # Get random draws of income shocks from the discrete distribution EventDraws = IncShkDstnNow.draw_events(N) - PermShkNow[these] = ( - IncShkDstnNow.X[0][EventDraws] * PermGroFacNow - ) # permanent "shock" includes expected growth - TranShkNow[these] = IncShkDstnNow.X[1][EventDraws] + + # In the limit, it cannot hurt to impose "true" restrictions, + # like the fact that the mean value of the shocks should be one + PermShkMeanNow, TranShkMeanNow = 1.0, 1.0 # Dividing by 1 changes nothing + if self.normalize_shocks == True: + PermShkMeanNow = np.mean(IncShkDstnNow.X[0][EventDraws]) + TranShkMeanNow = np.mean(IncShkDstnNow.X[1][EventDraws]) + + PermShkNow[these] = (# permanent "shock" includes expected growth + (IncShkDstnNow.X[0][EventDraws] / PermShkMeanNow)* PermGroFacNow + ) + TranShkNow[these] = ( + (IncShkDstnNow.X[1][EventDraws] / TranShkMeanNow) + ) # PermShkNow[newborn] = 1.0 TranShkNow[newborn] = 1.0 @@ -2728,9 +2757,16 @@ def construct_lognormal_income_process_unemployment(self): PermShkCount, tail_N=0 ) + if not hasattr(self, "normalize_shocks"): + self.normalize_shocks = False + + if not hasattr(self, "normalize_levels"): + self.normalize_levels = False + if not hasattr(self, "neutral_measure"): self.neutral_measure = False - + + # Use Harmenberg (2021) permanent income neutral measure if self.neutral_measure == True: PermShkDstn_t.pmf = PermShkDstn_t.X*PermShkDstn_t.pmf @@ -3067,4 +3103,4 @@ def construct_assets_grid(parameters): init_cyclical['PermShkStd'] = [0.1, 0.1, 0.1, 0.1] init_cyclical['TranShkStd'] = [0.1, 0.1, 0.1, 0.1] init_cyclical['LivPrb'] = 4*[0.98] -init_cyclical['T_cycle'] = 4 \ No newline at end of file +init_cyclical['T_cycle'] = 4 From 362d3de8e6bb072f3bd08355a239c7e661284da9 Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 12:14:09 -0500 Subject: [PATCH 2/9] Restore update_income_process() in pre_solve --- HARK/ConsumptionSaving/ConsIndShockModel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 933ff73ef..9b4a2a442 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -2399,7 +2399,7 @@ def pre_solve(self): # AgentType.pre_solve(self) # Update all income process variables to match any attributes that might # have been changed since `__init__` or `solve()` was last called. 
- # self.update_income_process() + self.update_income_process() self.update_solution_terminal() if not self.quiet: self.check_conditions(verbose=self.verbose) From 387e57b93afa32b3777588d059bee55af9db7730 Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 12:52:41 -0500 Subject: [PATCH 3/9] Make PR easier to read --- HARK/ConsumptionSaving/ConsIndShockModel.py | 22 ++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 9b4a2a442..32979de0c 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -2209,11 +2209,13 @@ def get_shocks(self): PermShkMeanNow = np.mean(IncShks[0]) TranShkMeanNow = np.mean(IncShks[1]) - PermShkNow[these] = (# permanent "shock" includes expected growth - (IncShks[0, :] / PermShkMeanNow) * PermGroFacNow - ) + PermShkNow[these] = ( + (IncShks[0, :] * PermGroFacNow + / PermShkMeanNow) + ) # permanent "shock" includes expected growth TranShkNow[these] = ( - (IncShks[1, :] / TranShkMeanNow) + (IncShks[1, :] + / TranShkMeanNow) ) # That procedure used the *last* period in the sequence for newborns, but that's not right @@ -2221,7 +2223,7 @@ def get_shocks(self): N = np.sum(newborn) if N > 0: these = newborn - IncShkDstnNow = self.IncShkDstn[0] # set current shock distribution + IncShkDstnNow = self.IncShkDstn[0] # set current income distribution PermGroFacNow = self.PermGroFac[0] # and permanent growth factor # Get random draws of income shocks from the discrete distribution @@ -2234,11 +2236,13 @@ def get_shocks(self): PermShkMeanNow = np.mean(IncShkDstnNow.X[0][EventDraws]) TranShkMeanNow = np.mean(IncShkDstnNow.X[1][EventDraws]) - PermShkNow[these] = (# permanent "shock" includes expected growth - (IncShkDstnNow.X[0][EventDraws] / PermShkMeanNow)* PermGroFacNow + PermShkNow[these] = ( + (IncShkDstnNow.X[0][EventDraws] * PermGroFacNow + / PermShkMeanNow) ) TranShkNow[these] = ( - (IncShkDstnNow.X[1][EventDraws] / TranShkMeanNow) + (IncShkDstnNow.X[1][EventDraws] + / TranShkMeanNow) ) # PermShkNow[newborn] = 1.0 TranShkNow[newborn] = 1.0 @@ -2399,7 +2403,7 @@ def pre_solve(self): # AgentType.pre_solve(self) # Update all income process variables to match any attributes that might # have been changed since `__init__` or `solve()` was last called. - self.update_income_process() + # self.update_income_process() self.update_solution_terminal() if not self.quiet: self.check_conditions(verbose=self.verbose) From 6f356457240b14897b5fe5e8ed1c4eb8e8330901 Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 12:55:21 -0500 Subject: [PATCH 4/9] Further improvement in clarity --- HARK/ConsumptionSaving/ConsIndShockModel.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 32979de0c..76c396360 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -2213,10 +2213,7 @@ def get_shocks(self): (IncShks[0, :] * PermGroFacNow / PermShkMeanNow) ) # permanent "shock" includes expected growth - TranShkNow[these] = ( - (IncShks[1, :] - / TranShkMeanNow) - ) + TranShkNow[these] = IncShks[1, :] / TranShkMeanNow # That procedure used the *last* period in the sequence for newborns, but that's not right # Redraw shocks for newborns, using the *first* period in the sequence. Approximation. 
From c2fe14612994a3824e58728631a6a8e0f9121ce3 Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 12:57:38 -0500 Subject: [PATCH 5/9] Even simpler --- HARK/ConsumptionSaving/ConsIndShockModel.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 76c396360..5ed052894 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -2236,11 +2236,8 @@ def get_shocks(self): PermShkNow[these] = ( (IncShkDstnNow.X[0][EventDraws] * PermGroFacNow / PermShkMeanNow) - ) - TranShkNow[these] = ( - (IncShkDstnNow.X[1][EventDraws] - / TranShkMeanNow) - ) + ) # permanent "shock" includes expected growth + TranShkNow[these] = IncShkDstnNow.X[1][EventDraws] / TranShkMeanNow # PermShkNow[newborn] = 1.0 TranShkNow[newborn] = 1.0 From 0b11590ef0a8321cff95681da919b5e6047201f8 Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 13:03:35 -0500 Subject: [PATCH 6/9] Final clarity tweak --- HARK/ConsumptionSaving/ConsIndShockModel.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 5ed052894..3ea44d951 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -1800,7 +1800,7 @@ def transition(self): if self.normalize_levels == True: pLvlNowMean = np.mean(pLvlNow) - pLvlNow = pLvlNow / pLvlNowMean # Does nothing if normalize_levels != True + pLvlNow = pLvlNow / pLvlNowMean # Divide by 1.0 if normalize_levels=False # Updated aggregate permanent productivity level PlvlAggNow = self.state_prev['PlvlAgg']*self.PermShkAggNow @@ -2211,7 +2211,7 @@ def get_shocks(self): PermShkNow[these] = ( (IncShks[0, :] * PermGroFacNow - / PermShkMeanNow) + / PermShkMeanNow) # Divide by 1.0 if normalize_shocks=False ) # permanent "shock" includes expected growth TranShkNow[these] = IncShks[1, :] / TranShkMeanNow @@ -2235,7 +2235,7 @@ def get_shocks(self): PermShkNow[these] = ( (IncShkDstnNow.X[0][EventDraws] * PermGroFacNow - / PermShkMeanNow) + / PermShkMeanNow) # Divide by 1.0 if normalize_shocks=False ) # permanent "shock" includes expected growth TranShkNow[these] = IncShkDstnNow.X[1][EventDraws] / TranShkMeanNow # PermShkNow[newborn] = 1.0 @@ -2763,8 +2763,7 @@ def construct_lognormal_income_process_unemployment(self): if not hasattr(self, "neutral_measure"): self.neutral_measure = False - - # Use Harmenberg (2021) permanent income neutral measure + if self.neutral_measure == True: PermShkDstn_t.pmf = PermShkDstn_t.X*PermShkDstn_t.pmf From b4c668a0dac3c9e3f1c4237ac0103c7de9b70fef Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 13:12:53 -0500 Subject: [PATCH 7/9] Make sure normalize booleans have values --- HARK/ConsumptionSaving/ConsIndShockModel.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 3ea44d951..a24114d67 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -1797,6 +1797,12 @@ def transition(self): # Asymptotically it can't hurt to impose true restrictions # (at least if the GICRaw holds) pLvlNowMean = 1.0 + if not hasattr(self, "normalize_shocks"): + self.normalize_shocks = False + + if not hasattr(self, "normalize_levels"): + self.normalize_levels = False + if self.normalize_levels == True: 
pLvlNowMean = np.mean(pLvlNow) From 41f944d089a466c163a755fae3e05209787a8ead Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 15:10:04 -0500 Subject: [PATCH 8/9] Move test for hasattribute normalizations to simulation code --- HARK/ConsumptionSaving/ConsIndShockModel.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index a24114d67..0fd7d4daf 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -2761,11 +2761,11 @@ def construct_lognormal_income_process_unemployment(self): PermShkCount, tail_N=0 ) - if not hasattr(self, "normalize_shocks"): - self.normalize_shocks = False + # if not hasattr(self, "normalize_shocks"): + # self.normalize_shocks = False - if not hasattr(self, "normalize_levels"): - self.normalize_levels = False + # if not hasattr(self, "normalize_levels"): + # self.normalize_levels = False if not hasattr(self, "neutral_measure"): self.neutral_measure = False From 8f0057e0cb51f9acf918222954463e2cd4f7fe7f Mon Sep 17 00:00:00 2001 From: llorracc Date: Tue, 4 Jan 2022 23:00:50 -0500 Subject: [PATCH 9/9] Remove unneeded comments --- HARK/ConsumptionSaving/ConsIndShockModel.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/HARK/ConsumptionSaving/ConsIndShockModel.py b/HARK/ConsumptionSaving/ConsIndShockModel.py index 0fd7d4daf..aedf3df11 100644 --- a/HARK/ConsumptionSaving/ConsIndShockModel.py +++ b/HARK/ConsumptionSaving/ConsIndShockModel.py @@ -2761,12 +2761,6 @@ def construct_lognormal_income_process_unemployment(self): PermShkCount, tail_N=0 ) - # if not hasattr(self, "normalize_shocks"): - # self.normalize_shocks = False - - # if not hasattr(self, "normalize_levels"): - # self.normalize_levels = False - if not hasattr(self, "neutral_measure"): self.neutral_measure = False
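
For readers of this PR: below is a minimal, standalone sketch (not part of the patch series) of the normalization these commits introduce. The draw_mean_one_lognormal helper and the plain local flags are assumptions made for illustration; in HARK the draws come from IncShkDstn.draw() inside get_shocks(), and the normalize_shocks / normalize_levels flags live on the AgentType. Everything else mirrors the logic the patches add to get_shocks() and transition().

    import numpy as np

    # Hypothetical stand-in for IncShkDstn.draw(): mean-one lognormal draws
    def draw_mean_one_lognormal(sigma, N, rng):
        return rng.lognormal(mean=-0.5 * sigma**2, sigma=sigma, size=N)

    rng = np.random.default_rng(0)
    N = 10_000
    PermGroFac = 1.01                        # expected permanent income growth factor
    PermShk = draw_mean_one_lognormal(0.10, N, rng)
    TranShk = draw_mean_one_lognormal(0.10, N, rng)

    # --- what the new normalize_shocks flag does in get_shocks() ---
    normalize_shocks = True
    PermShkMean, TranShkMean = 1.0, 1.0      # dividing by 1.0 changes nothing
    if normalize_shocks:
        PermShkMean = np.mean(PermShk)       # sample means of this period's draws
        TranShkMean = np.mean(TranShk)
    PermShkNow = (PermShk / PermShkMean) * PermGroFac   # "shock" includes expected growth
    TranShkNow = TranShk / TranShkMean

    # --- what the new normalize_levels flag does in transition() ---
    pLvlPrev = np.ones(N)                    # previous permanent income levels
    pLvlNow = pLvlPrev * PermShkNow          # updated permanent income levels
    normalize_levels = True
    pLvlNowMean = 1.0
    if normalize_levels:
        pLvlNowMean = np.mean(pLvlNow)
    pLvlNow = pLvlNow / pLvlNowMean          # rescale levels by their sample mean

    # Sample means now match the population values (up to floating point)
    print(np.mean(PermShkNow / PermGroFac), np.mean(TranShkNow), np.mean(pLvlNow))

Dividing by the realized sample mean imposes the restriction that the mean shock equals one exactly in every simulated cross section, rather than only in expectation; this is the "true restriction" the in-code comments refer to, and it removes one source of Monte Carlo noise from simulated aggregates.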