@@ -17,11 +17,15 @@ kernelspec:
```

- In this lecture we study a basic infinite-horizon job search with Markov wage
+ In this lecture we study a basic infinite-horizon job search problem with Markov wage
draws

- The exercise at the end asks you to add recursive preferences and compare
- the result.
+ ```{note}
+ For background on infinite horizon job search see, e.g., [DP1](https://dp.quantecon.org/).
+ ```
+
+ The exercise at the end asks you to add risk-sensitive preferences and see how
+ the main results change.

In addition to what’s in Anaconda, this lecture will need the following libraries:

@@ -49,23 +53,32 @@ We study an elementary model where

* jobs are permanent
* unemployed workers receive current compensation $c$
- * the wage offer distribution $\{W_t\}$ is Markovian
* the horizon is infinite
* an unemployed agent discounts the future via discount factor $\beta \in (0,1)$

- The wage process obeys
+ ### Set up
+
+ At the start of each period, an unemployed worker receives wage offer $W_t$.
+
+ To build a wage offer process we consider the dynamics

$$
- W_{t+1} = \rho W_t + \nu Z_{t+1},
- \qquad \{Z_t\} \text{ is IID and } N(0, 1)
+ W_{t+1} = \rho W_t + \nu Z_{t+1}
$$

- We discretize this using Tauchen's method to produce a stochastic matrix $P$
+ where $(Z_t)_{t \geq 0}$ is IID and standard normal.
+
+ We then discretize this wage process using Tauchen's method to produce a stochastic matrix $P$.
+
+ Successive wage offers are drawn from $P$.
+
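+ As a quick illustration (a sketch only; the grid size and simulation length here are arbitrary), successive offers can be simulated directly from the discretized chain:
+
+ ```{code-cell} ipython3
+ import numpy as np  # assumed available alongside the lecture's other imports
+
+ mc_small = qe.tauchen(25, 0.9, 0.2)     # small illustrative discretization
+ np.exp(mc_small.simulate(ts_length=5))  # five successive wage offers
+ ```
+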
+ ### Rewards

Since jobs are permanent, the return to accepting wage offer $w$ today is

$$
- w + \beta w + \beta^2 w + \cdots = \frac{w}{1-\beta}
+ w + \beta w + \beta^2 w +
+ \cdots = \frac{w}{1-\beta}
$$

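+ As a quick numerical sanity check on this geometric series formula (the values of $w$ and $\beta$ below are arbitrary):
+
+ ```{code-cell} ipython3
+ w, β = 2.0, 0.99
+ sum(β**t * w for t in range(5_000)), w / (1 - β)  # the two should nearly agree
+ ```
+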
The Bellman equation is

$$
    v(w) = \max
    \left\{
        \frac{w}{1-\beta}, \; c + \beta \sum_{w'} P(w, w') v(w')
    \right\}
$$

We solve this model using value function iteration.

+ +++
+
+ ## Code

Let's set up a `namedtuple` to store information needed to solve the model.

```{code-cell} ipython3
Model = namedtuple('Model', ('n', 'w_vals', 'P', 'β', 'c'))
```

- The function below holds default values and populates the namedtuple.
+ The function below holds default values and populates the `namedtuple`.

```{code-cell} ipython3
def create_js_model(
        n=500,        # wage grid size
        ρ=0.9,        # wage persistence
        ν=0.2,        # wage volatility
        β=0.99,       # discount factor
-         c=1.0         # unemployment compensation
+         c=1.0,        # unemployment compensation
    ):
    "Creates an instance of the job search model with Markov wages."
    mc = qe.tauchen(n, ρ, ν)
-     w_vals, P = jnp.exp(mc.state_values), mc.P
-     P = jnp.array(P)
+     w_vals, P = jnp.exp(mc.state_values), jnp.array(mc.P)
    return Model(n, w_vals, P, β, c)
```
+ Let's test it:
+
+ ```{code-cell} ipython3
+ model = create_js_model(β=0.98)
+ ```
+
+ ```{code-cell} ipython3
+ model.c
+ ```
+
+ ```{code-cell} ipython3
+ model.β
+ ```
+
+ ```{code-cell} ipython3
+ model.w_vals.mean()
+ ```
+
Here's the Bellman operator.

```{code-cell} ipython3
@jax.jit
def T(v, model):
    "The Bellman operator."
    n, w_vals, P, β, c = model
    e = w_vals / (1 - β)
    h = c + β * P @ v
    return jnp.maximum(e, h)
```

Once the value function $v^*$ is computed, the optimal policy is the $v^*$-greedy policy

$$
    \sigma(w) = \mathbf 1
        \left\{
            \frac{w}{1-\beta} \geq c + \beta \sum_{w'} P(w, w') v^*(w')
        \right\}
$$

Here $\mathbf 1$ is an indicator function.

- The statement above means that the worker accepts ($\sigma(w) = 1$) when the value of stopping
- is higher than the value of continuing.
+ * $\sigma(w) = 1$ means stop
+ * $\sigma(w) = 0$ means continue.

```{code-cell} ipython3
@jax.jit
def get_greedy(v, model):
-     """Get a v-greedy policy."""
+     "Get a v-greedy policy."
    n, w_vals, P, β, c = model
    e = w_vals / (1 - β)
    h = c + β * P @ v
@@ -153,8 +186,7 @@ Here's a routine for value function iteration.

```{code-cell} ipython3
def vfi(model, max_iter=10_000, tol=1e-4):
-     """Solve the infinite-horizon Markov job search model by VFI."""
-
+     "Solve the infinite-horizon Markov job search model by VFI."
    print("Starting VFI iteration.")
    v = jnp.zeros_like(model.w_vals)  # Initial guess
    i = 0
@@ -171,29 +203,47 @@ def vfi(model, max_iter=10_000, tol=1e-4):
    return v_star, σ_star
```

- ### Computing the solution
+
+ +++
+
+ ## Computing the solution

Let's set up and solve the model.

```{code-cell} ipython3
model = create_js_model()
n, w_vals, P, β, c = model

- %time v_star, σ_star = vfi(model)
+ v_star, σ_star = vfi(model)
```

- We run it again to eliminate compile time.
+ Here's the optimal policy:

```{code-cell} ipython3
- %time v_star, σ_star = vfi(model)
+ fig, ax = plt.subplots()
+ ax.plot(w_vals, σ_star)
+ ax.set_xlabel("wage values")
+ ax.set_ylabel("optimal choice (stop=1)")
+ plt.show()
```
We compute the reservation wage as the first $w$ such that $\sigma(w)=1$.

```{code-cell} ipython3
- res_wage = w_vals[jnp.searchsorted(σ_star, 1.0)]
+ stop_indices = jnp.where(σ_star == 1)
+ stop_indices
+ ```
+
+ ```{code-cell} ipython3
+ res_wage_index = min(stop_indices[0])
+ ```
+
+ ```{code-cell} ipython3
+ res_wage = w_vals[res_wage_index]
```
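+
+ Let's have a look at the reservation wage:
+
+ ```{code-cell} ipython3
+ res_wage
+ ```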

+ Here's a joint plot of the value function and the reservation wage.
+
```{code-cell} ipython3
fig, ax = plt.subplots()
ax.plot(w_vals, v_star, alpha=0.8, label="value function")
$$

- When $\theta < 0$ the agent is risk sensitive.
+ When $\theta < 0$ the agent is risk averse.

Solve the model when $\theta = -0.1$ and compare your result to the risk neutral
case.

Try to interpret your result.

+ You can start with the following code:
+
+ ```{code-cell} ipython3
+ RiskModel = namedtuple('Model', ('n', 'w_vals', 'P', 'β', 'c', 'θ'))
+
+ def create_risk_sensitive_js_model(
+         n=500,        # wage grid size
+         ρ=0.9,        # wage persistence
+         ν=0.2,        # wage volatility
+         β=0.99,       # discount factor
+         c=1.0,        # unemployment compensation
+         θ=-0.1        # risk parameter
+     ):
+     "Creates an instance of the risk-sensitive job search model with Markov wages."
+     mc = qe.tauchen(n, ρ, ν)
+     w_vals, P = jnp.exp(mc.state_values), mc.P
+     P = jnp.array(P)
+     return RiskModel(n, w_vals, P, β, c, θ)
+ ```
+
+ Now you need to modify `T` and `get_greedy` and then run value function iteration again.
+
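+ As a hint, the key change is to the continuation value `h` inside `T` and `get_greedy`; one possible sketch, following the risk-sensitive recursion stated in this exercise, is the commented line below:
+
+ ```{code-cell} ipython3
+ # Possible risk-sensitive continuation value (sketch only):
+ # h = c + (β / θ) * jnp.log(P @ jnp.exp(θ * v))
+ ```
+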
```{exercise-end}
```

@@ -311,25 +385,25 @@ model_rs = create_risk_sensitive_js_model()
model_rs = create_risk_sensitive_js_model()
n, w_vals, P, β, c, θ = model_rs

- %time v_star_rs, σ_star_rs = vfi(model_rs)
+ v_star_rs, σ_star_rs = vfi(model_rs)
```

- We run it again to eliminate the compilation time.
-
- ```{code-cell} ipython3
- %time v_star_rs, σ_star_rs = vfi(model_rs)
- ```
+ Let's plot the results together with the original risk neutral case and see what we get.

```{code-cell} ipython3
- res_wage_rs = w_vals[jnp.searchsorted(σ_star_rs, 1.0)]
+ stop_indices = jnp.where(σ_star_rs == 1)
+ res_wage_index = min(stop_indices[0])
+ res_wage_rs = w_vals[res_wage_index]
```
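+
+ Before plotting, we can compare the two reservation wages directly (`res_wage` is the risk neutral value computed earlier):
+
+ ```{code-cell} ipython3
+ res_wage, res_wage_rs
+ ```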

```{code-cell} ipython3
fig, ax = plt.subplots()
- ax.plot(w_vals, v_star, alpha=0.8, label="RN $v$")
- ax.plot(w_vals, v_star_rs, alpha=0.8, label="RS $v$")
- ax.vlines((res_wage,), 150, 400, ls='--', color='darkblue', alpha=0.5, label=r"RV $\bar w$")
- ax.vlines((res_wage_rs,), 150, 400, ls='--', color='orange', alpha=0.5, label=r"RS $\bar w$")
+ ax.plot(w_vals, v_star, alpha=0.8, label="risk neutral $v$")
+ ax.plot(w_vals, v_star_rs, alpha=0.8, label="risk sensitive $v$")
+ ax.vlines((res_wage,), 100, 400, ls='--', color='darkblue',
+           alpha=0.5, label=r"risk neutral $\bar w$")
+ ax.vlines((res_wage_rs,), 100, 400, ls='--', color='orange',
+           alpha=0.5, label=r"risk sensitive $\bar w$")
ax.legend(frameon=False, fontsize=12, loc="lower right")
ax.set_xlabel("$w$", fontsize=12)
plt.show()