diff --git a/_freeze/.DS_Store b/_freeze/.DS_Store
deleted file mode 100644
index 70b93ab..0000000
Binary files a/_freeze/.DS_Store and /dev/null differ
diff --git a/_freeze/assignments/.DS_Store b/_freeze/assignments/.DS_Store
deleted file mode 100644
index 9ff92aa..0000000
Binary files a/_freeze/assignments/.DS_Store and /dev/null differ
diff --git a/_freeze/assignments/A1/execute-results/html.json b/_freeze/assignments/A1/execute-results/html.json
index 86d8e36..8d2563a 100644
--- a/_freeze/assignments/A1/execute-results/html.json
+++ b/_freeze/assignments/A1/execute-results/html.json
@@ -2,7 +2,7 @@
"hash": "0a06cdc60c376941ca67f1c87c69f03e",
"result": {
"engine": "knitr",
- "markdown": "---\ntitle: Individual Assignment 1 (5%)\n---\n\n\n\n**You must provide forecasts for the following items:**\n\n 1. Google closing stock price on 21 March 2025 [[Data](https://finance.yahoo.com/quote/GOOG/)].\n 2. Maximum temperature at Melbourne airport on 11 April 2025 [[Data](http://www.bom.gov.au/climate/dwo/IDCJDW3049.latest.shtml)].\n 3. The difference in points (Collingwood minus Essendon) scored in the AFL match between Collingwood and Essendon for the Anzac Day clash. 25 April 2025 [[Data](https://en.wikipedia.org/wiki/Anzac_Day_match)].\n 4. The seasonally adjusted estimate of total employment for April 2025 in ('000). ABS CAT 6202, to be released around mid May 2025 [[Data](https://www.abs.gov.au/statistics/labour/employment-and-unemployment/labour-force-australia/latest-release)].\n 5. Google closing stock price on 23 May 2025 [[Data](https://finance.yahoo.com/quote/GOOG/)].\n\nFor each of these, give a **point forecast** and an **80% prediction interval**, and **explain in a couple of sentences** how each was obtained.\n\n* Prepare short justifications with your forecasts and forecast intervals explaining in no more than 50-70 words how these were obtained. Only a couple of sentences are required. There is no need to use any fancy models or sophisticated methods. \n* **Full marks will be awarded** if you submit the required information, and are able to meaningfully justify your results in a couple of sentences in each case. \n\n### Submission\n\nYour forecasts should be submitted in moodle by **9.30am, on Monday, March 10**.\n\n\n\n\n\n
Due: 10 March 2025 Submit (ETF3231) \n\n\n\n\n\n\n\n#### Tips\n\n* The [Data] links give you possible data to start with, but you are free to use any data you like.\n* There is no need to use any fancy models or sophisticated methods. Simple is better for this assignment. The methods you use should be understandable to any high school student.\n\n## Forecasting competition\n\nOnce the true values in each case are available, we will come back to this exercise and see who did the best using the scoring method described in class. The student with the best score will be the winner of our forecasting competition (something really nice to have on your CV), and will also win a $100 Amazon gift voucher.\n\nNote: your assignment mark is not dependent at all on the score you obtain in the competition.\n\n\n### Scoring \n\\small\n\nLet $y=$ actual, $\\hat{y}=$ point forecast, $[\\hat{\\ell},\\hat{u}]=$ prediction interval\n\n#### Point forecasts:\n\n$$\\text{Absolute Error} = |y-\\hat{y}|$$\n\n\\vspace{-0.4cm}\n\n * Rank results for all students in class\n * Add ranks across all five items\n\n#### Prediction intervals:\n\n$$\n\\text{Interval Score} = (\\hat u - \\hat\\ell) + 10(\\hat\\ell - y)_+ + 10 (y-\\hat u)_+\n$$\n\n\\vspace{-0.5cm}\n\n* $u_+=max(u,0)$\n* Rank results for all students\n* Add ranks across all five items\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
+ "markdown": "---\ntitle: Individual Assignment 1 (5%)\n---\n\n\n\n**You must provide forecasts for the following items:**\n\n 1. Google closing stock price on 21 March 2025 [[Data](https://finance.yahoo.com/quote/GOOG/)].\n 2. Maximum temperature at Melbourne airport on 11 April 2025 [[Data](http://www.bom.gov.au/climate/dwo/IDCJDW3049.latest.shtml)].\n 3. The difference in points (Collingwood minus Essendon) scored in the AFL match between Collingwood and Essendon for the Anzac Day clash. 25 April 2025 [[Data](https://en.wikipedia.org/wiki/Anzac_Day_match)].\n 4. The seasonally adjusted estimate of total employment for April 2025 in ('000). ABS CAT 6202, to be released around mid May 2025 [[Data](https://www.abs.gov.au/statistics/labour/employment-and-unemployment/labour-force-australia/latest-release)].\n 5. Google closing stock price on 23 May 2025 [[Data](https://finance.yahoo.com/quote/GOOG/)].\n\nFor each of these, give a **point forecast** and an **80% prediction interval**, and **explain in a couple of sentences** how each was obtained.\n\n* Prepare short justifications with your forecasts and forecast intervals explaining in no more than 50-70 words how these were obtained. Only a couple of sentences are required. There is no need to use any fancy models or sophisticated methods. \n* **Full marks will be awarded** if you submit the required information, and are able to meaningfully justify your results in a couple of sentences in each case. \n\n### Submission\n\nYour forecasts should be submitted in moodle by **9.30am, on Monday, March 10**.\n\n\n\n\n\n
Due: 10 March 2025 Submit (ETF3231) \n\n\n\n\n\n\n\n#### Tips\n\n* The [Data] links give you possible data to start with, but you are free to use any data you like.\n* There is no need to use any fancy models or sophisticated methods. Simple is better for this assignment. The methods you use should be understandable to any high school student.\n\n## Forecasting competition\n\nOnce the true values in each case are available, we will come back to this exercise and see who did the best using the scoring method described in class. The student with the best score will be the winner of our forecasting competition (something really nice to have on your CV), and will also win a $100 Amazon gift voucher.\n\nNote: your assignment mark is not dependent at all on the score you obtain in the competition.\n\n\n### Scoring \n\\small\n\nLet $y=$ actual, $\\hat{y}=$ point forecast, $[\\hat{\\ell},\\hat{u}]=$ prediction interval\n\n#### Point forecasts:\n\n$$\\text{Absolute Error} = |y-\\hat{y}|$$\n\n\\vspace{-0.4cm}\n\n * Rank results for all students in class\n * Add ranks across all five items\n\n#### Prediction intervals:\n\n$$\n\\text{Interval Score} = (\\hat u - \\hat\\ell) + 10(\\hat\\ell - y)_+ + 10 (y-\\hat u)_+\n$$\n\n\\vspace{-0.5cm}\n\n* $u_+=max(u,0)$\n* Rank results for all students\n* Add ranks across all five items\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
diff --git a/_freeze/assignments/A2/execute-results/html.json b/_freeze/assignments/A2/execute-results/html.json
index 44676a8..8e060e0 100644
--- a/_freeze/assignments/A2/execute-results/html.json
+++ b/_freeze/assignments/A2/execute-results/html.json
@@ -2,7 +2,7 @@
"hash": "01940f7fae546294603982d29e7e968e",
"result": {
"engine": "knitr",
- "markdown": "---\ntitle: Individual Assignment 2 (7%)\n---\n\n\n\nUse the `IA2_template.Rmd` to get started. Leave the \"## Questions\" and the pagebreaks \"\\newpage\" as they are (do not remove these - these will help with marking the assignment). You are to submit both an \"IA2_NameSurname.Rmd\" file and a compiled `IA2_NameSurname.pdf` file for your assignment.\n\nFollow the assignment questions/instructions to complete some specific tasks. Add code where necessary (you can add more R chunks if and where you need to) and text to answer the questions where necessary. Keep within the suggested word restrictions in order to get full marks (marks will be deducted for grossly going over the word limit). This is to encourage you to be concise in your writing. \n\nYou will be using this data in future assignments as you will build on the analysis you perform in this assignment. Furthermore, in the final assignment you will need to submit an .Rmd file that you will develop for all the assignment tasks throughout the semester. Marks will be allocated for code that runs without errors and produces the correct output. Make sure that every time your code works without errors. If you need help with this, please ask during consultation times. Keeping detailed comments in your program is always good practice (see for example the programs we are using in the lectures). This will help you remember in week 12 why you did certain things in week 4. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/IA2_template.Rmd.zip){download=\"IA2_template.Rmd.zip\"}\n:::\n\n* Download the data \n\n::: {.callout appearance=\"minimal\"}\n [Download IA_data.csv](../assignment_data/IA_data.csv){download=\"IA_data.csv\"}\n:::\n\n\n\n\n\n
Due: 7 April 2025 Submit (ETF3231) \n",
+ "markdown": "---\ntitle: Individual Assignment 2 (7%)\n---\n\n\n\nUse the `IA2_template.Rmd` to get started. Leave the \"## Questions\" and the pagebreaks \"\\newpage\" as they are (do not remove these - these will help with marking the assignment). You are to submit both an \"IA2_NameSurname.Rmd\" file and a compiled `IA2_NameSurname.pdf` file for your assignment.\n\nFollow the assignment questions/instructions to complete some specific tasks. Add code where necessary (you can add more R chunks if and where you need to) and text to answer the questions where necessary. Keep within the suggested word restrictions in order to get full marks (marks will be deducted for grossly going over the word limit). This is to encourage you to be concise in your writing. \n\nYou will be using this data in future assignments as you will build on the analysis you perform in this assignment. Furthermore, in the final assignment you will need to submit an .Rmd file that you will develop for all the assignment tasks throughout the semester. Marks will be allocated for code that runs without errors and produces the correct output. Make sure that every time your code works without errors. If you need help with this, please ask during consultation times. Keeping detailed comments in your program is always good practice (see for example the programs we are using in the lectures). This will help you remember in week 12 why you did certain things in week 4. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/IA2_template.Rmd.zip){download=\"IA2_template.Rmd.zip\"}\n:::\n\n* Download the data \n\n::: {.callout appearance=\"minimal\"}\n [Download IA_data.csv](../assignment_data/IA_data.csv){download=\"IA_data.csv\"}\n:::\n\n\n\n\n\n
Due: 7 April 2025 Submit (ETF3231) \n",
"supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
diff --git a/_freeze/assignments/A3/execute-results/html.json b/_freeze/assignments/A3/execute-results/html.json
index d8d95ed..0d5476b 100644
--- a/_freeze/assignments/A3/execute-results/html.json
+++ b/_freeze/assignments/A3/execute-results/html.json
@@ -1,7 +1,8 @@
{
"hash": "cef3d58e62b136835640c2868e9718ec",
"result": {
- "markdown": "---\ntitle: Individual Assignment 3 (10%)\n---\n\n\nThe data you are to forecast is the same series you used in IA2. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/IA3_template.Rmd.zip){download=\"IA3_template.Rmd.zip\"}\n:::\n\n\nYou are to submit both an `IA3_NameSurname.Rmd` file and a compiled `IA3_NameSurname.pdf` file for your assignment.\n\n\n
Due: 24 April 2024 Submit (ETF5231) \n",
+ "engine": "knitr",
+ "markdown": "---\ntitle: Individual Assignment 3 (10%)\n---\n\n\n\nThe data you are to forecast is the same series you used in IA2. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/IA3_template.Rmd.zip){download=\"IA3_template.Rmd.zip\"}\n:::\n\n\nYou are to submit both an `IA3_NameSurname.Rmd` file and a compiled `IA3_NameSurname.pdf` file for your assignment.\n\n\n\n
Due: 28 April 2025 Submit (ETF3231) \n",
"supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
diff --git a/_freeze/assignments/A4/execute-results/html.json b/_freeze/assignments/A4/execute-results/html.json
index aedde1d..3f0d218 100644
--- a/_freeze/assignments/A4/execute-results/html.json
+++ b/_freeze/assignments/A4/execute-results/html.json
@@ -1,7 +1,8 @@
{
"hash": "8f7c3aa5af4c1fa1c66d535efb588403",
"result": {
- "markdown": "---\ntitle: Individual Assignment 4 (18%)\n---\n\n\nThe data you are to forecast is the same series you used in IA2. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/IA4_template.Rmd){download=\"IA4_template.Rmd\"}\n:::\n\n\n\n\nYou are to submit both an `IA4_NameSurname.Rmd` file and a compiled `IA4_NameSurname.pdf` file for your assignment.\n\n\n\n\n
Due: 15 May 2024 Submit (ETF5231) \n",
+ "engine": "knitr",
+ "markdown": "---\ntitle: Individual Assignment 4 (18%)\n---\n\n\n\nThe data you are to forecast is the same series you used in IA2. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/IA4_template.Rmd){download=\"IA4_template.Rmd\"}\n:::\n\n\n\n\nYou are to submit both an `IA4_NameSurname.Rmd` file and a compiled `IA4_NameSurname.pdf` file for your assignment.\n\n\n\n\n\n
Due: 19 May 2025 Submit (ETF3231) \n",
"supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
diff --git a/_freeze/assignments/G1/execute-results/html.json b/_freeze/assignments/G1/execute-results/html.json
index 2bc3d64..6f50f50 100644
--- a/_freeze/assignments/G1/execute-results/html.json
+++ b/_freeze/assignments/G1/execute-results/html.json
@@ -2,7 +2,7 @@
"hash": "4690fa96f707ac5e20d629c7cc3427be",
"result": {
"engine": "knitr",
- "markdown": "---\ntitle: Group Assignments\n---\n\n\n\nIt is expected that all group members participate equally to the group assignments. It sometimes helps to keep a log of the tasks each member is to complete. This may assist in keeping some balance throughout the semester. To ensure that all group members are contributing the following rules will apply for all the group assignments throughout the semester.\n\nEvery group member has the right to express concern/dislike in terms of a group member being absent from group activities. This has to be expressed directly to me (and only me) via a formal email stating facts that have hindered the operations of the group (saying I do not like George because he is tall is not a reason for expressing concern). If at least two such complaints are submitted during the completion of an assignment, the group member concerned will receive a warning. If the situation is not improved during the next assignment and another two complaints are again submitted, the group member will be immediately removed from the group and will be expected to complete the rest of the group assignments alone. \n\nIn my involvement with group assignments we have rarely needed to enforce such rule so I do no expect any major problems here. However they will be enforced if need be. I hope this is clear. \n\nWe need only **one submission per group** for each group assignment. \n\n## Group Assignment 1\n\nThe aim of this assignment is to meet your group members and begin interacting. Full marks will be allocated to the group by completing the tasks in the google form. Please try and be as efficient as possible in your writing (we are looking for short, sharp responses).\n\n**Submission**\n\nMoodle by **9.30am, on Monday, 24 March**.\n\n\n\n
Due: 24 March 2025 Submit (ETF5231) \n",
+ "markdown": "---\ntitle: Group Assignments\n---\n\n\n\nIt is expected that all group members participate equally to the group assignments. It sometimes helps to keep a log of the tasks each member is to complete. This may assist in keeping some balance throughout the semester. To ensure that all group members are contributing the following rules will apply for all the group assignments throughout the semester.\n\nEvery group member has the right to express concern/dislike in terms of a group member being absent from group activities. This has to be expressed directly to me (and only me) via a formal email stating facts that have hindered the operations of the group (saying I do not like George because he is tall is not a reason for expressing concern). If at least two such complaints are submitted during the completion of an assignment, the group member concerned will receive a warning. If the situation is not improved during the next assignment and another two complaints are again submitted, the group member will be immediately removed from the group and will be expected to complete the rest of the group assignments alone. \n\nIn my involvement with group assignments we have rarely needed to enforce such rule so I do no expect any major problems here. However they will be enforced if need be. I hope this is clear. \n\nWe need only **one submission per group** for each group assignment. \n\n## Group Assignment 1\n\nThe aim of this assignment is to meet your group members and begin interacting. Full marks will be allocated to the group by completing the tasks in the google form. Please try and be as efficient as possible in your writing (we are looking for short, sharp responses).\n\n**Submission**\n\nMoodle by **9.30am, on Monday, 24 March**.\n\n\n\n
Due: 24 March 2025 Submit (ETF5231) \n",
"supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
diff --git a/_freeze/assignments/G2/execute-results/html.json b/_freeze/assignments/G2/execute-results/html.json
index b94b7a0..46fd92e 100644
--- a/_freeze/assignments/G2/execute-results/html.json
+++ b/_freeze/assignments/G2/execute-results/html.json
@@ -2,10 +2,8 @@
"hash": "1799becadb9f5663f18523a558ee280d",
"result": {
"engine": "knitr",
- "markdown": "---\ntitle: Group Assignment 2 (7%)\n---\n\n\nUse the \"GA2_template.Rmd to get started. Leave the \"## Questions\" and the pagebreaks \"\\newpage\" as they are (do not remove these - these will help with marking the assignment). You are to submit both an \"GA2_GroupNo.Rmd\" file and a compiled \"GA2_GroupNo.pdf\" file for your assignment.\n\nFollow the assignment questions/instructions to complete some specific tasks. Add code were necessary (you can add more R chunks if and where you need to) and text to answer the questions where necessary. Keep within the suggested word restrictions in order to get full marks (marks will be deducted for grossly going over the word limit). This is to encourage you to be concise in your writing. \n\nYou will be using this data in future assignments as you will build on the analysis you perform in this assignment. Furthermore, in the final assignment you will need to submit an .Rmd file that you will develop for all the assignment tasks throughout the semester. Marks will be allocated for code that runs without errors and produces the correct output. Make sure that every time your code works without errors. If you need help with this, please ask during consultation times. Keeping detailed comments in your program is always good practice (see for example the programs we are using in the lectures). This will help you remember in week 12 why you did certain things in week 4. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/GA2_template.Rmd.zip){download=\"GA2_template.Rmd.zip\"}\n:::\n\n* Download the data \n\n::: {.callout appearance=\"minimal\"}\n [Download GA_data.csv](../assignment_data/GA_data.csv){download=\"GA_data.csv\"}\n:::\n\n\n\n
Due: 17 April 2024 Submit (ETF3231) \n",
- "supporting": [
- "G2_files"
- ],
+ "markdown": "---\ntitle: Group Assignment 2 (7%)\n---\n\n\n\nUse the \"GA2_template.Rmd to get started. Leave the \"## Questions\" and the pagebreaks \"\\newpage\" as they are (do not remove these - these will help with marking the assignment). You are to submit both an \"GA2_GroupNo.Rmd\" file and a compiled \"GA2_GroupNo.pdf\" file for your assignment.\n\nFollow the assignment questions/instructions to complete some specific tasks. Add code were necessary (you can add more R chunks if and where you need to) and text to answer the questions where necessary. Keep within the suggested word restrictions in order to get full marks (marks will be deducted for grossly going over the word limit). This is to encourage you to be concise in your writing. \n\nYou will be using this data in future assignments as you will build on the analysis you perform in this assignment. Furthermore, in the final assignment you will need to submit an .Rmd file that you will develop for all the assignment tasks throughout the semester. Marks will be allocated for code that runs without errors and produces the correct output. Make sure that every time your code works without errors. If you need help with this, please ask during consultation times. Keeping detailed comments in your program is always good practice (see for example the programs we are using in the lectures). This will help you remember in week 12 why you did certain things in week 4. \n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/GA2_template.Rmd.zip){download=\"GA2_template.Rmd.zip\"}\n:::\n\n* Download the data \n\n::: {.callout appearance=\"minimal\"}\n [Download GA_data.csv](../assignment_data/GA_data.csv){download=\"GA_data.csv\"}\n:::\n\n\n\n\n
Due: 14 April 2025 Submit (ETF5231) \n",
+ "supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
],
diff --git a/_freeze/assignments/G3/execute-results/html.json b/_freeze/assignments/G3/execute-results/html.json
index b6a3066..57fcce2 100644
--- a/_freeze/assignments/G3/execute-results/html.json
+++ b/_freeze/assignments/G3/execute-results/html.json
@@ -1,7 +1,8 @@
{
"hash": "d75a84b7902bd6c92ba0f322a570e9ee",
"result": {
- "markdown": "---\ntitle: Group Assignment 3 (10%)\n---\n\n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/GA3_template.Rmd.zip){download=\"GA3_template.Rmd.zip\"}\n:::\n\n\n\n
Due: 1 May 2024 Submit (ETF5231) \n",
+ "engine": "knitr",
+ "markdown": "---\ntitle: Group Assignment 3 (10%)\n---\n\n\n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/GA3_template.Rmd.zip){download=\"GA3_template.Rmd.zip\"}\n:::\n\n\n\n\n
Due: 5 May 2025 Submit (ETF5231) \n",
"supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
diff --git a/_freeze/assignments/G4/execute-results/html.json b/_freeze/assignments/G4/execute-results/html.json
index a9b40da..9d7ed14 100644
--- a/_freeze/assignments/G4/execute-results/html.json
+++ b/_freeze/assignments/G4/execute-results/html.json
@@ -2,7 +2,7 @@
"hash": "64af05d8d6a5956252391abf29f135ab",
"result": {
"engine": "knitr",
- "markdown": "---\ntitle: Group Assignment 4 (18%)\n---\n\n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/GA4_template.Rmd.zip){download=\"GA4_template.Rmd.zip\"}\n:::\n\n\n\n
Due: 22 May 2024 Submit (ETF5231) \n",
+ "markdown": "---\ntitle: Group Assignment 4 (18%)\n---\n\n\n\n**Files for download**:\n\n* Download the template\n\n::: {.callout appearance=\"minimal\"}\n [Download template](../assignment_data/GA4_template.Rmd.zip){download=\"GA4_template.Rmd.zip\"}\n:::\n\n\n\n\n
Due: 26 May 2025 Submit (ETF5231) \n",
"supporting": [],
"filters": [
"rmarkdown/pagebreak.lua"
diff --git a/_freeze/assignments/Project/execute-results/html.json b/_freeze/assignments/Project/execute-results/html.json
deleted file mode 100644
index fc03cb6..0000000
--- a/_freeze/assignments/Project/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "2dc1e4f426524f9526895c6bbcaf5b29",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: Retail Project\n---\n\n\n**Objective:** To forecast a real time series using ETS and ARIMA models.\n\n**Data:** Each student will be use a different time series, selected using their student ID number as follows. This is the same series that you used in [Assignment 2](A2.qmd).\n\n```r\n# Use your student ID as the seed\nset.seed(12345678)\nretail <- readr::read_rds(\"https://bit.ly/monashretaildata\") |>\n filter(`Series ID` == sample(`Series ID`, 1))\n```\n\n**Assignment value:** This assignment is worth 20% of the overall unit assessment.\n\n**Report:**\n\nYou should produce forecasts of the series using ETS and ARIMA models. Write a report in Rmarkdown or Quarto format of your analysis explaining carefully what you have done and why you have done it. Your report should include the following elements.\n\n* A discussion of the statistical features of the original data, including the effect of COVID-19 on your series. [4 marks]\n* Explanation of transformations and differencing used. You should use a unit-root test as part of the discussion. [5 marks]\n* A description of the methodology used to create a short-list of appropriate ARIMA models and ETS models. Include discussion of AIC values as well as results from applying the models to a test-set consisting of the last 24 months of the data provided. [6 marks]\n* Choose one ARIMA model and one ETS model based on this analysis and show parameter estimates, residual diagnostics, forecasts and prediction intervals for both models. Diagnostic checking for both models should include ACF graphs and the Ljung-Box test. [8 marks]\n* Comparison of the results from each of your preferred models. Which method do you think gives the better forecasts? Explain with reference to the test-set. [2 marks]\n* Apply your two chosen models to the full data set, re-estimating the parameters but not changing the model structure. Produce out-of-sample point forecasts and 80% prediction intervals for each model for two years past the end of the data provided. [4 marks]\n* Obtain up-to-date data from the [ABS website](https://www.abs.gov.au/statistics/industry/retail-and-wholesale-trade/retail-trade-australia) (Table 11). You may need to use the previous release of data, rather than the latest release. Compare your forecasts with the actual numbers. How well did you do? [5 marks]\n* A discussion of benefits and limitations of the models for your data. [3 marks]\n* Graphs should be properly labelled, including appropriate units of measurement. [3 marks]\n\n**Notes**\n\n* Your submission must include the Rmarkdown or Quarto file (.Rmd or .qmd), and should run without error.\n* There will be a 5 marks penalty if file does not run without error.\n* You may also include a knitted version of the document (HTML preferred), but it is not required.\n* When using the updated ABS data set, do not edit the downloaded file in any way.\n* There is no need to provide the updated ABS data with your submission.\n\n\n
Due: 24 May 2024 Submit \n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week1/.DS_Store b/_freeze/week1/.DS_Store
deleted file mode 100644
index afc3885..0000000
Binary files a/_freeze/week1/.DS_Store and /dev/null differ
diff --git a/_freeze/week1/activities/execute-results/html.json b/_freeze/week1/activities/execute-results/html.json
deleted file mode 100644
index eef1466..0000000
--- a/_freeze/week1/activities/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "639f42b95b92df253bb42218953aaa82",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Activities: Week 1\"\neditor: source\nengine: knitr\nfilters:\n - webr-teachr\n - quiz-teachr\nwebr:\n packages: [\"fpp3\", \"urca\"]\n autoload-packages: true\n---\n\n\n\n# Time series data and patterns\n\n## Exercise 1\n\nThe `pedestrian` dataset contains hourly pedestrian counts from 2015-01-01 to 2016-12-31 at 4 sensors in the city of Melbourne.\n\nThe data is shown below:\n\n\n\n::: {.cell}\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tibble: 66,037 × 5\n Sensor Date_Time Date Time Count\n \n 1 Birrarung Marr 2015-01-01 00:00:00 2015-01-01 0 1630\n 2 Birrarung Marr 2015-01-01 01:00:00 2015-01-01 1 826\n 3 Birrarung Marr 2015-01-01 02:00:00 2015-01-01 2 567\n 4 Birrarung Marr 2015-01-01 03:00:00 2015-01-01 3 264\n 5 Birrarung Marr 2015-01-01 04:00:00 2015-01-01 4 139\n 6 Birrarung Marr 2015-01-01 05:00:00 2015-01-01 5 77\n 7 Birrarung Marr 2015-01-01 06:00:00 2015-01-01 6 44\n 8 Birrarung Marr 2015-01-01 07:00:00 2015-01-01 7 56\n 9 Birrarung Marr 2015-01-01 08:00:00 2015-01-01 8 113\n10 Birrarung Marr 2015-01-01 09:00:00 2015-01-01 9 166\n# ℹ 66,027 more rows\n```\n\n\n:::\n:::\n\n\n\n::: {.callout-caution}\n## Your turn!\n\nIdentify the `index` variable, `key` variable(s), and measured variable(s) of this dataset.\n:::\n\n::: {.callout-tip}\n## Hint\n\n* The `index` variable contains the complete time information\n* The `key` variable(s) identify each time series\n* The measured variable(s) are what you want to explore/forecast.\n:::\n\n::: columns\n\n::: {.column width=\"30%\"}\n\n## `index` variable\n:::{.quiz-singlechoice}\n- [ ] [Sensor]{hint=\"x\"}\n- [X] [Date_Time]{hint=\"o\"}\n- [ ] [Date]{hint=\"x\"}\n- [ ] [Time]{hint=\"x\"}\n- [ ] [Count]{hint=\"x\"}\n:::\n:::\n\n::: {.column width=\"30%\"}\n\n## `key` variable(s)\n:::{.quiz-multichoice}\n- [X] [Sensor]{hint=\"o\"}\n- [ ] [Date_Time]{hint=\"x\"}\n- [ ] [Date]{hint=\"x\"}\n- [ ] [Time]{hint=\"x\"}\n- [ ] [Count]{hint=\"x\"}\n:::\n:::\n\n::: {.column width=\"40%\"}\n\n## measured variable(s)\n:::{.quiz-multichoice}\n- [ ] [Sensor]{hint=\"x\"}\n- [ ] [Date_Time]{hint=\"x\"}\n- [ ] [Date]{hint=\"x\"}\n- [ ] [Time]{hint=\"x\"}\n- [X] [Count]{hint=\"o\"}\n:::\n:::\n:::\n\n## Exercise 2\n\nThe `aus_accommodation` dataset contains quarterly data on Australian tourist accommodation from short-term non-residential accommodation with 15 or more rooms, 1998 Q1 - 2016 Q2.\n\nThe units of the measured variables are as follows:\n\n* Takings are in millions of Australian dollars\n* Occupancy is a percentage of rooms occupied\n* CPI is an index with value 100 in 2012 Q1.\n\n::: {.callout-caution}\n## Your turn!\n\nComplete the code to convert this dataset into a tsibble.\n:::\n\n```{webr-teachr}\nlibrary(<>)\n\naus_accommodation <- read.csv(\n \"https://workshop.nectric.com.au/user2024/data/aus_accommodation.csv\"\n) |>\n mutate(Date = as.Date(Date)) |>\n as_tsibble(\n <>\n )\n???\n\nif(!(\"fpp3\" %in% .packages())) return(c(\"You need to load the fpp3 package!\" = TRUE))\n\nchecks <- c(\n \"You need to use the as_tsibble() function to convert the data into a tsibble.\" = !search_ast(.code, .fn = as_tsibble),\n \"You should specify which column provides the time of the measurements with `index`.\" = !search_ast(.code, .fn = as_tsibble, index = Date),\n \"You need to specify the key variables that identify each time series\" = exists_in(.errored, grepl, pattern = \"distinct rows\", fixed = TRUE)\n)\n\nif(any(checks)) return(checks)\n\nif(!is_yearquarter(aus_accommodation$Date)) cat(\"Great, you've got a 
tsibble!\\nAlthough something doesn't look right - check the frequency of the data, why isn't it quarterly?\\n\")\nFALSE\n```\n\n\n## Exercise 3\n\n:::{.callout-important}\n## Temporal granularity\n\nThe previous exercise produced a dataset with daily frequency - although clearly the data is quarterly! This is because we are using a daily granularity which is inappropriate for this data.\n:::\n\nCommon temporal granularities can be created with these functions:\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n\n|Granularity |Function |\n|:-----------|:--------------------|\n|Annual |`as.integer()` |\n|Quarterly |`yearquarter()` |\n|Monthly |`yearmonth()` |\n|Weekly |`yearweek()` |\n|Daily |`as_date()`, `ymd()` |\n|Sub-daily |`as_datetime()` |\n\n\n:::\n:::\n\n\n\n\n::: {.callout-caution}\n## Your turn!\n\nUse the appropriate granularity for the `aus_accommodation` dataset, and verify that the frequency is now quarterly.\n:::\n\n\n```{webr-teachr}\naus_accommodation <- read.csv(\n \"https://workshop.nectric.com.au/user2024/data/aus_accommodation.csv\"\n) |>\n mutate(<>) |>\n as_tsibble(\n key = State, index = <>\n )\n???\n\nif(!(\"fpp3\" %in% .packages())) return(c(\"You need to load the fpp3 package!\" = TRUE))\n\nc(\n \"You need to save the dataset as `aus_accommodation`\" = !exists(\"aus_accommodation\"),\n \"You need to use the as_tsibble() function to convert the data into a tsibble.\" = !search_ast(.code, .fn = as_tsibble),\n \"You need to specify the key variables that identify each time series\" = exists_in(.errored, grepl, pattern = \"distinct rows\", fixed = TRUE),\n \"You should use `yearquarter()` to change the time column into a quarterly granularity\" = !is_yearquarter(aus_accommodation[[index_var(aus_accommodation)]])\n)\n```\n\n## Exercise 4\n\nThe `tourism` dataset contains the quarterly overnight trips from 1998 Q1 to 2016 Q4 across Australia.\n\nIt is disaggregated by 3 key variables:\n\n* `State`: States and territories of Australia\n* `Region`: The tourism regions are formed through the aggregation of Statistical Local Areas (SLAs) which are defined by the various State and Territory tourism authorities according to their research and marketing needs\n* `Purpose`: Stopover purpose of visit: \"Holiday\", \"Visiting friends and relatives\", \"Business\", \"Other reason\".\n\nCalculate the total quarterly tourists visiting Victoria from the `tourism` dataset.\n\n```{webr-teachr}\ntourism |>\n filter(<>) |>\n summarise(<>)\n\n???\n\nif(!(\"fpp3\" %in% .packages())) return(c(\"You need to load the fpp3 package!\" = TRUE))\n\nc(\n \"You need to use the filter() function to extract only Victorian tourists.\" = !search_ast(.code, .fn = filter),\n \"You need to use the summarise() function to sum over the Region and Purpose keys.\" = !search_ast(.code, .fn = summarise),\n)\n```\n\nFind what combination of `Region` and `Purpose` had the maximum number of overnight trips on average.\n\n```{webr-teachr}\ntourism |>\n as_tibble() |>\n group_by(<>) |>\n summarise(<>) |>\n filter(<>)\n\n???\n\nif(!(\"fpp3\" %in% .packages())) return(c(\"You need to load the fpp3 package!\" = TRUE))\n\nc(\n \"You need to use the as_tibble() function to convert back to a tibble object.\" = !search_ast(.code, .fn = as_tibble),\n \"You need to use the group_by() function to group by Region and Purpose.\" = !search_ast(.code, .fn = group_by),\n)\n```\n\nCreate a new tsibble which combines the Purposes and Regions, and just has total trips by State.\n\n```{webr-teachr}\ntourism\n\n???\n\nif(!(\"fpp3\" %in% 
.packages())) return(c(\"You need to load the fpp3 package!\" = TRUE))\n\nc(\n \"You need to use the filter() function to extract only Victorian tourists.\" = !search_ast(.code, .fn = filter),\n \"You need to use the summarise() function to sum over the Region and Purpose keys.\" = !search_ast(.code, .fn = summarise),\n)\n```\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week1/index/execute-results/html.json b/_freeze/week1/index/execute-results/html.json
deleted file mode 100644
index eec22b5..0000000
--- a/_freeze/week1/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "8210bd1a75592d650f579f831b83fa1d",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 1: What is forecasting?\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* How to think about forecasting from a statistical perspective\n* What makes something easy or hard to forecast?\n* Using the `tsibble` package in R\n\n## Pre-class activities\n\n- Before we start classes, make sure you are familiar with R, RStudio and the tidyverse packages. If you've already done one of ETX2250/ETC1010 or something equivalent you should be fairly familiar with these concepts and probably will not need much help. If you're new to R and the tidyverse, then you will need to get yourself up-to-speed. Work through the first five modules of the **StartR** tutorial at [startr.numbat.space](https://startr.numbat.space). Do as much of it as you think you need. For those students new to R, it is strongly recommended that you do all five modules. For those who have previously used R, concentrate on the parts where you feel you are weakest.\n\n- Read the Appendix in the text book ([https://otexts.com/fpp3/appendix-using-r.html](https://otexts.com/fpp3/appendix-using-r.html)) and follow instructions to install R and RStudio on your personal computer. \n\n* Read [Chapter 1 of the textbook](http://OTexts.com/fpp3/intro.html) and watch all embedded videos. Pay particular attention to [Section 1.7](https://otexts.com/fpp3/perspective.html).\n\n* Read [Section 2.1 of the textbook](https://otexts.com/fpp3/tsibbles.html#tsibbles) and watch the embedded video.\n\n\n\n\n\n\n## [Workshop activities](activities.qmd)\n\n\n\n\n\n\n\n\n\n\n\n\n## Tutorial exercises\n\nThe main tasks for Week 1 tutorials will be:\n\n1. To ensure that you have successfully installed R and RStudio on your own laptop.\n2. Work your way through Chapters 1-5 [startr.numbat.space](https://startr.numbat.space). This is material we have prepared for you and other Monash students working in R. You should do these at your own pace to understand the concepts.\n\nYour tutors will be in your tutorial class to assist you. \n\n\n\n\n\n\n\n## Assignments\n\n* [IA1](../assignments/A1.qmd) is due on Monday 10 March.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week1/slides/execute-results/html.json b/_freeze/week1/slides/execute-results/html.json
deleted file mode 100644
index 21bf3a7..0000000
--- a/_freeze/week1/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "f38477057393e1244554f7e930629677",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 1: Introduction to forecasting and R\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n keep_tex: yes\n---\n\n\n\n\n\n## Contact details\n\n\\vspace*{0.2cm}\n\\begin{alertblock}{Chief Examiner: Professor Rob Hyndman}\n\\href{mailto:rob.hyndman@monash.edu}{\\faicon{envelope} rob.hyndman@monash.edu}\n\n\\href{https://robjhyndman.com}{\\faicon{home} robjhyndman.com}\n\n\\href{https://twitter.com/robjhyndman}{\\faicon{twitter} @robjhyndman}\n\\end{alertblock}\n\n\\begin{block}{Tutors}\n\\begin{itemize}\\itemsep=0cm\\parskip=0cm\n\\item \\textbf{Mitchell O'Hara-Wild}\n\\item Elena Sanina\n\\item Xiaoqian Wang\n\\item Zhixiang (Elvis) Yang\n\\end{itemize}\n\\end{block}\n\n## Brief bio\n\\fontsize{13}{16}\\sf\n\n - Professor of Statistics, Monash University\n - Co-author of most popular forecasting textbook in the world\n - Developer of most popular forecasting software in the world\n\n### How my forecasting methodology is used:\n - Pharmaceutical Benefits Scheme\n - Electricity demand\n - Australian tourism demand\n - Ageing population\n - COVID-19 cases\n - TAC large claims\n\n## Unit objectives\n\\fontsize{13}{14}\\sf\n\n 1. To obtain an understanding of common statistical methods used in business and economic forecasting.\n 2. To develop the computer skills required to forecast business and economic time series data;\n 3. To gain insights into the problems of implementing and operating large scale forecasting systems for use in business.\n\n\\pause\n\n### Teaching and learning approach\n\\vspace*{-0.2cm}\n\n* Approximately one hour of online videos each week.\n* One 90 minute in-person tutorial each week.\n* One 50 minute in-person seminar each Friday.\n* One tutorial will be recorded each week and posted online.\n\n## Key reference\n\n\\begin{block}{}\\bf\n\\hangafter=1\\hangindent=.3cm\n {Hyndman, R.~J. \\& Athanasopoulos, G. 
(2021) \\emph{Forecasting: principles and practice}, 3rd edition}\n\\end{block}\\pause\n\\begin{alertblock}{}\\Large\n\\centerline{\\bf OTexts.com/fpp3/}\n\\end{alertblock}\n\n\\pause\n\n * Free and online\n * Data sets in associated R packages\n * R code for examples\n * Embedded online lectures\n\n## Outline\n\n\n\n::: {.cell-output-display}\n\\begin{tabular}[t]{rlr}\n\\textbf{Week} & \\textbf{Topic} & \\textbf{Chapter}\\\\\n\\midrule\n1 & Introduction to forecasting and R & 1\\\\\n2 & Time series graphics & 2\\\\\n3 & Time series decomposition & 3\\\\\n4 & The forecaster's toolbox & 5\\\\\n5--6 & Exponential smoothing & 8\\\\\n7--9 & ARIMA models & 9\\\\\n10 & Multiple regression and forecasting & 7\\\\\n11--12 & Dynamic regression & 10\\\\\n\\end{tabular}\n:::\n\n\n\n## Assessment\n\\vspace*{-0.2cm}\n\n - Four assignments and one larger project: 40%\n - Exam (2 hours): 60%.\n\n\\pause\n\n\n\n\\begin{block}{}\\small\\centerline{::: {.cell-output-display}\n\\begin{tabular}[t]{llr}\n\\textbf{Task} & \\textbf{Due Date} & \\textbf{Value}\\\\\n\\midrule\nIA1 & Fri 8 Mar & 2\\%\\\\\nIA2 & Fri 22 Mar & 3\\%\\\\\nIA3 & Fri 26 Apr & 3\\%\\\\\nIA4 & Fri 17 May & 3\\%\\\\\nGA1 & Fri 15 Mar & 3\\%\\\\\nGA2 & Fri 29 Mar & 3\\%\\\\\nGA3 & Fri 3 May & 3\\%\\\\\nGA4 & Fri 24 May & 20\\%\\\\\nFinal Exam & Official exam period & 60\\%\\\\\n\\end{tabular}\n:::\n\n}\\end{block}\n\n\n\n\\pause\\vspace*{-0.4cm}\n\n - Need at least 45\\% for exam, and 50\\% for total.\n - \\textbf{ETC5550 students:} Extra exam question.\n\n## Unit website\n\\fontsize{18}{24}\\sf\\vspace*{0.3cm}\n\n\\begin{alertblock}{}\n\\centerline{\\Large\\textbf{af.numbat.space}}\n\\end{alertblock}\n\n - Includes all course materials\n - Links for assignment submissions\n - Link to discussion forum.\n\n\\vspace*{0.5cm}\n\n###\n\\vspace*{-0.5cm}\n\\centerline{\\textbf{Please don't send emails. Use the forum.}}\n\n## International Institute of Forecasters\n\n\\placefig{1}{3}{width=4cm}{iifLOGO2}\n\n\\begin{textblock}{8}(7,3)\n \\begin{block}{}\n \\begin{itemize}\n \\item The IIF provides a prize to the top student in this subject each year.\n \\item US\\$100 plus one year membership.\n \\end{itemize}\n \\end{block}\n\\end{textblock}\n\n## R\n\\fontsize{13}{13}\\sf\n\n{height=1.3cm}\n\nAvailable for download from CRAN:\n\nhttps://cran.r-project.org\n\n\\vspace*{-0.5cm}\\noindent\\rule{\\textwidth}{1pt}\n\n{height=1.3cm}\n\nAvailable for download from RStudio:\n\nhttps://www.rstudio.com/products/rstudio/download/\n\n\n\n\n\n## Main packages\n\n\\placefig{4.2}{1.4}{width=3.8cm}{tsibble.png}\n\\placefig{8.0}{1.4}{width=3.8cm}{tsibbledata.png}\n\\placefig{2.3}{4.65}{width=3.8cm}{tidyverse.png}\n\\placefig{6.1}{4.65}{width=3.8cm}{feasts.png}\n\\placefig{9.9}{4.65}{width=3.8cm}{fable.png}\n\n## Main packages\n\n```r\n# Install required packages (do once)\ninstall.packages(c(\"tidyverse\", \"fpp3\", \"GGally), dependencies = TRUE)\n```\n\n\\pause\n\n```r\n# At the start of each session\nlibrary(fpp3)\n```\n\n## Exercises Week 1\n\\fontsize{18}{24}\\sf\n\n * Make sure you are familiar with R, RStudio and the tidyverse packages.\n * Do first five chapters of `learnr.numbat.space`.\n * Assignment 1\n\n## Assignment 1: forecast the following series\n\\small\\vspace*{-0.2cm}\n\n 1. Google closing stock price on 20 March 2024\n 2. Maximum temperature at Melbourne airport on 10 April 2024\n 3. The difference in points (Collingwood minus Essendon) scored in the AFL match between Collingwood and Essendon for the Anzac Day clash. 25 April 2024\n 4. 
The seasonally adjusted estimate of total employment for April 2024. ABS CAT 6202, to be released around mid May 2024\n 5. Google closing stock price on 22 May 2024\n\n\\begin{block}{Due Friday 8 March}\nFor each of these, give a point forecast and an 80\\% prediction interval.\n\\end{block}\\pause\n\\begin{alertblock}{}\nPrize: \\$50 cash prize\n\\end{alertblock}\n\n## Assignment 1: scoring\n\\small\n\n$Y=$ actual, $F=$ point forecast, $[L,U]=$ prediction interval\n\n### Point forecasts:\n\\vspace*{-0.8cm}\n$$\\text{Absolute Error} = |Y-F|\n$$\\vspace*{-1cm}\n\n * Rank results for all students in class\n * Add ranks across all five items\n\n### Prediction intervals:\n\\vspace*{-0.8cm}\n$$\n\\text{Interval Score} = (U - L) + 10(L - Y)_+ + 10 (Y-U)_+\n$$\\vspace*{-1cm}\n\n * $u_+ = \\text{max}(u,0)$\n * Rank results for all students\n * Add ranks across all five items\n\n## `tsibble` objects\n\n\\fontsize{10}{10.8}\\sf\n\n\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nglobal_economy\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n# A tsibble: 15,150 x 6 [1Y]\n# Key: Country [263]\n Year Country GDP Imports Exports Population\n \n 1 1960 Afghanistan 537777811. 7.02 4.13 8996351\n 2 1961 Afghanistan 548888896. 8.10 4.45 9166764\n 3 1962 Afghanistan 546666678. 9.35 4.88 9345868\n 4 1963 Afghanistan 751111191. 16.9 9.17 9533954\n 5 1964 Afghanistan 800000044. 18.1 8.89 9731361\n 6 1965 Afghanistan 1006666638. 21.4 11.3 9938414\n 7 1966 Afghanistan 1399999967. 18.6 8.57 10152331\n 8 1967 Afghanistan 1673333418. 14.2 6.77 10372630\n 9 1968 Afghanistan 1373333367. 15.2 8.90 10604346\n10 1969 Afghanistan 1408888922. 15.0 10.1 10854428\n# ℹ 15,140 more rows\n```\n:::\n:::\n\n\n\n\\only<2->{\\begin{textblock}{.75}(1.65,3.4)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Index\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<3->{\\begin{textblock}{1.6}(2.78,3.4)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Key\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<4>{\\begin{textblock}{6.7}(4.8,3.4)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Measured variables\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\n## `tsibble` objects\n\n\\fontsize{10}{10.8}\\sf\n\n\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\ntourism\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n# A tsibble: 24,320 x 5 [1Q]\n# Key: Region, State, Purpose [304]\n Quarter Region State Purpose Trips\n \n 1 1998 Q1 Adelaide SA Business 135.\n 2 1998 Q2 Adelaide SA Business 110.\n 3 1998 Q3 Adelaide SA Business 166.\n 4 1998 Q4 Adelaide SA Business 127.\n 5 1999 Q1 Adelaide SA Business 137.\n 6 1999 Q2 Adelaide SA Business 200.\n 7 1999 Q3 Adelaide SA Business 169.\n 8 1999 Q4 Adelaide SA Business 134.\n 9 2000 Q1 Adelaide SA Business 154.\n10 2000 Q2 Adelaide SA Business 169.\n# ℹ 24,310 more rows\n```\n:::\n:::\n\n\n\n\\only<3->{\\begin{textblock}{.98}(1.65,3.37)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Index\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<4->{\\begin{textblock}{3.9}(3,3.37)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Keys\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<5>{\\begin{textblock}{1.5}(7.4,3.37)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Measure\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\n\\only<2->{\\begin{textblock}{3}(9,5)\n\\begin{block}{}\\fontsize{10}{10}\\sf Domestic visitor nights in thousands by state/region and purpose.\\phantom{dg}\\end{block}\n\\end{textblock}}\n\n## `tsibble` objects\n\n* A `tsibble` allows storage and 
manipulation of multiple time series in R.\n\n* It contains:\n\n + An index: time information about the observation\n + Measured variable(s): numbers of interest\n + Key variable(s): optional unique identifiers for each series\n\n* It works with tidyverse functions.\n\n## The `tsibble` index\n\nTime index variables can be created with these functions:\n\n###\n\\vspace*{-0.2cm}\n\n\n\n::: {.cell-output-display}\n\\begin{tabular}[t]{ll}\n\\textbf{Frequency} & \\textbf{Function}\\\\\n\\midrule\nAnnual & \\texttt{start:end}\\\\\nQuarterly & \\texttt{yearquarter()}\\\\\nMonthly & \\texttt{yearmonth()}\\\\\nWeekly & \\texttt{yearweek()}\\\\\nDaily & \\texttt{as\\_date()}, \\texttt{ymd()}\\\\\nSub-daily & \\texttt{as\\_datetime()}\\\\\n\\end{tabular}\n:::\n\n\n\n\n## Your turn\n\n\n\n\n```{.r .cell-code}\nshow_activity(1, title = FALSE)\n```\n\n\n1. Download `tourism.xlsx` from [`http://robjhyndman.com/data/tourism.xlsx`](http://robjhyndman.com/data/tourism.xlsx), and read it into R using `read_excel()` from the `readxl` package.\n2. Create a tsibble which is identical to the `tourism` tsibble from the `tsibble` package.\n3. Find what combination of `Region` and `Purpose` had the maximum number of overnight trips on average.\n4. Create a new tsibble which combines the Purposes and Regions, and just has total trips by State.\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week1/slides/execute-results/tex.json b/_freeze/week1/slides/execute-results/tex.json
deleted file mode 100644
index fbae9c1..0000000
--- a/_freeze/week1/slides/execute-results/tex.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "hash": "5cbbc89b44fb7cf4ae70666740d672ce",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 1: Introduction to forecasting and R\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n keep_tex: yes\n---\n\n\n\n\n## Contact details\n\n\\vspace*{0.2cm}\n\\begin{alertblock}{Chief Examiner: Professor Rob Hyndman}\n\\href{mailto:rob.hyndman@monash.edu}{\\faicon{envelope} rob.hyndman@monash.edu}\n\n\\href{https://robjhyndman.com}{\\faicon{home} robjhyndman.com}\n\n\\href{https://twitter.com/robjhyndman}{\\faicon{twitter} @robjhyndman}\n\\end{alertblock}\n\n\\begin{block}{Tutors}\n\\begin{itemize}\\itemsep=0cm\\parskip=0cm\n\\item \\textbf{Mitchell O'Hara-Wild}\n\\item Elena Sanina\n\\item Xiaoqian Wang\n\\item Zhixiang (Elvis) Yang\n\\end{itemize}\n\\end{block}\n\n## Brief bio\n\\fontsize{13}{16}\\sf\n\n - Professor of Statistics, Monash University\n - Co-author of most popular forecasting textbook in the world\n - Developer of most popular forecasting software in the world\n\n### How my forecasting methodology is used:\n - Pharmaceutical Benefits Scheme\n - Electricity demand\n - Australian tourism demand\n - Ageing population\n - COVID-19 cases\n - TAC large claims\n\n## Unit objectives\n\\fontsize{13}{14}\\sf\n\n 1. To obtain an understanding of common statistical methods used in business and economic forecasting.\n 2. To develop the computer skills required to forecast business and economic time series data;\n 3. To gain insights into the problems of implementing and operating large scale forecasting systems for use in business.\n\n\\pause\n\n### Teaching and learning approach\n\\vspace*{-0.2cm}\n\n* Approximately one hour of online videos each week.\n* One 90 minute in-person tutorial each week.\n* One 50 minute in-person seminar each Friday.\n* One tutorial will be recorded each week and posted online.\n\n## Key reference\n\n\\begin{block}{}\\bf\n\\hangafter=1\\hangindent=.3cm\n {Hyndman, R.~J. \\& Athanasopoulos, G. 
(2021) \\emph{Forecasting: principles and practice}, 3rd edition}\n\\end{block}\\pause\n\\begin{alertblock}{}\\Large\n\\centerline{\\bf OTexts.com/fpp3/}\n\\end{alertblock}\n\n\\pause\n\n * Free and online\n * Data sets in associated R packages\n * R code for examples\n * Embedded online lectures\n\n## Outline\n\n\n::: {.cell-output-display}\n\\begin{tabular}[t]{rlr}\n\\textbf{Week} & \\textbf{Topic} & \\textbf{Chapter}\\\\\n\\midrule\n1 & Introduction to forecasting and R & 1\\\\\n2 & Time series graphics & 2\\\\\n3 & Time series decomposition & 3\\\\\n4 & The forecaster's toolbox & 5\\\\\n5--6 & Exponential smoothing & 8\\\\\n7--9 & ARIMA models & 9\\\\\n10 & Multiple regression and forecasting & 7\\\\\n11--12 & Dynamic regression & 10\\\\\n\\end{tabular}\n:::\n\n\n## Assessment\n\\vspace*{-0.2cm}\n\n - Four assignments and one larger project: 40%\n - Exam (2 hours): 60%.\n\n\\pause\n\n\n\\begin{block}{}\\small\\centerline{::: {.cell-output-display}\n\\begin{tabular}[t]{llr}\n\\textbf{Task} & \\textbf{Due Date} & \\textbf{Value}\\\\\n\\midrule\nAssignment 1 & Fri 8 Mar & 2\\%\\\\\nAssignment 2 & Fri 22 Mar & 6\\%\\\\\nAssignment 3 & Fri 12 Apr & 6\\%\\\\\nAssignment 4 & Fri 3 May & 6\\%\\\\\nRetail Project & Fri 24 May & 20\\%\\\\\nFinal Exam & Official exam period & 60\\%\\\\\n\\end{tabular}\n:::\n\n}\\end{block}\n\n\n\\pause\\vspace*{-0.4cm}\n\n - Need at least 45\\% for exam, and 50\\% for total.\n - \\textbf{ETC5550 students:} Extra exam question.\n\n## Unit website\n\\fontsize{18}{24}\\sf\\vspace*{0.3cm}\n\n\\begin{alertblock}{}\n\\centerline{\\Large\\textbf{af.numbat.space}}\n\\end{alertblock}\n\n - Includes all course materials\n - Links for assignment submissions\n - Link to discussion forum.\n\n\\vspace*{0.5cm}\n\n###\n\\vspace*{-0.5cm}\n\\centerline{\\textbf{Please don't send emails. Use the forum.}}\n\n## International Institute of Forecasters\n\n\\placefig{1}{3}{width=4cm}{iifLOGO2}\n\n\\begin{textblock}{8}(7,3)\n \\begin{block}{}\n \\begin{itemize}\n \\item The IIF provides a prize to the top student in this subject each year.\n \\item US\\$100 plus one year membership.\n \\end{itemize}\n \\end{block}\n\\end{textblock}\n\n## R\n\\fontsize{13}{13}\\sf\n\n{height=1.3cm}\n\nAvailable for download from CRAN:\n\nhttps://cran.r-project.org\n\n\\vspace*{-0.5cm}\\noindent\\rule{\\textwidth}{1pt}\n\n{height=1.3cm}\n\nAvailable for download from RStudio:\n\nhttps://www.rstudio.com/products/rstudio/download/\n\n\n\n\n\n## Main packages\n\n\\placefig{4.2}{1.4}{width=3.8cm}{tsibble.png}\n\\placefig{8.0}{1.4}{width=3.8cm}{tsibbledata.png}\n\\placefig{2.3}{4.65}{width=3.8cm}{tidyverse.png}\n\\placefig{6.1}{4.65}{width=3.8cm}{feasts.png}\n\\placefig{9.9}{4.65}{width=3.8cm}{fable.png}\n\n## Main packages\n\n```r\n# Install required packages (do once)\ninstall.packages(c(\"tidyverse\", \"fpp3\", \"GGally), dependencies = TRUE)\n```\n\n\\pause\n\n```r\n# At the start of each session\nlibrary(fpp3)\n```\n\n## Exercises Week 1\n\\fontsize{18}{24}\\sf\n\n * Make sure you are familiar with R, RStudio and the tidyverse packages.\n * Do first five chapters of `learnr.numbat.space`.\n * Assignment 1\n\n## Assignment 1: forecast the following series\n\\small\\vspace*{-0.2cm}\n\n 1. Google closing stock price on 20 March 2024\n 2. Maximum temperature at Melbourne airport on 10 April 2024\n 3. The difference in points (Collingwood minus Essendon) scored in the AFL match between Collingwood and Essendon for the Anzac Day clash. 25 April 2024\n 4. 
The seasonally adjusted estimate of total employment for April 2024. ABS CAT 6202, to be released around mid May 2024\n 5. Google closing stock price on 22 May 2024\n\n\\begin{block}{Due Friday 8 March}\nFor each of these, give a point forecast and an 80\\% prediction interval.\n\\end{block}\\pause\n\\begin{alertblock}{}\nPrize: \\$50 cash prize\n\\end{alertblock}\n\n## Assignment 1: scoring\n\\small\n\n$Y=$ actual, $F=$ point forecast, $[L,U]=$ prediction interval\n\n### Point forecasts:\n\\vspace*{-0.8cm}\n$$\\text{Absolute Error} = |Y-F|\n$$\\vspace*{-1cm}\n\n * Rank results for all students in class\n * Add ranks across all five items\n\n### Prediction intervals:\n\\vspace*{-0.8cm}\n$$\n\\text{Interval Score} = (U - L) + 10(L - Y)_+ + 10 (Y-U)_+\n$$\\vspace*{-1cm}\n\n * $u_+ = \\text{max}(u,0)$\n * Rank results for all students\n * Add ranks across all five items\n\n## `tsibble` objects\n\n\\fontsize{10}{10.8}\\sf\n\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nglobal_economy\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n# A tsibble: 15,150 x 6 [1Y]\n# Key: Country [263]\n Year Country GDP Imports Exports Population\n \n 1 1960 Afghanistan 537777811. 7.02 4.13 8996351\n 2 1961 Afghanistan 548888896. 8.10 4.45 9166764\n 3 1962 Afghanistan 546666678. 9.35 4.88 9345868\n 4 1963 Afghanistan 751111191. 16.9 9.17 9533954\n 5 1964 Afghanistan 800000044. 18.1 8.89 9731361\n 6 1965 Afghanistan 1006666638. 21.4 11.3 9938414\n 7 1966 Afghanistan 1399999967. 18.6 8.57 10152331\n 8 1967 Afghanistan 1673333418. 14.2 6.77 10372630\n 9 1968 Afghanistan 1373333367. 15.2 8.90 10604346\n10 1969 Afghanistan 1408888922. 15.0 10.1 10854428\n# i 15,140 more rows\n```\n:::\n:::\n\n\n\\only<2->{\\begin{textblock}{.75}(1.65,3.4)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Index\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<3->{\\begin{textblock}{1.6}(2.78,3.4)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Key\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<4>{\\begin{textblock}{6.7}(4.8,3.4)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Measured variables\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\n## `tsibble` objects\n\n\\fontsize{10}{10.8}\\sf\n\n\n::: {.cell}\n\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\ntourism\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n# A tsibble: 24,320 x 5 [1Q]\n# Key: Region, State, Purpose [304]\n Quarter Region State Purpose Trips\n \n 1 1998 Q1 Adelaide SA Business 135.\n 2 1998 Q2 Adelaide SA Business 110.\n 3 1998 Q3 Adelaide SA Business 166.\n 4 1998 Q4 Adelaide SA Business 127.\n 5 1999 Q1 Adelaide SA Business 137.\n 6 1999 Q2 Adelaide SA Business 200.\n 7 1999 Q3 Adelaide SA Business 169.\n 8 1999 Q4 Adelaide SA Business 134.\n 9 2000 Q1 Adelaide SA Business 154.\n10 2000 Q2 Adelaide SA Business 169.\n# i 24,310 more rows\n```\n:::\n:::\n\n\n\\only<3->{\\begin{textblock}{.98}(1.65,3.37)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Index\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<4->{\\begin{textblock}{3.9}(3,3.37)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Keys\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\\only<5>{\\begin{textblock}{1.5}(7.4,3.37)\n\\begin{alertblock}{}\\fontsize{9}{9}\\sf Measure\\phantom{dg}\\end{alertblock}\n\\end{textblock}}\n\n\\only<2->{\\begin{textblock}{3}(9,5)\n\\begin{block}{}\\fontsize{10}{10}\\sf Domestic visitor nights in thousands by state/region and purpose.\\phantom{dg}\\end{block}\n\\end{textblock}}\n\n## `tsibble` objects\n\n* A `tsibble` allows storage and manipulation of 
multiple time series in R.\n\n* It contains:\n\n + An index: time information about the observation\n + Measured variable(s): numbers of interest\n + Key variable(s): optional unique identifiers for each series\n\n* It works with tidyverse functions.\n\n## The `tsibble` index\n\nTime index variables can be created with these functions:\n\n###\n\\vspace*{-0.2cm}\n\n\n::: {.cell-output-display}\n\\begin{tabular}[t]{ll}\n\\textbf{Frequency} & \\textbf{Function}\\\\\n\\midrule\nAnnual & \\texttt{start:end}\\\\\nQuarterly & \\texttt{yearquarter()}\\\\\nMonthly & \\texttt{yearmonth()}\\\\\nWeekly & \\texttt{yearweek()}\\\\\nDaily & \\texttt{as\\_date()}, \\texttt{ymd()}\\\\\nSub-daily & \\texttt{as\\_datetime()}\\\\\n\\end{tabular}\n:::\n\n\n\n## Your turn\n\n\n\n```{.r .cell-code}\nshow_activity(1, title = FALSE)\n```\n\n\n1. Download `tourism.xlsx` from [`http://robjhyndman.com/data/tourism.xlsx`](http://robjhyndman.com/data/tourism.xlsx), and read it into R using `read_excel()` from the `readxl` package.\n2. Create a tsibble which is identical to the `tourism` tsibble from the `tsibble` package.\n3. Find what combination of `Region` and `Purpose` had the maximum number of overnight trips on average.\n4. Create a new tsibble which combines the Purposes and Regions, and just has total trips by State.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {
- "include-in-header": [
- "\\usepackage{booktabs}\n\\usepackage{longtable}\n\\usepackage{array}\n\\usepackage{multirow}\n\\usepackage{wrapfig}\n\\usepackage{float}\n\\usepackage{colortbl}\n\\usepackage{pdflscape}\n\\usepackage{tabu}\n\\usepackage{threeparttable}\n\\usepackage{threeparttablex}\n\\usepackage[normalem]{ulem}\n\\usepackage{makecell}\n\\usepackage{xcolor}\n"
- ]
- },
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
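The deleted week 1 notes close with an exercise on rebuilding the `tourism` tsibble from `tourism.xlsx`. A minimal sketch of one possible solution, assuming the spreadsheet has the same columns as the packaged data (`Quarter`, `Region`, `State`, `Purpose`, `Trips`) and a `Quarter` column that `yearquarter()` can parse:

```r
library(fpp3)
library(readxl)

# Read the spreadsheet, convert Quarter to a quarterly index, then
# declare the index and key variables to obtain a tsibble.
my_tourism <- read_excel("tourism.xlsx") |>
  mutate(Quarter = yearquarter(Quarter)) |>
  as_tsibble(index = Quarter, key = c(Region, State, Purpose))

# Region/Purpose combination with the highest average overnight trips.
my_tourism |>
  as_tibble() |>
  group_by(Region, Purpose) |>
  summarise(Trips = mean(Trips), .groups = "drop") |>
  slice_max(Trips, n = 1)
```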
diff --git a/_freeze/week10/.DS_Store b/_freeze/week10/.DS_Store
deleted file mode 100644
index 7b10057..0000000
Binary files a/_freeze/week10/.DS_Store and /dev/null differ
diff --git a/_freeze/week10/index/execute-results/html.json b/_freeze/week10/index/execute-results/html.json
deleted file mode 100644
index 48e97f6..0000000
--- a/_freeze/week10/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "621b0230efa75d0d376727e3a2f534df",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 10: Multiple regression and forecasting\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Useful predictors for time series forecasting using regression\n* Selecting predictors\n* Ex ante and ex post forecasting\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 9-13 from [Section 9.11 of the book](https://otexts.com/fpp3/arima-exercises.html).\n\n## Exercise solutions\n\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Chapter 7 of the textbook](https://otexts.com/fpp3/regression.html) and watch all embedded videos\n\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n1. Build a multiple regression model to forecast US consumption expenditure in the `us_change` tsibble. \n\n2. Build a harmonic regression model to forecast weekly US finished motor gasoline products supplied in barrels in the `us_gasoline` tsibble. \n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week10.R](Seminar_code_week10.R){download=\"Seminar_code_week10.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [IA4](../assignments/A4.qmd) is due on Monday 19 May.\n* [GA4](../assignments/G4.qmd) is due on Monday 26 May.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
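For the first seminar activity above (a multiple regression for US consumption expenditure), a minimal sketch assuming the standard `us_change` columns shipped with the `fpp3` package:

```r
library(fpp3)

# Regress consumption growth on the other four series; TSLM() fits a
# linear model with time series conveniences such as trend() and season().
fit <- us_change |>
  model(lm = TSLM(Consumption ~ Income + Production + Savings + Unemployment))
report(fit)

# Measures of predictive ability for comparing candidate models.
glance(fit) |> select(.model, adj_r_squared, AICc, CV)
```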
diff --git a/_freeze/week10/slides/execute-results/html.json b/_freeze/week10/slides/execute-results/html.json
deleted file mode 100644
index 217ab0f..0000000
--- a/_freeze/week10/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "7e087d0de23fc82a0ec731b366bc0363",
- "result": {
- "markdown": "---\ntitle: \"ETC3550/ETC5550 Applied forecasting\"\nauthor: \"Week 10: Regression models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## Multiple regression and forecasting\n\n\\vspace*{0.2cm}\\begin{block}{}\\vspace*{-0.3cm}\n$$\n y_t = \\beta_0 + \\beta_1 x_{1,t} + \\beta_2 x_{2,t} + \\cdots + \\beta_kx_{k,t} + \\varepsilon_t.\n$$\n\\end{block}\n\n* $y_t$ is the variable we want to predict: the \"response\" variable\n* Each $x_{j,t}$ is numerical and is called a \"predictor\".\n They are usually assumed to be known for all past and future times.\n* The coefficients $\\beta_1,\\dots,\\beta_k$ measure the effect of each\npredictor *after taking account of the effect of all other predictors\nin the model*.\n* $\\varepsilon_t$ is a white noise error term\n\n## Trend\n\n**Linear trend**\n\n\\centerline{$x_t = t,\\qquad t = 1,2,\\dots,$}\\pause\n\n**Piecewise linear trend with bend at $\\tau$**\n\\vspace*{-0.6cm}\n\\begin{align*}\nx_{1,t} &= t \\\\\nx_{2,t} &= \\left\\{ \\begin{array}{ll}\n 0 & t <\\tau\\\\\n (t-\\tau) & t \\ge \\tau\n\\end{array}\\right.\n\\end{align*}\n\\pause\\vspace*{-0.8cm}\n\n**Quadratic or higher order trend**\n\n\\centerline{$x_{1,t} =t,\\quad x_{2,t}=t^2,\\quad \\dots$}\n\n\\pause\\vspace*{-0.1cm}\n\\centerline{\\textcolor{orange}{\\textbf{NOT RECOMMENDED!}}}\n\n## Uses of dummy variables\n\\fontsize{13}{14}\\sf\n\n**Seasonal dummies**\n\n* For quarterly data: use 3 dummies\n* For monthly data: use 11 dummies\n* For daily data: use 6 dummies\n* What to do with weekly data?\n\n\\pause\n\n**Outliers**\n\n* A dummy variable can remove its effect.\n\n\\pause\n\n**Public holidays**\n\n* For daily data: if it is a public holiday, dummy=1, otherwise dummy=0.\n\n## Holidays\n\n**For monthly data**\n\n* Christmas: always in December so part of monthly seasonal effect\n* Easter: use a dummy variable $v_t=1$ if any part of Easter is in that month, $v_t=0$ otherwise.\n* Ramadan and Chinese New Year similar.\n\n## Fourier series\n\nPeriodic seasonality can be handled using pairs of Fourier \\rlap{terms:}\\vspace*{-0.3cm}\n$$\ns_{k}(t) = \\sin\\left(\\frac{2\\pi k t}{m}\\right)\\qquad c_{k}(t) = \\cos\\left(\\frac{2\\pi k t}{m}\\right)\n$$\n$$\ny_t = a + bt + \\sum_{k=1}^K \\left[\\alpha_k s_k(t) + \\beta_k c_k(t)\\right] + \\varepsilon_t$$\\vspace*{-0.8cm}\n\n* Every periodic function can be approximated by sums of sin and cos terms for large enough $K$.\n* Choose $K$ by minimizing AICc or CV.\n* Called \"harmonic regression\"\n\n## Distributed lags\n\nLagged values of a predictor.\n\nExample: $x$ is advertising which has a delayed effect\n\n\\vspace*{-0.8cm}\\begin{align*}\n x_{1} &= \\text{advertising for previous month;} \\\\\n x_{2} &= \\text{advertising for two months previously;} \\\\\n & \\vdots \\\\\n x_{m} &= \\text{advertising for $m$ months previously.}\n\\end{align*}\n\n## Comparing regression models\n\\fontsize{13}{14}\\sf\n\n* $R^2$ does not allow for \"degrees of freedom\".\n* Adding *any* variable tends to increase the value of $R^2$, even if that variable is irrelevant.\n\\pause\n\nTo overcome this problem, we can use *adjusted $R^2$*:\n\\begin{block}{}\n$$\n\\bar{R}^2 = 1-(1-R^2)\\frac{T-1}{T-k-1}\n$$\nwhere $k=$ no.\\ predictors and $T=$ no.\\ observations.\n\\end{block}\n\n\\pause\n\n\\begin{alertblock}{Maximizing $\\bar{R}^2$ is equivalent to minimizing 
$\\hat\\sigma^2$.}\n\\centerline{$\\displaystyle\n\\hat{\\sigma}^2 = \\frac{1}{T-k-1}\\sum_{t=1}^T \\varepsilon_t^2$\n}\n\\end{alertblock}\n\n## Akaike's Information Criterion\n\n\\vspace*{0.2cm}\\begin{block}{}\n\\centerline{$\\text{AIC} = -2\\log(L) + 2(k+2)$}\n\\end{block}\\vspace*{-0.5cm}\n\n* $L=$ likelihood\n* $k=$ \\# predictors in model.\n* AIC penalizes terms more heavily than $\\bar{R}^2$.\n\n\\pause\\begin{block}{}\n\\centerline{$\\text{AIC}_{\\text{C}} = \\text{AIC} + \\frac{2(k+2)(k+3)}{T-k-3}$}\n\\end{block}\n\n* Minimizing the AIC or AICc is asymptotically equivalent to minimizing MSE via **leave-one-out cross-validation** (for any linear regression).\n\n## Leave-one-out cross-validation\n\nFor regression, leave-one-out cross-validation is faster and more efficient than time-series cross-validation.\n\n* Select one observation for test set, and use *remaining* observations in training set. Compute error on test observation.\n* Repeat using each possible observation as the test set.\n* Compute accuracy measure over all errors.\n\n\n\n::: {.cell}\n\n:::\n\n\n\n## Cross-validation {-}\n\n**Traditional evaluation**\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n\\pause\n\n**Time series cross-validation**\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## Cross-validation {-}\n\n**Traditional evaluation**\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n**Leave-one-out cross-validation**\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n\\only<2>{\\begin{textblock}{4}(6,6)\\begin{block}{}\\fontsize{13}{15}\\sf\nCV = MSE on \\textcolor[HTML]{D55E00}{test sets}\\end{block}\\end{textblock}}\n\n## Bayesian Information Criterion\n\n\\begin{block}{}\n$$\n\\text{BIC} = -2\\log(L) + (k+2)\\log(T)\n$$\n\\end{block}\nwhere $L$ is the likelihood and $k$ is the number of predictors in the model.\\pause\n\n* BIC penalizes terms more heavily than AIC\n* Also called SBIC and SC.\n* Minimizing BIC is asymptotically equivalent to leave-$v$-out cross-validation when $v = T[1-1/(log(T)-1)]$.\n\n## Choosing regression variables\n\\fontsize{14}{15}\\sf\n\n**Best subsets regression**\n\n* Fit all possible regression models using one or more of the predictors.\n* Choose the best model based on one of the measures of predictive ability (CV, AIC, AICc).\n\\pause\n\n**Backwards stepwise regression**\n\n* Start with a model containing all variables.\n* Subtract one variable at a time. Keep model if lower CV.\n* Iterate until no further improvement.\n* Not guaranteed to lead to best model.\n\n## Ex-ante versus ex-post forecasts\n\n * *Ex ante forecasts* are made using only information available in advance.\n - require forecasts of predictors\n * *Ex post forecasts* are made using later information on the predictors.\n - useful for studying behaviour of forecasting models.\n\n * trend, seasonal and calendar variables are all known in advance, so these don't need to be forecast.\n",
- "supporting": [
- "slides_files\\figure-html"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
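The Fourier-series slide in the deleted deck says to choose $K$ by minimizing AICc or CV. A sketch for the weekly `us_gasoline` series (measured variable `Barrels`), using fable's `fourier()` special and `glance()` for the comparison:

```r
library(fpp3)

# Harmonic regressions with an increasing number of Fourier pairs;
# prefer the model with the smallest AICc (or CV).
fit <- us_gasoline |>
  model(
    K1 = TSLM(Barrels ~ trend() + fourier(K = 1)),
    K2 = TSLM(Barrels ~ trend() + fourier(K = 2)),
    K3 = TSLM(Barrels ~ trend() + fourier(K = 3))
  )
glance(fit) |> select(.model, AICc, CV)
```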
diff --git a/_freeze/week10/slides/execute-results/tex.json b/_freeze/week10/slides/execute-results/tex.json
deleted file mode 100644
index 982225b..0000000
--- a/_freeze/week10/slides/execute-results/tex.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "hash": "79652497113d1f059760c711fe2b746f",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"ETC3550/ETC5550 Applied forecasting\"\nauthor: \"Week 10: Regression models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n## Multiple regression and forecasting\n\n\\vspace*{0.2cm}\\begin{block}{}\\vspace*{-0.3cm}\n$$\n y_t = \\beta_0 + \\beta_1 x_{1,t} + \\beta_2 x_{2,t} + \\cdots + \\beta_kx_{k,t} + \\varepsilon_t.\n$$\n\\end{block}\n\n* $y_t$ is the variable we want to predict: the \"response\" variable\n* Each $x_{j,t}$ is numerical and is called a \"predictor\".\n They are usually assumed to be known for all past and future times.\n* The coefficients $\\beta_1,\\dots,\\beta_k$ measure the effect of each\npredictor *after taking account of the effect of all other predictors\nin the model*.\n* $\\varepsilon_t$ is a white noise error term\n\n## Trend\n\n**Linear trend**\n\n\\centerline{$x_t = t,\\qquad t = 1,2,\\dots,$}\\pause\n\n**Piecewise linear trend with bend at $\\tau$**\n\\vspace*{-0.6cm}\n\\begin{align*}\nx_{1,t} &= t \\\\\nx_{2,t} &= \\left\\{ \\begin{array}{ll}\n 0 & t <\\tau\\\\\n (t-\\tau) & t \\ge \\tau\n\\end{array}\\right.\n\\end{align*}\n\\pause\\vspace*{-0.8cm}\n\n**Quadratic or higher order trend**\n\n\\centerline{$x_{1,t} =t,\\quad x_{2,t}=t^2,\\quad \\dots$}\n\n\\pause\\vspace*{-0.1cm}\n\\centerline{\\textcolor{orange}{\\textbf{NOT RECOMMENDED!}}}\n\n## Uses of dummy variables\n\\fontsize{13}{14}\\sf\n\n**Seasonal dummies**\n\n* For quarterly data: use 3 dummies\n* For monthly data: use 11 dummies\n* For daily data: use 6 dummies\n* What to do with weekly data?\n\n\\pause\n\n**Outliers**\n\n* A dummy variable can remove its effect.\n\n\\pause\n\n**Public holidays**\n\n* For daily data: if it is a public holiday, dummy=1, otherwise dummy=0.\n\n## Holidays\n\n**For monthly data**\n\n* Christmas: always in December so part of monthly seasonal effect\n* Easter: use a dummy variable $v_t=1$ if any part of Easter is in that month, $v_t=0$ otherwise.\n* Ramadan and Chinese New Year similar.\n\n## Fourier series\n\nPeriodic seasonality can be handled using pairs of Fourier \\rlap{terms:}\\vspace*{-0.3cm}\n$$\ns_{k}(t) = \\sin\\left(\\frac{2\\pi k t}{m}\\right)\\qquad c_{k}(t) = \\cos\\left(\\frac{2\\pi k t}{m}\\right)\n$$\n$$\ny_t = a + bt + \\sum_{k=1}^K \\left[\\alpha_k s_k(t) + \\beta_k c_k(t)\\right] + \\varepsilon_t$$\\vspace*{-0.8cm}\n\n* Every periodic function can be approximated by sums of sin and cos terms for large enough $K$.\n* Choose $K$ by minimizing AICc or CV.\n* Called \"harmonic regression\"\n\n## Distributed lags\n\nLagged values of a predictor.\n\nExample: $x$ is advertising which has a delayed effect\n\n\\vspace*{-0.8cm}\\begin{align*}\n x_{1} &= \\text{advertising for previous month;} \\\\\n x_{2} &= \\text{advertising for two months previously;} \\\\\n & \\vdots \\\\\n x_{m} &= \\text{advertising for $m$ months previously.}\n\\end{align*}\n\n## Comparing regression models\n\\fontsize{13}{14}\\sf\n\n* $R^2$ does not allow for \"degrees of freedom\".\n* Adding *any* variable tends to increase the value of $R^2$, even if that variable is irrelevant.\n\\pause\n\nTo overcome this problem, we can use *adjusted $R^2$*:\n\\begin{block}{}\n$$\n\\bar{R}^2 = 1-(1-R^2)\\frac{T-1}{T-k-1}\n$$\nwhere $k=$ no.\\ predictors and $T=$ no.\\ observations.\n\\end{block}\n\n\\pause\n\n\\begin{alertblock}{Maximizing $\\bar{R}^2$ is equivalent to minimizing 
$\\hat\\sigma^2$.}\n\\centerline{$\\displaystyle\n\\hat{\\sigma}^2 = \\frac{1}{T-k-1}\\sum_{t=1}^T \\varepsilon_t^2$\n}\n\\end{alertblock}\n\n## Akaike's Information Criterion\n\n\\vspace*{0.2cm}\\begin{block}{}\n\\centerline{$\\text{AIC} = -2\\log(L) + 2(k+2)$}\n\\end{block}\\vspace*{-0.5cm}\n\n* $L=$ likelihood\n* $k=$ \\# predictors in model.\n* AIC penalizes terms more heavily than $\\bar{R}^2$.\n\n\\pause\\begin{block}{}\n\\centerline{$\\text{AIC}_{\\text{C}} = \\text{AIC} + \\frac{2(k+2)(k+3)}{T-k-3}$}\n\\end{block}\n\n* Minimizing the AIC or AICc is asymptotically equivalent to minimizing MSE via **leave-one-out cross-validation** (for any linear regression).\n\n## Leave-one-out cross-validation\n\nFor regression, leave-one-out cross-validation is faster and more efficient than time-series cross-validation.\n\n* Select one observation for test set, and use *remaining* observations in training set. Compute error on test observation.\n* Repeat using each possible observation as the test set.\n* Compute accuracy measure over all errors.\n\n\n::: {.cell}\n\n:::\n\n\n## Cross-validation {-}\n\n**Traditional evaluation**\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n:::\n:::\n\n\n\\pause\n\n**Time series cross-validation**\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## Cross-validation {-}\n\n**Traditional evaluation**\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n:::\n:::\n\n\n**Leave-one-out cross-validation**\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n:::\n:::\n\n\n\\only<2>{\\begin{textblock}{4}(6,6)\\begin{block}{}\\fontsize{13}{15}\\sf\nCV = MSE on \\textcolor[HTML]{D55E00}{test sets}\\end{block}\\end{textblock}}\n\n## Bayesian Information Criterion\n\n\\begin{block}{}\n$$\n\\text{BIC} = -2\\log(L) + (k+2)\\log(T)\n$$\n\\end{block}\nwhere $L$ is the likelihood and $k$ is the number of predictors in the model.\\pause\n\n* BIC penalizes terms more heavily than AIC\n* Also called SBIC and SC.\n* Minimizing BIC is asymptotically equivalent to leave-$v$-out cross-validation when $v = T[1-1/(log(T)-1)]$.\n\n## Choosing regression variables\n\\fontsize{14}{15}\\sf\n\n**Best subsets regression**\n\n* Fit all possible regression models using one or more of the predictors.\n* Choose the best model based on one of the measures of predictive ability (CV, AIC, AICc).\n\\pause\n\n**Backwards stepwise regression**\n\n* Start with a model containing all variables.\n* Subtract one variable at a time. Keep model if lower CV.\n* Iterate until no further improvement.\n* Not guaranteed to lead to best model.\n\n## Ex-ante versus ex-post forecasts\n\n * *Ex ante forecasts* are made using only information available in advance.\n - require forecasts of predictors\n * *Ex post forecasts* are made using later information on the predictors.\n - useful for studying behaviour of forecasting models.\n\n * trend, seasonal and calendar variables are all known in advance, so these don't need to be forecast.\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
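The leave-one-out slide above asserts that LOOCV is fast for linear regression; the reason is that it needs no refitting, since the CV statistic follows from a single fit via the hat values. A generic base-R illustration (the `mtcars` model is purely illustrative, not course data):

```r
# LOOCV for linear regression: CV = mean((e_i / (1 - h_i))^2),
# where e_i are the residuals and h_i the hat (leverage) values.
fit <- lm(mpg ~ disp + wt, data = mtcars)
e <- residuals(fit)
h <- hatvalues(fit)
mean((e / (1 - h))^2)
```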
diff --git a/_freeze/week10/slides/figure-beamer/traintest1-1.pdf b/_freeze/week10/slides/figure-beamer/traintest1-1.pdf
deleted file mode 100644
index 12c8b85..0000000
Binary files a/_freeze/week10/slides/figure-beamer/traintest1-1.pdf and /dev/null differ
diff --git a/_freeze/week10/slides/figure-beamer/traintest1a-1.pdf b/_freeze/week10/slides/figure-beamer/traintest1a-1.pdf
deleted file mode 100644
index 7da116d..0000000
Binary files a/_freeze/week10/slides/figure-beamer/traintest1a-1.pdf and /dev/null differ
diff --git a/_freeze/week10/slides/figure-beamer/tscvggplot1-1.pdf b/_freeze/week10/slides/figure-beamer/tscvggplot1-1.pdf
deleted file mode 100644
index 6b8e801..0000000
Binary files a/_freeze/week10/slides/figure-beamer/tscvggplot1-1.pdf and /dev/null differ
diff --git a/_freeze/week10/slides/figure-beamer/unnamed-chunk-1-1.pdf b/_freeze/week10/slides/figure-beamer/unnamed-chunk-1-1.pdf
deleted file mode 100644
index f1dd61a..0000000
Binary files a/_freeze/week10/slides/figure-beamer/unnamed-chunk-1-1.pdf and /dev/null differ
diff --git a/_freeze/week10/slides/figure-html/traintest1-1.png b/_freeze/week10/slides/figure-html/traintest1-1.png
deleted file mode 100644
index bd283ab..0000000
Binary files a/_freeze/week10/slides/figure-html/traintest1-1.png and /dev/null differ
diff --git a/_freeze/week10/slides/figure-html/traintest1a-1.png b/_freeze/week10/slides/figure-html/traintest1a-1.png
deleted file mode 100644
index bd283ab..0000000
Binary files a/_freeze/week10/slides/figure-html/traintest1a-1.png and /dev/null differ
diff --git a/_freeze/week10/slides/figure-html/tscvggplot1-1.png b/_freeze/week10/slides/figure-html/tscvggplot1-1.png
deleted file mode 100644
index 38a40e8..0000000
Binary files a/_freeze/week10/slides/figure-html/tscvggplot1-1.png and /dev/null differ
diff --git a/_freeze/week10/slides/figure-html/unnamed-chunk-1-1.png b/_freeze/week10/slides/figure-html/unnamed-chunk-1-1.png
deleted file mode 100644
index e0451ea..0000000
Binary files a/_freeze/week10/slides/figure-html/unnamed-chunk-1-1.png and /dev/null differ
diff --git a/_freeze/week11/.DS_Store b/_freeze/week11/.DS_Store
deleted file mode 100644
index 3eaa23d..0000000
Binary files a/_freeze/week11/.DS_Store and /dev/null differ
diff --git a/_freeze/week11/index/execute-results/html.json b/_freeze/week11/index/execute-results/html.json
deleted file mode 100644
index e01e982..0000000
--- a/_freeze/week11/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "bd2c2400897863124a05d246fe0e5d33",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 11: Dynamic regression\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* How to combine regression models with ARIMA models to form dynamic regression models\n* Dynamic harmonic regression to handle complex seasonality\n* Lagged predictors\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 1-7 from [Section 7.10 of the book](https://otexts.com/fpp3/regression-exercises.html).\n\n## Exercise solutions\n\n\n\n\n\n## Pre-class seminar\n\nRead [Chapter 10 of the textbook](https://otexts.com/fpp3/dynamic.html) and watch all embedded videos\n\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n1. Review the model for US gasoline data from last week to now be a dynamic harmonic regression model. \n\n\n2. Turn the half hourly electricity data into daily data using the following code. Try and understand what each line does.\n\n ```r\n vic_elec_daily <- vic_elec |> \n index_by(Date = date(Time)) |> \n summarise( \n Demand = sum(Demand)/1e3, \n Temperature = max(Temperature), \n Holiday = any(Holiday) \n ) |> \n mutate(Day_Type = case_when( \n Holiday ~ \"Holiday\", \n wday(Date) %in% 2:6 ~ \"Weekday\", \n TRUE ~ \"Weekend\" \n )) \n ```\n Explore the seasonal patterns.\n \n a. Fit an ETS, ARIMA and a dynamic harmonic regression model using the following code:\n \n ```r\n elec_fit <- vic_elec_daily |>\n model(\n ets = ETS(Demand),\n arima = ARIMA(log(Demand)),\n dhr = ARIMA(log(Demand) ~ Temperature + I(Temperature^2) + \n (Day_Type == \"Weekday\") + \n fourier(period = \"year\", K = 4))\n )\n ```\n Explore the model fits and residuals. \n \n b. Generate forecast for 14-days-ahead using the following code. \n \n ```r\n vic_elec_future <- new_data(vic_elec_daily, 14) |>\n mutate(\n Temperature = c(rep(32, 7), rep(25, 7)),\n Holiday = c(TRUE, rep(FALSE, 13)),\n Day_Type = case_when(\n Holiday ~ \"Holiday\",\n wday(Date) %in% 2:6 ~ \"Weekday\",\n TRUE ~ \"Weekend\"\n )\n )\n ```\n\n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week11.R](Seminar_code_week11.R){download=\"Seminar_code_week11.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [IA4](../assignments/A4.qmd) is due on Monday 19 May.\n* [GA4](../assignments/G4.qmd) is due on Monday 26 May.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
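The deleted week 11 activity defines `vic_elec_future` but stops short of the forecast itself. One possible final step, assuming the `elec_fit` and `vic_elec_future` objects built by the code above:

```r
# Forecast the dynamic harmonic regression model over the 14 days of
# assumed future temperatures, and plot against recent history.
elec_fit |>
  select(dhr) |>
  forecast(new_data = vic_elec_future) |>
  autoplot(vic_elec_daily |> tail(90))
```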
diff --git a/_freeze/week11/slides/execute-results/html.json b/_freeze/week11/slides/execute-results/html.json
deleted file mode 100644
index c645f67..0000000
--- a/_freeze/week11/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "d852c1098ca4af9adc1a25b8ad386176",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 11: Dynamic regression models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## Regression with ARIMA errors\n\n\\vspace*{0.2cm}\\begin{block}{Regression models}\\vspace*{-0.2cm}\n\\[\n y_t = \\beta_0 + \\beta_1 x_{1,t} + \\dots + \\beta_k x_{k,t} + \\varepsilon_t,\n\\]\n\\end{block}\\vspace*{-0.3cm}\n\n * $y_t$ modeled as function of $k$ explanatory variables\n$x_{1,t},\\dots,x_{k,t}$.\n * In regression, we assume that $\\varepsilon_t$ is WN.\n * Now we want to allow $\\varepsilon_t$ to be autocorrelated.\n\\vspace*{0.1cm}\n\\pause\n\\begin{alertblock}{Example: ARIMA(1,1,1) errors}\\vspace*{-0.8cm}\n\\begin{align*}\n y_t &= \\beta_0 + \\beta_1 x_{1,t} + \\dots + \\beta_k x_{k,t} + \\eta_t,\\\\\n & (1-\\phi_1B)(1-B)\\eta_t = (1+\\theta_1B)\\varepsilon_t,\n\\end{align*}\n\\end{alertblock}\n\\rightline{where $\\varepsilon_t$ is white noise.}\n\n## Estimation\n\nIf we minimize $\\sum \\eta_t^2$ (by using ordinary regression):\n\n 1. Estimated coefficients $\\hat{\\beta}_0,\\dots,\\hat{\\beta}_k$ are no longer optimal as some information ignored;\n 2. Statistical tests associated with the model (e.g., t-tests on the coefficients) are incorrect.\n 3. AIC of fitted models misleading.\n\n\\pause\\vspace*{0.4cm}\n\n * Minimizing $\\sum \\varepsilon_t^2$ avoids these problems.\n * Maximizing likelihood similar to minimizing $\\sum \\varepsilon_t^2$.\n\n## Regression with ARIMA errors\n\\fontsize{14}{15}\\sf\n\nAny regression with an ARIMA error can be rewritten as a regression with an ARMA error by differencing all variables.\\pause\n\n\\begin{block}{Original data}\\vspace*{-0.8cm}\n\\begin{align*}\n y_t & = \\beta_0 + \\beta_1 x_{1,t} + \\dots + \\beta_k x_{k,t} + \\eta_t\\\\\n \\mbox{where}\\quad\n & \\phi(B)(1-B)^d\\eta_t = \\theta(B)\\varepsilon_t\n\\end{align*}\n\\end{block}\\pause\\vspace*{-0.1cm}\n\\begin{block}{After differencing all variables}\\vspace*{-0.2cm}\n$$\n y'_t = \\beta_1 x'_{1,t} + \\dots + \\beta_k x'_{k,t} + \\eta'_t.\n$$\nwhere $\\phi(B)\\eta'_t = \\theta(B)\\varepsilon_t$,\\vspace*{0.1cm}\n\n$y_t' = (1-B)^dy_t$,\\quad $x_{i,t}' = (1-B)^dx_{i,t}$,\\quad and $\\eta_t' = (1-B)^d \\eta_t$\n\\end{block}\n\n## Regression with ARIMA errors\n\n * In R, we can specify an ARIMA($p,d,q$) for the errors, and $d$ levels of differencing will be applied to all variables ($y, x_{1,t},\\dots,x_{k,t}$) during estimation.\n * Check that $\\varepsilon_t$ series looks like white noise.\n * AICc can be calculated for final model.\n * Repeat procedure for all subsets of predictors to be considered, and select model with lowest AICc value.\n\n## Forecasting\n\n * To forecast a regression model with ARIMA errors, we need to forecast the\nregression part of the model and the ARIMA part of the model and combine the\nresults.\n * Some predictors are known into the future (e.g., time, dummies).\n * Separate forecasting models may be needed for other predictors.\n * Forecast intervals ignore the uncertainty in forecasting the predictors.\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
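A compact sketch of the regression-with-ARIMA-errors workflow these slides describe, using the textbook's `us_change` data; the single predictor is illustrative:

```r
library(fpp3)

# A predictor on the right of the formula makes ARIMA() fit a regression
# with ARIMA errors; the error structure is selected by AICc.
fit <- us_change |>
  model(dr = ARIMA(Consumption ~ Income))
report(fit)

# The innovation residuals should resemble white noise.
fit |> gg_tsresiduals()
```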
diff --git a/_freeze/week11/slides/execute-results/tex.json b/_freeze/week11/slides/execute-results/tex.json
deleted file mode 100644
index f1ac170..0000000
--- a/_freeze/week11/slides/execute-results/tex.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "9a19066dde7df6fd34eb01fc2043ed49",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 11: Dynamic regression models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n## Regression with ARIMA errors\n\n\\vspace*{0.2cm}\\begin{block}{Regression models}\\vspace*{-0.2cm}\n\\[\n y_t = \\beta_0 + \\beta_1 x_{1,t} + \\dots + \\beta_k x_{k,t} + \\varepsilon_t,\n\\]\n\\end{block}\\vspace*{-0.3cm}\n\n * $y_t$ modeled as function of $k$ explanatory variables\n$x_{1,t},\\dots,x_{k,t}$.\n * In regression, we assume that $\\varepsilon_t$ is WN.\n * Now we want to allow $\\varepsilon_t$ to be autocorrelated.\n\\vspace*{0.1cm}\n\\pause\n\\begin{alertblock}{Example: ARIMA(1,1,1) errors}\\vspace*{-0.8cm}\n\\begin{align*}\n y_t &= \\beta_0 + \\beta_1 x_{1,t} + \\dots + \\beta_k x_{k,t} + \\eta_t,\\\\\n & (1-\\phi_1B)(1-B)\\eta_t = (1+\\theta_1B)\\varepsilon_t,\n\\end{align*}\n\\end{alertblock}\n\\rightline{where $\\varepsilon_t$ is white noise.}\n\n## Estimation\n\nIf we minimize $\\sum \\eta_t^2$ (by using ordinary regression):\n\n 1. Estimated coefficients $\\hat{\\beta}_0,\\dots,\\hat{\\beta}_k$ are no longer optimal as some information ignored;\n 2. Statistical tests associated with the model (e.g., t-tests on the coefficients) are incorrect.\n 3. AIC of fitted models misleading.\n\n\\pause\\vspace*{0.4cm}\n\n * Minimizing $\\sum \\varepsilon_t^2$ avoids these problems.\n * Maximizing likelihood similar to minimizing $\\sum \\varepsilon_t^2$.\n\n## Regression with ARIMA errors\n\\fontsize{14}{15}\\sf\n\nAny regression with an ARIMA error can be rewritten as a regression with an ARMA error by differencing all variables.\\pause\n\n\\begin{block}{Original data}\\vspace*{-0.8cm}\n\\begin{align*}\n y_t & = \\beta_0 + \\beta_1 x_{1,t} + \\dots + \\beta_k x_{k,t} + \\eta_t\\\\\n \\mbox{where}\\quad\n & \\phi(B)(1-B)^d\\eta_t = \\theta(B)\\varepsilon_t\n\\end{align*}\n\\end{block}\\pause\\vspace*{-0.1cm}\n\\begin{block}{After differencing all variables}\\vspace*{-0.2cm}\n$$\n y'_t = \\beta_1 x'_{1,t} + \\dots + \\beta_k x'_{k,t} + \\eta'_t.\n$$\nwhere $\\phi(B)\\eta'_t = \\theta(B)\\varepsilon_t$,\\vspace*{0.1cm}\n\n$y_t' = (1-B)^dy_t$,\\quad $x_{i,t}' = (1-B)^dx_{i,t}$,\\quad and $\\eta_t' = (1-B)^d \\eta_t$\n\\end{block}\n\n## Regression with ARIMA errors\n\n * In R, we can specify an ARIMA($p,d,q$) for the errors, and $d$ levels of differencing will be applied to all variables ($y, x_{1,t},\\dots,x_{k,t}$) during estimation.\n * Check that $\\varepsilon_t$ series looks like white noise.\n * AICc can be calculated for final model.\n * Repeat procedure for all subsets of predictors to be considered, and select model with lowest AICc value.\n\n## Forecasting\n\n * To forecast a regression model with ARIMA errors, we need to forecast the\nregression part of the model and the ARIMA part of the model and combine the\nresults.\n * Some predictors are known into the future (e.g., time, dummies).\n * Separate forecasting models may be needed for other predictors.\n * Forecast intervals ignore the uncertainty in forecasting the predictors.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
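As the final slide notes, ex ante forecasts need future values of the predictors. A sketch under the (strong, purely illustrative) assumption that income growth stays at its historical mean, continuing from the `fit` object in the sketch above:

```r
# Supply assumed future predictor values, then forecast; the intervals
# ignore the extra uncertainty in these assumed values.
us_change_future <- new_data(us_change, 8) |>
  mutate(Income = mean(us_change$Income))
fit |>
  forecast(new_data = us_change_future) |>
  autoplot(us_change)
```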
diff --git a/_freeze/week12/.DS_Store b/_freeze/week12/.DS_Store
deleted file mode 100644
index fdece40..0000000
Binary files a/_freeze/week12/.DS_Store and /dev/null differ
diff --git a/_freeze/week12/index/execute-results/html.json b/_freeze/week12/index/execute-results/html.json
deleted file mode 100644
index 2064ebb..0000000
--- a/_freeze/week12/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "b8ab04a22203da7bcafe431578a13bf8",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 12: Review\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Review Assignment 1\n* Announce winner of the forecasting competition\n* Discuss exam\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 1-7 from [Section 10.7 of the book](https://otexts.com/fpp3/dynamic-exercises.html).\n\n## Exercise solutions\n\n\n\n\n\n## Pre-seminar activities\n\nCatch up on any exercises not yet done\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n\n## Post-class activities\n\n* Do any exercises not yet finished.\n* Complete past exams: [[2021](/exams/exam2021.pdf)] [[2022](/exams/exam2022.pdf)] [[2023](/exams/exam2023.pdf)]\n\n\n* Re-read the textbook\n* Listen again to all lectures\n\n\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week12/slides/execute-results/html.json b/_freeze/week12/slides/execute-results/html.json
deleted file mode 100644
index d64f4b7..0000000
--- a/_freeze/week12/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "cec1366a8a71eb66b01681ddf3a30b32",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Revision\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n# Assignment 1\n\n## Assignment 1\n\n**Stock price forecasting** (Q1 and Q5)\n\n* Hard to beat naive forecast\n* Random walk model says forecast variance = $h\\sigma^2$.\n\n\\pause\n\n**Maximum temperature at Melbourne airport** (Q2)\n\n* Weather is relatively stationary over similar time of year and recent years.\n* So take mean and var of max temp in April over last 10 years.\n\n## Assignment 1\n\n**Difference in points in AFL match** (Q3)\n\n* Teams vary in strength from year to year.\n* Could look at distribution of for-against points for last few years across all games for each team. Assume distributions independent.\n\n\\pause\n\n**Seasonally adjusted estimate of total employment** (Q4)\n\n* Probably locally trended.\n* Perhaps use drift method based on average monthly change in last 2 years.\n\n# Some case studies\n\n## CASE STUDY 1: Paperware company\n\n\\fontsize{11.5}{13}\\sf\n\n\\begin{textblock}{9.2}(0.2,1.5)\n\\textbf{Problem:} Want forecasts of each of hundreds of\nitems. Series can be stationary, trended or seasonal. They currently\nhave a large forecasting program written in-house but it doesn't seem\nto produce sensible forecasts. They want me to fix it.\n\n\\textbf{Additional information}\\vspace*{-0.2cm}\\fontsize{11.5}{13}\\sf\n\\begin{itemize}\\itemsep=0cm\\parskip=0cm\n\\item Program written in COBOL making numerical calculations limited. It is not possible to do any optimisation.\n\\item Their programmer has little experience in numerical computing.\n\\item They employ no statisticians and want the program to produce forecasts automatically.\n\\end{itemize}\n\\end{textblock}\n\n\\placefig{10.2}{1.4}{width=5.8cm}{tableware2}\n\n## CASE STUDY 1: Paperware company\n\n### Methods currently used\n\\vspace*{0.2cm}\n\nA\n: 12 month average\n\nC\n: 6 month average\n\nE\n: straight line regression over last 12 months\n\nG\n: straight line regression over last 6 months\n\nH\n: average slope between last year's and this year's values.\n (Equivalent to differencing at lag 12 and taking mean.)\n\nI\n: Same as H except over 6 months.\n\nK\n: I couldn't understand the explanation.\n\n## CASE STUDY 2: PBS\n\n\\fullwidth{pills}\n\n## CASE STUDY 2: PBS\n\n### The Pharmaceutical Benefits Scheme (PBS) is the Australian government drugs subsidy scheme.\n\n * Many drugs bought from pharmacies are subsidised to allow more equitable access to modern drugs.\n * The cost to government is determined by the number and types of drugs purchased. Currently nearly 1\\% of GDP.\n * The total cost is budgeted based on forecasts of drug usage.\n\n## CASE STUDY 2: PBS\n\n\\fullheight{pbs2}\n\n## CASE STUDY 2: PBS\n\n * In 2001: \\$4.5 billion budget, under-forecasted by \\$800 million.\n * Thousands of products. 
Seasonal demand.\n * Subject to covert marketing, volatile products, uncontrollable expenditure.\n * Although monthly data available for 10 years, data are aggregated to annual values, and only the first three years are used in estimating the forecasts.\n * All forecasts being done with the \\texttt{FORECAST} function in MS-Excel!\n\n## CASE STUDY 3: Car fleet company\n\n**Client:** One of Australia's largest car fleet companies\n\n**Problem:** how to forecast resale value of vehicles? How\nshould this affect leasing and sales policies?\n\n\\pause\n\n### Additional information\n - They can provide a large amount of data on previous vehicles and their eventual resale values.\n - The resale values are currently estimated by a group of specialists. They see me as a threat and do not cooperate.\n\n## CASE STUDY 4: Airline\n\n\\fullheight{ansettlogo}\n\n## CASE STUDY 4: Airline\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## CASE STUDY 4: Airline\n\n**Problem:** how to forecast passenger traffic on major routes?\n\n### Additional information\n\n * They can provide a large amount of data on previous routes.\n * Traffic is affected by school holidays, special events such as\nthe Grand Prix, advertising campaigns, competition behaviour, etc.\n * They have a highly capable team of people who are able to do\nmost of the computing.\n\n# Exam\n\n## Exam: 5.00pm (AEST) 13 June\n\\fontsize{13}{14}\\sf\n\nFive Sections, all to be attempted.\\vspace*{-0.1cm}\n\nA\n: Short answers/explanations. Write about 1/4 page on four topics (out of six possible topics). Nuanced answers required. \\pause\n\nB\n: Describing a time series, decomposition, choosing a forecasting method. \\pause\n\nC, D, E\n: Benchmarks, ETS models, ARIMA models, Dynamic regression models, forecast evaluation.\\pause\n\n* Interpretation of R output, but no coding.\n* Closed book\n* Allowed: a calculator, 1 A4 double-sided sheet of notes, 5 working sheets\n\n## Preparing for the exam\n\n - Exams from 2018--2022 on Moodle already.\n - Solutions available from 5 June\n - Exercises. Make sure you have done them all (especially the last two topics -- revise the lecture examples)!\n - Identify your weak points and practice them.\n - Write your own summary of the material.\n - Practice explaining the material to a classmate.\n\n## Help available\n\n * See us during the consultation times (for details refer to the moodle page).\n * Discuss on the moodle forum.\n\n## Useful resources for forecasters\n\\fontsize{14}{14}\\sf\n\n\\alert{Organization:}\n\n * International Institute of Forecasters.\n\n\\alert{Annual Conference:}\n\n * International Symposium on Forecasting\\vspace*{-0.2cm}\n\n * Charlottesville, Virginia, June 25--28, 2023\n\n\\alert{Journals:}\n\n * International Journal of Forecasting\n * Foresight (the practitioner's journal)\n\n###\nLinks to all of the above at **forecasters.org**\n\n## IIF Best Student Award\n\\fontsize{14}{16}\\sf\n\n - https://forecasters.org/programs/research-awards/students/\n - US\\$100\n - A certificate of achievement from the IIF\n - One year free membership of the Institute with all attendant benefits. 
Subscriptions to:\n\n - the International Journal of Forecasting\n - the practitioner journal: Foresight\n - The Oracle newsletter\n\nDiscounts on conference and workshop fees, and links to a worldwide community of forecasters in many disciplines.\n\n## Happy forecasting\n\n\\begin{block}{}\nGood forecasters are not smarter than everyone else, they merely have their ignorance better organised.\n\\end{block}\n\n\\vspace*{2cm}\\pause\n\n\\begin{alertblock}{}\\centering\nPlease fill in your SETU\n\\end{alertblock}\n",
- "supporting": [
- "slides_files\\figure-html"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
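The Assignment 1 review in the deleted slides argues that stock prices are hard to beat with anything but a naive forecast, whose forecast variance is $h\sigma^2$. A sketch of that benchmark; re-indexing by trading day makes the irregular daily series regular:

```r
library(fpp3)

# Naive (random walk) benchmark for the Google closing price.
goog <- gafa_stock |>
  filter(Symbol == "GOOG") |>
  mutate(day = row_number()) |>
  update_tsibble(index = day, regular = TRUE)

# Point forecasts are flat; interval width grows with sqrt(h).
goog |>
  model(NAIVE(Close)) |>
  forecast(h = 10) |>
  hilo(level = 80)
```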
diff --git a/_freeze/week12/slides/execute-results/tex.json b/_freeze/week12/slides/execute-results/tex.json
deleted file mode 100644
index 1dca2f3..0000000
--- a/_freeze/week12/slides/execute-results/tex.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "hash": "4700e7964d1409aebe5d950dc4026d1d",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Revision\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n# Assignment 1\n\n## Assignment 1\n\n**Stock price forecasting** (Q1 and Q5)\n\n* Hard to beat naive forecast\n* Random walk model says forecast variance = $h\\sigma^2$.\n\n\\pause\n\n**Maximum temperature at Melbourne airport** (Q2)\n\n* Weather is relatively stationary over similar time of year and recent years.\n* So take mean and var of max temp in April over last 10 years.\n\n## Assignment 1\n\n**Difference in points in AFL match** (Q3)\n\n* Teams vary in strength from year to year.\n* Could look at distribution of for-against points for last few years across all games for each team. Assume distributions independent.\n\n\\pause\n\n**Seasonally adjusted estimate of total employment** (Q4)\n\n* Probably locally trended.\n* Perhaps use drift method based on average monthly change in last 2 years.\n\n# Some case studies\n\n## CASE STUDY 1: Paperware company\n\n\\fontsize{11.5}{13}\\sf\n\n\\begin{textblock}{9.2}(0.2,1.5)\n\\textbf{Problem:} Want forecasts of each of hundreds of\nitems. Series can be stationary, trended or seasonal. They currently\nhave a large forecasting program written in-house but it doesn't seem\nto produce sensible forecasts. They want me to fix it.\n\n\\textbf{Additional information}\\vspace*{-0.2cm}\\fontsize{11.5}{13}\\sf\n\\begin{itemize}\\itemsep=0cm\\parskip=0cm\n\\item Program written in COBOL making numerical calculations limited. It is not possible to do any optimisation.\n\\item Their programmer has little experience in numerical computing.\n\\item They employ no statisticians and want the program to produce forecasts automatically.\n\\end{itemize}\n\\end{textblock}\n\n\\placefig{10.2}{1.4}{width=5.8cm}{tableware2}\n\n## CASE STUDY 1: Paperware company\n\n### Methods currently used\n\\vspace*{0.2cm}\n\nA\n: 12 month average\n\nC\n: 6 month average\n\nE\n: straight line regression over last 12 months\n\nG\n: straight line regression over last 6 months\n\nH\n: average slope between last year's and this year's values.\n (Equivalent to differencing at lag 12 and taking mean.)\n\nI\n: Same as H except over 6 months.\n\nK\n: I couldn't understand the explanation.\n\n## CASE STUDY 2: PBS\n\n\\fullwidth{pills}\n\n## CASE STUDY 2: PBS\n\n### The Pharmaceutical Benefits Scheme (PBS) is the Australian government drugs subsidy scheme.\n\n * Many drugs bought from pharmacies are subsidised to allow more equitable access to modern drugs.\n * The cost to government is determined by the number and types of drugs purchased. Currently nearly 1\\% of GDP.\n * The total cost is budgeted based on forecasts of drug usage.\n\n## CASE STUDY 2: PBS\n\n\\fullheight{pbs2}\n\n## CASE STUDY 2: PBS\n\n * In 2001: \\$4.5 billion budget, under-forecasted by \\$800 million.\n * Thousands of products. Seasonal demand.\n * Subject to covert marketing, volatile products, uncontrollable expenditure.\n * Although monthly data available for 10 years, data are aggregated to annual values, and only the first three years are used in estimating the forecasts.\n * All forecasts being done with the \\texttt{FORECAST} function in MS-Excel!\n\n## CASE STUDY 3: Car fleet company\n\n**Client:** One of Australia's largest car fleet companies\n\n**Problem:** how to forecast resale value of vehicles? 
How\nshould this affect leasing and sales policies?\n\n\\pause\n\n### Additional information\n - They can provide a large amount of data on previous vehicles and their eventual resale values.\n - The resale values are currently estimated by a group of specialists. They see me as a threat and do not cooperate.\n\n## CASE STUDY 4: Airline\n\n\\fullheight{ansettlogo}\n\n## CASE STUDY 4: Airline\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## CASE STUDY 4: Airline\n\n**Problem:** how to forecast passenger traffic on major routes?\n\n### Additional information\n\n * They can provide a large amount of data on previous routes.\n * Traffic is affected by school holidays, special events such as\nthe Grand Prix, advertising campaigns, competition behaviour, etc.\n * They have a highly capable team of people who are able to do\nmost of the computing.\n\n# Exam\n\n## Exam: 5.00pm (AEST) 13 June\n\\fontsize{13}{14}\\sf\n\nFive Sections, all to be attempted.\\vspace*{-0.1cm}\n\nA\n: Short answers/explanations. Write about 1/4 page on four topics (out of six possible topics). Nuanced answers required. \\pause\n\nB\n: Describing a time series, decomposition, choosing a forecasting method. \\pause\n\nC, D, E\n: Benchmarks, ETS models, ARIMA models, Dynamic regression models, forecast evaluation.\\pause\n\n* Interpretation of R output, but no coding.\n* Closed book\n* Allowed: a calculator, 1 A4 double-sided sheet of notes, 5 working sheets\n\n## Preparing for the exam\n\n - Exams from 2018--2022 on Moodle already.\n - Solutions available from 5 June\n - Exercises. Make sure you have done them all (especially the last two topics -- revise the lecture examples)!\n - Identify your weak points and practice them.\n - Write your own summary of the material.\n - Practice explaining the material to a classmate.\n\n## Help available\n\n * See us during the consultation times (for details refer to the moodle page).\n * Discuss on the moodle forum.\n\n## Useful resources for forecasters\n\\fontsize{14}{14}\\sf\n\n\\alert{Organization:}\n\n * International Institute of Forecasters.\n\n\\alert{Annual Conference:}\n\n * International Symposium on Forecasting\\vspace*{-0.2cm}\n\n * Charlottesville, Virginia, June 25--28, 2023\n\n\\alert{Journals:}\n\n * International Journal of Forecasting\n * Foresight (the practitioner's journal)\n\n###\nLinks to all of the above at **forecasters.org**\n\n## IIF Best Student Award\n\\fontsize{14}{16}\\sf\n\n - https://forecasters.org/programs/research-awards/students/\n - US\\$100\n - A certificate of achievement from the IIF\n - One year free membership of the Institute with all attendant benefits. Subscriptions to:\n\n - the International Journal of Forecasting\n - the practitioner journal: Foresight\n - The Oracle newsletter\n\nDiscounts on conference and workshop fees, and links to a worldwide community of forecasters in many disciplines.\n\n## Happy forecasting\n\n\\begin{block}{}\nGood forecasters are not smarter than everyone else, they merely have their ignorance better organised.\n\\end{block}\n\n\\vspace*{2cm}\\pause\n\n\\begin{alertblock}{}\\centering\nPlease fill in your SETU\n\\end{alertblock}\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
diff --git a/_freeze/week12/slides/figure-beamer/unnamed-chunk-1-1.pdf b/_freeze/week12/slides/figure-beamer/unnamed-chunk-1-1.pdf
deleted file mode 100644
index b9f9a49..0000000
Binary files a/_freeze/week12/slides/figure-beamer/unnamed-chunk-1-1.pdf and /dev/null differ
diff --git a/_freeze/week12/slides/figure-html/unnamed-chunk-1-1.png b/_freeze/week12/slides/figure-html/unnamed-chunk-1-1.png
deleted file mode 100644
index deb29f3..0000000
Binary files a/_freeze/week12/slides/figure-html/unnamed-chunk-1-1.png and /dev/null differ
diff --git a/_freeze/week2/.DS_Store b/_freeze/week2/.DS_Store
deleted file mode 100644
index a34aeca..0000000
Binary files a/_freeze/week2/.DS_Store and /dev/null differ
diff --git a/_freeze/week2/activities/execute-results/html.json b/_freeze/week2/activities/execute-results/html.json
deleted file mode 100644
index b05bc67..0000000
--- a/_freeze/week2/activities/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "b2e1cb4f3666f361ef376245bd6458a1",
- "result": {
- "markdown": "\n\n1. We have introduced various functions for time series graphics include `autoplot()`, `gg_season()`, `gg_subseries()`, `gg_lag()` and `ACF`. Use these functions to explore the quarterly tourism data for the Snowy Mountains.\n\n ```r\n snowy <- tourism |> filter(Region == \"Snowy Mountains\")\n ```\n\n What do you learn?\n\n2. Which time plot corresponds to which ACF plot?\n\n\n::: {.cell fig.asp='0.5'}\n::: {.cell-output-display}\n{width=100%}\n:::\n:::\n\n\n3. You can compute the daily changes in the Google stock price in 2018 using the code below. Do the daily changes look like white noise?\n\n\\fontsize{11.5}{15}\\sf\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndgoog <- gafa_stock |>\n filter(Symbol == \"GOOG\", year(Date) >= 2018) |>\n mutate(trading_day = row_number()) |>\n update_tsibble(index=trading_day, regular=TRUE) |>\n mutate(diff = difference(Close))\n```\n:::\n\n\n\\fontsize{14}{16}\\sf\n\n \n",
- "supporting": [
- "activities_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
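Question 3 of the deleted activity asks whether the daily changes look like white noise; the usual check is an ACF plot of the computed `diff` column, assuming the `dgoog` object from the chunk above:

```r
# If the changes are white noise, roughly 95% of the ACF spikes should
# lie within the dashed significance bounds.
dgoog |> ACF(diff) |> autoplot()
```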
diff --git a/_freeze/week2/activities/figure-html/acf-quiz-1.png b/_freeze/week2/activities/figure-html/acf-quiz-1.png
deleted file mode 100644
index c24626d..0000000
Binary files a/_freeze/week2/activities/figure-html/acf-quiz-1.png and /dev/null differ
diff --git a/_freeze/week2/index/execute-results/html.json b/_freeze/week2/index/execute-results/html.json
deleted file mode 100644
index 57db79a..0000000
--- a/_freeze/week2/index/execute-results/html.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "hash": "80d3392f31b8a83308273a149c6802c3",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 2: Time series graphics\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Different types of plots for time series including time plots, season plots, subseries plots, lag plots and ACF plots\n* The difference between seasonal patterns and cyclic patterns in time series\n* What is \"white noise\" and how to identify it.\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 1-5 from [Section 2.10 of the book](https://otexts.com/fpp3/graphics-exercises.html).\n\n\n\n\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Chapter 2 of the textbook](https://otexts.com/fpp3/graphics.html) and watch all embedded videos\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n## Assignments\n\n* [IA1](../assignments/A1.qmd) is due on Monday 10 March.\n* [GA1](../assignments/G1.qmd) is due on Monday 24 March.\n",
- "supporting": [
- "index_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week2/index/figure-html/acf-quiz-1.png b/_freeze/week2/index/figure-html/acf-quiz-1.png
deleted file mode 100644
index bcf4ede..0000000
Binary files a/_freeze/week2/index/figure-html/acf-quiz-1.png and /dev/null differ
diff --git a/_freeze/week2/slides/execute-results/html.json b/_freeze/week2/slides/execute-results/html.json
deleted file mode 100644
index 8a30cd3..0000000
--- a/_freeze/week2/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "1d5a4ce8b36cb5165ea8e01478db44d7",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 2: Time series graphics\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## CASE STUDY 1: Paperware company\n\n\\fontsize{11.5}{13}\\sf\n\n\\begin{textblock}{9.2}(0.2,1.5)\n\\textbf{Problem:} Want forecasts of each of hundreds of\nitems. Series can be stationary, trended or seasonal. They currently\nhave a large forecasting program written in-house but it doesn't seem\nto produce sensible forecasts. They want me to fix it.\n\n\\textbf{Additional information}\\vspace*{-0.2cm}\\fontsize{11.5}{13}\\sf\n\\begin{itemize}\\itemsep=0cm\\parskip=0cm\n\\item Program written in COBOL making numerical calculations limited. It is not possible to do any optimisation.\n\\item Their programmer has little experience in numerical computing.\n\\item They employ no statisticians and want the program to produce forecasts automatically.\n\\end{itemize}\n\\end{textblock}\n\n\\placefig{10.2}{1.4}{width=5.8cm}{tableware2}\n\n## CASE STUDY 1: Paperware company\n\\vspace*{0.2cm}\n\n### Methods currently used\n\nA\n: 12 month average\n\nC\n: 6 month average\n\nE\n: straight line regression over last 12 months\n\nG\n: straight line regression over last 6 months\n\nH\n: average slope between last year's and this year's values.\n (Equivalent to differencing at lag 12 and taking mean.)\n\nI\n: Same as H except over 6 months.\n\nK\n: I couldn't understand the explanation.\n\n## CASE STUDY 2: PBS\n\n\\fullwidth{pills}\n\n## CASE STUDY 2: PBS\n\n### The Pharmaceutical Benefits Scheme (PBS) is the Australian government drugs subsidy scheme.\n\n * Many drugs bought from pharmacies are subsidised to allow more equitable access to modern drugs.\n * The cost to government is determined by the number and types of drugs purchased. Currently nearly 1\\% of GDP.\n * The total cost is budgeted based on forecasts of drug usage.\n\n## CASE STUDY 2: PBS\n\n\\fullheight{pbs2}\n\n## CASE STUDY 2: PBS\n\n * In 2001: \\$4.5 billion budget, under-forecasted by \\$800 million.\n * Thousands of products. Seasonal demand.\n * Subject to covert marketing, volatile products, uncontrollable expenditure.\n * Although monthly data available for 10 years, data are aggregated to annual values, and only the first three years are used in estimating the forecasts.\n * All forecasts being done with the \\texttt{FORECAST} function in MS-Excel!\n\n## CASE STUDY 3: Car fleet company\n\n**Client:** One of Australia's largest car fleet companies\n\n**Problem:** how to forecast resale value of vehicles? How\nshould this affect leasing and sales policies?\n\n\\pause\n\n### Additional information\n - They can provide a large amount of data on previous vehicles and their eventual resale values.\n - The resale values are currently estimated by a group of specialists. They see me as a threat and do not cooperate.\n\n## CASE STUDY 4: Airline\n\n\\fullheight{ansettlogo}\n\n## CASE STUDY 4: Airline\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n\\only<2>{\\begin{textblock}{4.2}(11,5.5)\n\\begin{alertblock}{}\nNot the real data! 
Or is it?\n\\end{alertblock}\n\\end{textblock}}\n\n## CASE STUDY 4: Airline\n\n**Problem:** how to forecast passenger traffic on major routes?\n\n### Additional information\n\n * They can provide a large amount of data on previous routes.\n * Traffic is affected by school holidays, special events such as\nthe Grand Prix, advertising campaigns, competition behaviour, etc.\n * They have a highly capable team of people who are able to do\nmost of the computing.\n\n## Seasonal or cyclic?\n\n\\alert{Differences between seasonal and cyclic patterns:}\n\n * seasonal pattern constant length; cyclic pattern variable length\n * average length of cycle longer than length of seasonal pattern\n * magnitude of cycle more variable than magnitude of seasonal pattern\n\n\\pause\n\n\\begin{alertblock}{}\nThe timing of peaks and troughs is predictable with seasonal data, but unpredictable in the long term with cyclic data.\n\\end{alertblock}\n\n\n## Trend and seasonality in ACF plots\n\n- When data have a trend, the autocorrelations for small lags tend to be large and positive.\n- When data are seasonal, the autocorrelations will be larger at the seasonal lags (i.e., at multiples of the seasonal frequency)\n- When data are trended and seasonal, you see a combination of these effects.\n\n## Your turn\n\nWe have introduced various functions for time series graphics, including `autoplot()`, `gg_season()`, `gg_subseries()`, `gg_lag()` and `ACF()`. Use these functions to explore the quarterly tourism data for the Snowy Mountains.\n\n```r\nsnowy <- tourism |> filter(Region == \"Snowy Mountains\")\n```\n\nWhat do you learn?\n\n\n## Which is which?\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=14.5cm}\n:::\n:::\n",
- "supporting": [
- "slides_files\\figure-html"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
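The "Your turn" slide in the deleted deck leaves the Snowy Mountains exploration open. One possible set of plots (the filtered tsibble keeps `Purpose` as a key, so each plot facets into four panels):

```r
library(fpp3)

snowy <- tourism |> filter(Region == "Snowy Mountains")
snowy |> autoplot(Trips)      # time plots of each Purpose series
snowy |> gg_season(Trips)     # quarterly seasonal pattern
snowy |> gg_subseries(Trips)  # how each quarter evolves over time
```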
diff --git a/_freeze/week2/slides/execute-results/tex.json b/_freeze/week2/slides/execute-results/tex.json
deleted file mode 100644
index 9456120..0000000
--- a/_freeze/week2/slides/execute-results/tex.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "hash": "0c2b7d878f98ef4c951977b9b55c1428",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 2: Time series graphics\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n## CASE STUDY 1: Paperware company\n\n\\fontsize{11.5}{13}\\sf\n\n\\begin{textblock}{9.2}(0.2,1.5)\n\\textbf{Problem:} Want forecasts of each of hundreds of\nitems. Series can be stationary, trended or seasonal. They currently\nhave a large forecasting program written in-house but it doesn't seem\nto produce sensible forecasts. They want me to fix it.\n\n\\textbf{Additional information}\\vspace*{-0.2cm}\\fontsize{11.5}{13}\\sf\n\\begin{itemize}\\itemsep=0cm\\parskip=0cm\n\\item Program written in COBOL making numerical calculations limited. It is not possible to do any optimisation.\n\\item Their programmer has little experience in numerical computing.\n\\item They employ no statisticians and want the program to produce forecasts automatically.\n\\end{itemize}\n\\end{textblock}\n\n\\placefig{10.2}{1.4}{width=5.8cm}{tableware2}\n\n## CASE STUDY 1: Paperware company\n\\vspace*{0.2cm}\n\n### Methods currently used\n\nA\n: 12 month average\n\nC\n: 6 month average\n\nE\n: straight line regression over last 12 months\n\nG\n: straight line regression over last 6 months\n\nH\n: average slope between last year's and this year's values.\n (Equivalent to differencing at lag 12 and taking mean.)\n\nI\n: Same as H except over 6 months.\n\nK\n: I couldn't understand the explanation.\n\n## CASE STUDY 2: PBS\n\n\\fullwidth{pills}\n\n## CASE STUDY 2: PBS\n\n### The Pharmaceutical Benefits Scheme (PBS) is the Australian government drugs subsidy scheme.\n\n * Many drugs bought from pharmacies are subsidised to allow more equitable access to modern drugs.\n * The cost to government is determined by the number and types of drugs purchased. Currently nearly 1\\% of GDP.\n * The total cost is budgeted based on forecasts of drug usage.\n\n## CASE STUDY 2: PBS\n\n\\fullheight{pbs2}\n\n## CASE STUDY 2: PBS\n\n * In 2001: \\$4.5 billion budget, under-forecasted by \\$800 million.\n * Thousands of products. Seasonal demand.\n * Subject to covert marketing, volatile products, uncontrollable expenditure.\n * Although monthly data available for 10 years, data are aggregated to annual values, and only the first three years are used in estimating the forecasts.\n * All forecasts being done with the \\texttt{FORECAST} function in MS-Excel!\n\n## CASE STUDY 3: Car fleet company\n\n**Client:** One of Australia's largest car fleet companies\n\n**Problem:** how to forecast resale value of vehicles? How\nshould this affect leasing and sales policies?\n\n\\pause\n\n### Additional information\n - They can provide a large amount of data on previous vehicles and their eventual resale values.\n - The resale values are currently estimated by a group of specialists. They see me as a threat and do not cooperate.\n\n## CASE STUDY 4: Airline\n\n\\fullheight{ansettlogo}\n\n## CASE STUDY 4: Airline\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n:::\n:::\n\n\n\\only<2>{\\begin{textblock}{4.2}(11,5.5)\n\\begin{alertblock}{}\nNot the real data! 
Or is it?\n\\end{alertblock}\n\\end{textblock}}\n\n## CASE STUDY 4: Airline\n\n**Problem:** how to forecast passenger traffic on major routes?\n\n### Additional information\n\n * They can provide a large amount of data on previous routes.\n * Traffic is affected by school holidays, special events such as\nthe Grand Prix, advertising campaigns, competition behaviour, etc.\n * They have a highly capable team of people who are able to do\nmost of the computing.\n\n## Seasonal or cyclic?\n\n\\alert{Differences between seasonal and cyclic patterns:}\n\n * seasonal pattern constant length; cyclic pattern variable length\n * average length of cycle longer than length of seasonal pattern\n * magnitude of cycle more variable than magnitude of seasonal pattern\n\n\\pause\n\n\\begin{alertblock}{}\nThe timing of peaks and troughs is predictable with seasonal data, but unpredictable in the long term with cyclic data.\n\\end{alertblock}\n\n\n## Trend and seasonality in ACF plots\n\n- When data have a trend, the autocorrelations for small lags tend to be large and positive.\n- When data are seasonal, the autocorrelations will be larger at the seasonal lags (i.e., at multiples of the seasonal frequency)\n- When data are trended and seasonal, you see a combination of these effects.\n\n## Your turn\n\nWe have introduced various functions for time series graphics include `autoplot()`, `gg_season()`, `gg_subseries()`, `gg_lag()` and `ACF`. Use these functions to explore the quarterly tourism data for the Snowy Mountains.\n\n```r\nsnowy <- tourism |> filter(Region == \"Snowy Mountains\")\n```\n\nWhat do you learn?\n\n\n## Which is which?\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=14.5cm}\n:::\n:::\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
diff --git a/_freeze/week2/slides/figure-beamer/unnamed-chunk-1-1.pdf b/_freeze/week2/slides/figure-beamer/unnamed-chunk-1-1.pdf
deleted file mode 100644
index 7e048ff..0000000
Binary files a/_freeze/week2/slides/figure-beamer/unnamed-chunk-1-1.pdf and /dev/null differ
diff --git a/_freeze/week2/slides/figure-beamer/unnamed-chunk-2-1.pdf b/_freeze/week2/slides/figure-beamer/unnamed-chunk-2-1.pdf
deleted file mode 100644
index 63dc930..0000000
Binary files a/_freeze/week2/slides/figure-beamer/unnamed-chunk-2-1.pdf and /dev/null differ
diff --git a/_freeze/week2/slides/figure-html/unnamed-chunk-1-1.png b/_freeze/week2/slides/figure-html/unnamed-chunk-1-1.png
deleted file mode 100644
index deb29f3..0000000
Binary files a/_freeze/week2/slides/figure-html/unnamed-chunk-1-1.png and /dev/null differ
diff --git a/_freeze/week2/slides/figure-html/unnamed-chunk-2-1.png b/_freeze/week2/slides/figure-html/unnamed-chunk-2-1.png
deleted file mode 100644
index c24626d..0000000
Binary files a/_freeze/week2/slides/figure-html/unnamed-chunk-2-1.png and /dev/null differ
diff --git a/_freeze/week3/.DS_Store b/_freeze/week3/.DS_Store
deleted file mode 100644
index f9678e7..0000000
Binary files a/_freeze/week3/.DS_Store and /dev/null differ
diff --git a/_freeze/week3/index/execute-results/html.json b/_freeze/week3/index/execute-results/html.json
deleted file mode 100644
index d59b76a..0000000
--- a/_freeze/week3/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "d5f870877494554091a9579b6dc0a0e0",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 3: Time series decomposition\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Transforming data to remove some sources of variation\n* Decomposing a time series into trend-cycle, seasonal and remainder components\n* Seasonal adjustment\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 6, 8-11 from [Section 2.10 of the book](https://otexts.com/fpp3/graphics-exercises.html).\n\n\n## Exercise solutions\n\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Chapter 3 of the textbook](https://otexts.com/fpp3/decomposition.html) and watch all embedded videos\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n\n1. For the following series, find an appropriate Box-Cox transformation in order to stabilise the variance.\n\n * United States GDP from `global_economy`\n * Slaughter of Victorian “Bulls, bullocks and steers” in `aus_livestock`\n * Victorian Electricity Demand from `vic_elec`.\n * Gas production from `aus_production`\n\n2. Why is a Box-Cox transformation unhelpful for the `canadian_gas` data?\n\n3. Produce the following decomposition for the number (in thousands) of of people employed in Retail Trade in the US\n\n ```r\n us_retail_employment <- us_employment |>\n filter(year(Month) >= 1990, Title == \"Retail Trade\") |>\n select(-Series_ID)\n\n dcmp <- us_retail_employment |>\n model(stl = STL(Employed)) \n ```\n \n a. Plot the decomposition.\n \n b. Fit the trend component over the data [Hint: you can use `autolayer()` to add `trend` to the plot above. `trend` is one of the variables returned by `STL()`. ]\n \n c. Fit the trend and the seasonally adjusted [Hint: `seas_adjust` is one of the variables returned by `STL`. ]\n \n d. How does the seasonal shape change over time? [Hint: Try plotting the seasonal component using `gg_season()`.]\n \n e. What happens as you change the values of the two `window` arguments?\n \n f. Can you produce a plausible seasonally adjusted series? \n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week3.R](Seminar_code_week3.R){download=\"Seminar_code_week3.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [GA1](../assignments/G1.qmd) is due on Monday 24 March.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week3/slides/execute-results/html.json b/_freeze/week3/slides/execute-results/html.json
deleted file mode 100644
index 52ee1fd..0000000
--- a/_freeze/week3/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "60bbaae1788beca1c4eea6c5077f1c3d",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 3: Time series decomposition\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n\n## The ABS stuff-up\n\n\\fullheight{abs1}\n\n## The ABS stuff-up\n\n\\fullheight{abs2}\n\n## The ABS stuff-up\n\n\\fullheight{abs3}\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n:::\n\n\n\n\\fontsize{10}{10}\\sf\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed\n```\n\n::: {.cell-output .cell-output-stdout}\n```\n# A tsibble: 440 x 4 [1M]\n Time Month Year Employed\n \n 1 1978 Feb Feb 1978 5986.\n 2 1978 Mar Mar 1978 6041.\n 3 1978 Apr Apr 1978 6054.\n 4 1978 May May 1978 6038.\n 5 1978 Jun Jun 1978 6031.\n 6 1978 Jul Jul 1978 6036.\n 7 1978 Aug Aug 1978 6005.\n 8 1978 Sep Sep 1978 6024.\n 9 1978 Oct Oct 1978 6046.\n10 1978 Nov Nov 1978 6034.\n# ℹ 430 more rows\n```\n:::\n:::\n\n\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n autoplot(Employed) +\n labs(title = \"Total employed\", y = \"Thousands\")\n```\n\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n filter(Year >= 2005) |>\n autoplot(Employed) +\n labs(title = \"Total employed\", y = \"Thousands\")\n```\n\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n filter(Year >= 2005) |>\n gg_season(Employed, labels = \"right\") +\n labs(title = \"Total employed\", y = \"Thousands\")\n```\n\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n mutate(diff = difference(Employed)) |>\n filter(Month == \"Sep\") |>\n ggplot(aes(y = diff, x = 1)) +\n geom_boxplot() +\n coord_flip() +\n labs(title = \"Sep - Aug: total employed\", y = \"Thousands\") +\n scale_x_continuous(breaks = NULL, labels = NULL)\n```\n\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndcmp <- employed |>\n filter(Year >= 2005) |>\n model(stl = STL(Employed ~ season(window = 11), robust = TRUE))\ncomponents(dcmp) |> autoplot()\n```\n\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncomponents(dcmp) |>\n filter(year(Time) == 2013) |>\n gg_season(season_year) +\n labs(title = \"Seasonal component\") + guides(colour = \"none\")\n```\n\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## The ABS stuff-up\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncomponents(dcmp) |>\n as_tsibble() |>\n autoplot(season_adjust)\n```\n\n::: {.cell-output-display}\n{width=672}\n:::\n:::\n\n\n\n## The ABS stuff-up\n\\fontsize{13}{15}\\sf\n\n * August 2014 employment numbers higher than expected.\n * Supplementary survey usually conducted in August for employed people.\n * Most likely, some employed people were claiming to be unemployed in August to avoid supplementary questions.\n * Supplementary survey not run in 2014, so no motivation to lie about employment.\n * In previous years, seasonal adjustment fixed the problem.\n * The ABS has now adopted a new method to avoid the bias.\n",
- "supporting": [
- "slides_files\\figure-html"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week3/slides/execute-results/tex.json b/_freeze/week3/slides/execute-results/tex.json
deleted file mode 100644
index 955fc5d..0000000
--- a/_freeze/week3/slides/execute-results/tex.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "hash": "8fcb7a4d8d57951bda6f52104b028e13",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 3: Time series decomposition\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## The ABS stuff-up\n\n\\fullheight{abs1}\n\n## The ABS stuff-up\n\n\\fullheight{abs2}\n\n## The ABS stuff-up\n\n\\fullheight{abs3}\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n:::\n\n\n\\fontsize{10}{10}\\sf\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed\n```\n\n::: {.cell-output .cell-output-stdout}\n\n```\n# A tsibble: 440 x 4 [1M]\n Time Month Year Employed\n \n 1 1978 Feb Feb 1978 5986.\n 2 1978 Mar Mar 1978 6041.\n 3 1978 Apr Apr 1978 6054.\n 4 1978 May May 1978 6038.\n 5 1978 Jun Jun 1978 6031.\n 6 1978 Jul Jul 1978 6036.\n 7 1978 Aug Aug 1978 6005.\n 8 1978 Sep Sep 1978 6024.\n 9 1978 Oct Oct 1978 6046.\n10 1978 Nov Nov 1978 6034.\n# i 430 more rows\n```\n\n\n:::\n:::\n\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n autoplot(Employed) +\n labs(title = \"Total employed\", y = \"Thousands\")\n```\n\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n filter(Year >= 2005) |>\n autoplot(Employed) +\n labs(title = \"Total employed\", y = \"Thousands\")\n```\n\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n filter(Year >= 2005) |>\n gg_season(Employed, labels = \"right\") +\n labs(title = \"Total employed\", y = \"Thousands\")\n```\n\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n```{.r .cell-code}\nemployed |>\n mutate(diff = difference(Employed)) |>\n filter(Month == \"Sep\") |>\n ggplot(aes(y = diff, x = 1)) +\n geom_boxplot() +\n coord_flip() +\n labs(title = \"Sep - Aug: total employed\", y = \"Thousands\") +\n scale_x_continuous(breaks = NULL, labels = NULL)\n```\n\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n```{.r .cell-code}\ndcmp <- employed |>\n filter(Year >= 2005) |>\n model(stl = STL(Employed ~ season(window = 11), robust = TRUE))\ncomponents(dcmp) |> autoplot()\n```\n\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncomponents(dcmp) |>\n filter(year(Time) == 2013) |>\n gg_season(season_year) +\n labs(title = \"Seasonal component\") + guides(colour = \"none\")\n```\n\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## The ABS stuff-up\n\n\n::: {.cell}\n\n```{.r .cell-code}\ncomponents(dcmp) |>\n as_tsibble() |>\n autoplot(season_adjust)\n```\n\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## The ABS stuff-up\n\\fontsize{13}{15}\\sf\n\n * August 2014 employment numbers higher than expected.\n * Supplementary survey usually conducted in August for employed people.\n * Most likely, some employed people were claiming to be unemployed in August to avoid supplementary questions.\n * Supplementary survey not run in 2014, so no motivation to lie about employment.\n * In previous years, seasonal adjustment fixed the problem.\n * The ABS has now adopted a new method to avoid the bias.\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
diff --git a/_freeze/week3/slides/figure-beamer/abs3-1.pdf b/_freeze/week3/slides/figure-beamer/abs3-1.pdf
deleted file mode 100644
index a662daa..0000000
Binary files a/_freeze/week3/slides/figure-beamer/abs3-1.pdf and /dev/null differ
diff --git a/_freeze/week3/slides/figure-beamer/abs4-1.pdf b/_freeze/week3/slides/figure-beamer/abs4-1.pdf
deleted file mode 100644
index c3f02d4..0000000
Binary files a/_freeze/week3/slides/figure-beamer/abs4-1.pdf and /dev/null differ
diff --git a/_freeze/week3/slides/figure-beamer/abs5-1.pdf b/_freeze/week3/slides/figure-beamer/abs5-1.pdf
deleted file mode 100644
index 3a95fd2..0000000
Binary files a/_freeze/week3/slides/figure-beamer/abs5-1.pdf and /dev/null differ
diff --git a/_freeze/week3/slides/figure-beamer/abs6-1.pdf b/_freeze/week3/slides/figure-beamer/abs6-1.pdf
deleted file mode 100644
index fec369e..0000000
Binary files a/_freeze/week3/slides/figure-beamer/abs6-1.pdf and /dev/null differ
diff --git a/_freeze/week3/slides/figure-beamer/abs7-1.pdf b/_freeze/week3/slides/figure-beamer/abs7-1.pdf
deleted file mode 100644
index 1582d5d..0000000
Binary files a/_freeze/week3/slides/figure-beamer/abs7-1.pdf and /dev/null differ
diff --git a/_freeze/week3/slides/figure-beamer/abs8-1.pdf b/_freeze/week3/slides/figure-beamer/abs8-1.pdf
deleted file mode 100644
index 4119771..0000000
Binary files a/_freeze/week3/slides/figure-beamer/abs8-1.pdf and /dev/null differ
diff --git a/_freeze/week3/slides/figure-beamer/abs9-1.pdf b/_freeze/week3/slides/figure-beamer/abs9-1.pdf
deleted file mode 100644
index ab2dda7..0000000
Binary files a/_freeze/week3/slides/figure-beamer/abs9-1.pdf and /dev/null differ
diff --git a/_freeze/week3/slides/figure-html/abs3-1.png b/_freeze/week3/slides/figure-html/abs3-1.png
deleted file mode 100644
index 05d66bc..0000000
Binary files a/_freeze/week3/slides/figure-html/abs3-1.png and /dev/null differ
diff --git a/_freeze/week3/slides/figure-html/abs4-1.png b/_freeze/week3/slides/figure-html/abs4-1.png
deleted file mode 100644
index 97834d7..0000000
Binary files a/_freeze/week3/slides/figure-html/abs4-1.png and /dev/null differ
diff --git a/_freeze/week3/slides/figure-html/abs5-1.png b/_freeze/week3/slides/figure-html/abs5-1.png
deleted file mode 100644
index f646ebe..0000000
Binary files a/_freeze/week3/slides/figure-html/abs5-1.png and /dev/null differ
diff --git a/_freeze/week3/slides/figure-html/abs6-1.png b/_freeze/week3/slides/figure-html/abs6-1.png
deleted file mode 100644
index 51cd026..0000000
Binary files a/_freeze/week3/slides/figure-html/abs6-1.png and /dev/null differ
diff --git a/_freeze/week3/slides/figure-html/abs7-1.png b/_freeze/week3/slides/figure-html/abs7-1.png
deleted file mode 100644
index 2896b9e..0000000
Binary files a/_freeze/week3/slides/figure-html/abs7-1.png and /dev/null differ
diff --git a/_freeze/week3/slides/figure-html/abs8-1.png b/_freeze/week3/slides/figure-html/abs8-1.png
deleted file mode 100644
index 39b18bc..0000000
Binary files a/_freeze/week3/slides/figure-html/abs8-1.png and /dev/null differ
diff --git a/_freeze/week3/slides/figure-html/abs9-1.png b/_freeze/week3/slides/figure-html/abs9-1.png
deleted file mode 100644
index 1e236de..0000000
Binary files a/_freeze/week3/slides/figure-html/abs9-1.png and /dev/null differ
diff --git a/_freeze/week4/index/execute-results/html.json b/_freeze/week4/index/execute-results/html.json
deleted file mode 100644
index 8eb2f44..0000000
--- a/_freeze/week4/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "f0d78cc71cec16bd98c0cb23c8d8a942",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 4: The forecaster's toolbox\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Four benchmark forecasting methods that we will use for comparison\n* Fitted values, residuals\n* Forecasting with transformations\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercise 2-3, 5, 7, 9-10 from [Section 3.7 of the book](https://otexts.com/fpp3/decomposition-exercises.html)\n\n## Exercise solutions\n\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Chapter 5 of the textbook](https://otexts.com/fpp3/toolbox.html) and watch all embedded videos\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n\n1. Create a tsibble with total Holiday travellers for Victoria and Queensland from the `tourism` data set. Plot the series. What do you see?\n\n2. Generate 4 year ahead forecasts from all four benchmarks. Plot them using `autoplot()`. Comment in the resulting forecasts.\n\n3. Plot the residuals from the most appropriate benchmark using `gg_tsresiduals()`. What do you see?\n\n4. Test if the residuals are white noise. What do you conclude?\n\n5. Plot point and interval forecasts from the most appropriate benchmark.\n\n6. Now try a decomposition forecasting model.\n\n7. Use `accuracy()` to evaluate Which benchmark fits the data best.\n\n8. Use a test set of last 3 years to check forecast accuracy.\n\n9. Now use time series cross-validation to check forecast accuracy.\n\n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week4.R](Seminar_code_week4.R){download=\"Seminar_code_week4.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [GA1](../assignments/G1.qmd) is due on Monday 24 March.\n* [IA2](../assignments/A2.qmd) is due on Monday 07 April.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week5/.DS_Store b/_freeze/week5/.DS_Store
deleted file mode 100644
index 9f8e5bf..0000000
Binary files a/_freeze/week5/.DS_Store and /dev/null differ
diff --git a/_freeze/week5/index/execute-results/html.json b/_freeze/week5/index/execute-results/html.json
deleted file mode 100644
index 711026f..0000000
--- a/_freeze/week5/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "42b74de84cd3f5c67a3475d7ddc87a4a",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 5: Exponential smoothing\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Simple exponential smoothing\n* Corresponding ETS models\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 1-5, 8, 11-12 from [Section 5.11 of the book](https://otexts.com/fpp3/toolbox-exercises.html).\n\n## Exercise solutions\n\n\n\n\n\n\n## Pre-class activities\n\nRead [Sections 8.1-8.4 of the textbook](https://otexts.com/fpp3/expsmooth.html) and watch all embedded videos\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n1. Use the tsibble created from `tourism` for holiday travel in Victoria and Queensland. Plot the series to remind yourself what these look like.\n\n2. Use the `ETS()` function to fit models to both series. Explore the resulting `mable` using `report()`, `glance()` and `tidy()`\n\n3. Plot the estimated components of each model.\n\n4. Generate forecasts using `forecast()`.\n\n5. Plot the exports data for Algeria from the `global_economy` tsibble. Is this time series white noise? What ETS model would be appropriate?\n\n6. Use the `ETS()` function to fit appropriate models with both additive and multiplicative errors. What model is chosen automatically? Explore the estimated models.\n\n7. Plot the components of the two models. What do you see?\n\n8. Explore the residuals of the two models. What do you see?\n\n9. Generate and plot forecasts.\n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week5.R](Seminar_code_week5.R){download=\"Seminar_code_week5.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [IA2](../assignments/A2.qmd) is due on Monday 07 April.\n* [GA2](../assignments/G2.qmd) is due on Monday 14 April.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week5/slides/execute-results/html.json b/_freeze/week5/slides/execute-results/html.json
deleted file mode 100644
index 7ce7f1a..0000000
--- a/_freeze/week5/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "445d5d0a61f7d75144d9d284445bb75c",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 5: Simple Exponential smoothing\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## Historical perspective\n\n * Developed in the 1950s and 1960s as methods (algorithms) to produce point forecasts.\n * Combine a \"level\", \"trend\" (slope) and \"seasonal\" component to describe a time series.\n * The rate of change of the components are controlled by \"smoothing parameters\": $\\alpha$, $\\beta$ and $\\gamma$ respectively.\n * Need to choose best values for the smoothing parameters (and initial states).\n * Equivalent ETS state space models developed in the 1990s and 2000s.\n\n## Big idea: control the rate of change\n\\fontsize{13}{14}\\sf\n\n$\\alpha$ controls the flexibility of the **level**\n\n* If $\\alpha = 0$, the level never updates (mean)\n* If $\\alpha = 1$, the level updates completely (naive)\n\n$\\beta$ controls the flexibility of the **trend**\n\n* If $\\beta = 0$, the trend is linear\n* If $\\beta = 1$, the trend changes suddenly every observation\n\n$\\gamma$ controls the flexibility of the **seasonality**\n\n* If $\\gamma = 0$, the seasonality is fixed (seasonal means)\n* If $\\gamma = 1$, the seasonality updates completely (seasonal naive)\n\n## Models and methods\n\n### Methods\n\n * Algorithms that return point forecasts.\n\n### Models\n\n * Generate same point forecasts but can also generate forecast distributions.\n * A stochastic (or random) data generating process that can generate an entire forecast distribution.\n * Allow for \"proper\" model selection.\n\n## Simple Exponential Smoothing\n\n\\vspace*{0.2cm}\n\\begin{block}{Iterative form}\n\\centerline{$\\pred{y}{t+1}{t} = \\alpha y_t + (1-\\alpha) \\pred{y}{t}{t-1}$}\n\\end{block}\\pause\n\n\\begin{block}{Weighted average form}\n\\centerline{$\\displaystyle\\pred{y}{T+1}{T}=\\sum_{j=0}^{T-1} \\alpha(1-\\alpha)^j y_{T-j}+(1-\\alpha)^T \\ell_{0}$}\n\\end{block}\\pause\n\n\\begin{block}{Component form}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation}&&\\pred{y}{t+h}{t} &= \\ell_{t}\\\\\n\\text{Smoothing equation}&&\\ell_{t} &= \\alpha y_{t} + (1 - \\alpha)\\ell_{t-1}\n\\end{align*}\n\\end{block}\n\n## Simple Exponential Smoothing\n\\fontsize{14}{14}\\sf\n\n\\vspace*{0.2cm}\n\\begin{block}{Component form}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation}&&\\pred{y}{t+h}{t} &= \\ell_{t}\\\\\n\\text{Smoothing equation}&&\\ell_{t} &= \\alpha y_{t} + (1 - \\alpha)\\ell_{t-1}\n\\end{align*}\n\\end{block}\\pause\\vspace*{-0.2cm}\n\nForecast error: $e_t = y_t - \\pred{y}{t}{t-1} = y_t - \\ell_{t-1}$.\\pause\n\\begin{block}{Error correction form}\\vspace*{-0.8cm}\n\\begin{align*}\ny_t &= \\ell_{t-1} + e_t\\\\\n\\ell_{t}\n &= \\ell_{t-1}+\\alpha( y_{t}-\\ell_{t-1})\\\\\n &= \\ell_{t-1}+\\alpha e_{t}\n\\end{align*}\n\\end{block}\\pause\\vspace*{-0.2cm}\n\nSpecify probability distribution: $e_t = \\varepsilon_t\\sim\\text{NID}(0,\\sigma^2)$.\n\n## ETS(A,N,N): SES with additive errors\n\n\\vspace*{0.2cm}\n\\begin{block}{ETS(A,N,N) model}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Observation equation}&& y_t &= \\ell_{t-1} + \\varepsilon_t\\\\\n\\text{State equation}&& \\ell_t&=\\ell_{t-1}+\\alpha \\varepsilon_t\n\\end{align*}\n\\end{block}\nwhere $\\varepsilon_t\\sim\\text{NID}(0,\\sigma^2)$.\n\n * \"innovations\" or \"single source of error\" 
because equations have the same error process, $\\varepsilon_t$.\n * Observation equation: relationship between observations and states.\n * State equation(s): evolution of the state(s) through time.\n\n## ETS(M,N,N): SES with multiplicative errors.\n\n * Specify relative errors $\\varepsilon_t=\\frac{y_t-\\pred{y}{t}{t-1}}{\\pred{y}{t}{t-1}}\\sim \\text{NID}(0,\\sigma^2)$\n * Substituting $\\pred{y}{t}{t-1}=\\ell_{t-1}$ gives:\n * $y_t = \\ell_{t-1}+\\ell_{t-1}\\varepsilon_t$\n * $e_t = y_t - \\pred{y}{t}{t-1} = \\ell_{t-1}\\varepsilon_t$\n\n\\pause\n\\begin{block}{ETS(M,N,N) model}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Observation equation}&& y_t &= \\ell_{t-1}(1 + \\varepsilon_t)\\\\\n\\text{State equation}&& \\ell_t&=\\ell_{t-1}(1+\\alpha \\varepsilon_t)\n\\end{align*}\n\\end{block}\n\\pause\\vspace*{-0.4cm}\n\n * Models with additive and multiplicative errors with the same parameters generate the same point forecasts but different prediction intervals.\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week5/slides/execute-results/tex.json b/_freeze/week5/slides/execute-results/tex.json
deleted file mode 100644
index e990370..0000000
--- a/_freeze/week5/slides/execute-results/tex.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "aaa3eced3d1fb1fba5a05f8fb50e8116",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 5: Simple Exponential smoothing\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n## Historical perspective\n\n * Developed in the 1950s and 1960s as methods (algorithms) to produce point forecasts.\n * Combine a \"level\", \"trend\" (slope) and \"seasonal\" component to describe a time series.\n * The rate of change of the components are controlled by \"smoothing parameters\": $\\alpha$, $\\beta$ and $\\gamma$ respectively.\n * Need to choose best values for the smoothing parameters (and initial states).\n * Equivalent ETS state space models developed in the 1990s and 2000s.\n\n## Big idea: control the rate of change\n\\fontsize{13}{14}\\sf\n\n$\\alpha$ controls the flexibility of the **level**\n\n* If $\\alpha = 0$, the level never updates (mean)\n* If $\\alpha = 1$, the level updates completely (naive)\n\n$\\beta$ controls the flexibility of the **trend**\n\n* If $\\beta = 0$, the trend is linear\n* If $\\beta = 1$, the trend changes suddenly every observation\n\n$\\gamma$ controls the flexibility of the **seasonality**\n\n* If $\\gamma = 0$, the seasonality is fixed (seasonal means)\n* If $\\gamma = 1$, the seasonality updates completely (seasonal naive)\n\n## Models and methods\n\n### Methods\n\n * Algorithms that return point forecasts.\n\n### Models\n\n * Generate same point forecasts but can also generate forecast distributions.\n * A stochastic (or random) data generating process that can generate an entire forecast distribution.\n * Allow for \"proper\" model selection.\n\n## Simple Exponential Smoothing\n\n\\vspace*{0.2cm}\n\\begin{block}{Iterative form}\n\\centerline{$\\pred{y}{t+1}{t} = \\alpha y_t + (1-\\alpha) \\pred{y}{t}{t-1}$}\n\\end{block}\\pause\n\n\\begin{block}{Weighted average form}\n\\centerline{$\\displaystyle\\pred{y}{T+1}{T}=\\sum_{j=0}^{T-1} \\alpha(1-\\alpha)^j y_{T-j}+(1-\\alpha)^T \\ell_{0}$}\n\\end{block}\\pause\n\n\\begin{block}{Component form}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation}&&\\pred{y}{t+h}{t} &= \\ell_{t}\\\\\n\\text{Smoothing equation}&&\\ell_{t} &= \\alpha y_{t} + (1 - \\alpha)\\ell_{t-1}\n\\end{align*}\n\\end{block}\n\n## Simple Exponential Smoothing\n\\fontsize{14}{14}\\sf\n\n\\vspace*{0.2cm}\n\\begin{block}{Component form}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation}&&\\pred{y}{t+h}{t} &= \\ell_{t}\\\\\n\\text{Smoothing equation}&&\\ell_{t} &= \\alpha y_{t} + (1 - \\alpha)\\ell_{t-1}\n\\end{align*}\n\\end{block}\\pause\\vspace*{-0.2cm}\n\nForecast error: $e_t = y_t - \\pred{y}{t}{t-1} = y_t - \\ell_{t-1}$.\\pause\n\\begin{block}{Error correction form}\\vspace*{-0.8cm}\n\\begin{align*}\ny_t &= \\ell_{t-1} + e_t\\\\\n\\ell_{t}\n &= \\ell_{t-1}+\\alpha( y_{t}-\\ell_{t-1})\\\\\n &= \\ell_{t-1}+\\alpha e_{t}\n\\end{align*}\n\\end{block}\\pause\\vspace*{-0.2cm}\n\nSpecify probability distribution: $e_t = \\varepsilon_t\\sim\\text{NID}(0,\\sigma^2)$.\n\n## ETS(A,N,N): SES with additive errors\n\n\\vspace*{0.2cm}\n\\begin{block}{ETS(A,N,N) model}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Observation equation}&& y_t &= \\ell_{t-1} + \\varepsilon_t\\\\\n\\text{State equation}&& \\ell_t&=\\ell_{t-1}+\\alpha \\varepsilon_t\n\\end{align*}\n\\end{block}\nwhere $\\varepsilon_t\\sim\\text{NID}(0,\\sigma^2)$.\n\n * \"innovations\" or \"single source of error\" 
because equations have the same error process, $\\varepsilon_t$.\n * Observation equation: relationship between observations and states.\n * State equation(s): evolution of the state(s) through time.\n\n## ETS(M,N,N): SES with multiplicative errors.\n\n * Specify relative errors $\\varepsilon_t=\\frac{y_t-\\pred{y}{t}{t-1}}{\\pred{y}{t}{t-1}}\\sim \\text{NID}(0,\\sigma^2)$\n * Substituting $\\pred{y}{t}{t-1}=\\ell_{t-1}$ gives:\n * $y_t = \\ell_{t-1}+\\ell_{t-1}\\varepsilon_t$\n * $e_t = y_t - \\pred{y}{t}{t-1} = \\ell_{t-1}\\varepsilon_t$\n\n\\pause\n\\begin{block}{ETS(M,N,N) model}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Observation equation}&& y_t &= \\ell_{t-1}(1 + \\varepsilon_t)\\\\\n\\text{State equation}&& \\ell_t&=\\ell_{t-1}(1+\\alpha \\varepsilon_t)\n\\end{align*}\n\\end{block}\n\\pause\\vspace*{-0.4cm}\n\n * Models with additive and multiplicative errors with the same parameters generate the same point forecasts but different prediction intervals.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
diff --git a/_freeze/week6/.DS_Store b/_freeze/week6/.DS_Store
deleted file mode 100644
index b78eb21..0000000
Binary files a/_freeze/week6/.DS_Store and /dev/null differ
diff --git a/_freeze/week6/index/execute-results/html.json b/_freeze/week6/index/execute-results/html.json
deleted file mode 100644
index e1569d1..0000000
--- a/_freeze/week6/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "81d7298b416ff0171dc20fd841dab384",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 6: Exponential smoothing\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Exponential smoothing methods with trend and seasonality\n* ETS models\n* Automatic model selection using the AICc\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 1-4, 16, 17 from [Section 8.8](https://otexts.com/fpp3/expsmooth-exercises.html).\n\n## Exercise solutions\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Sections 8.4-8.7 of the textbook](https://otexts.com/fpp3/expsmooth.html) and watch all embedded videos.\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n1. Use the tsibble created from `tourism` for holiday travel in Victoria and Queensland. Plot the series to remind yourself what these look like.\n\n2. Use the `ETS()` function to fit models with additive and multiplicative errors to both series. Also let `ETS()` auto select models. Explore the fitted models and their residuals. \n\n3. Generate forecasts from the fitted models. Why is the multiplicative model needed for Victoria? \n\n4. Generate the `h02` series from the `PBS` tsibble we explored earlier using the code below.\n\n ```r\n h02 <- PBS |>\n filter(ATC2 == \"H02\") |>\n summarise(Cost = sum(Cost))\n ```\n5. Find an `ETS` model and study it. Why has a damped trend been selected? \n\n6. Generate forecasts for the next few years. \n\n7. Combine `STL` decomposition with `ETS` to forecast the `h02` series.\n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week6.R](Seminar_code_week6.R){download=\"Seminar_code_week6.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [IA2](../assignments/A2.qmd) is due on Monday 07 April.\n* [GA2](../assignments/G2.qmd) is due on Monday 14 April.\n* [IA3](../assignments/A3.qmd) is due on Monday 28 April.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week6/slides/execute-results/html.json b/_freeze/week6/slides/execute-results/html.json
deleted file mode 100644
index 9110944..0000000
--- a/_freeze/week6/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "8d8d6de1aa63d7aa6f42dffcae980c01",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 6: Exponential smoothing\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n\n## ETS models\n\n\\begin{block}{}\n\\hspace*{-0.25cm}\\begin{tabular}{l@{}p{2.3cm}@{}c@{}l}\n\\alert{General n\\rlap{otation}}\n & & ~E T S~ & ~:\\hspace*{0.3cm}\\textbf{E}xponen\\textbf{T}ial \\textbf{S}moothing \\\\ [-0.2cm]\n & \\hfill{$\\nearrow$\\hspace*{-0.1cm}} & {$\\uparrow$} & {\\hspace*{-0.2cm}$\\nwarrow$} \\\\\n & \\hfill{\\textbf{E}rror\\hspace*{0.2cm}} & {\\textbf{T}rend} & {\\hspace*{0.2cm}\\textbf{S}eason}\n\\end{tabular}\n\\end{block}\n\n\\alert{\\textbf{E}rror:} Additive (`\"A\"`) or multiplicative (`\"M\"`)\n\\pause\n\n\\alert{\\textbf{T}rend:} None (`\"N\"`), additive (`\"A\"`), multiplicative (`\"M\"`), or damped (`\"Ad\"` or `\"Md\"`).\n\\pause\n\n\\alert{\\textbf{S}easonality:} None (`\"N\"`), additive (`\"A\"`) or multiplicative (`\"M\"`)\n\n## ETS(A,N,N): SES with additive errors\n\\fontsize{14}{16}\\sf\\vspace*{0.2cm}\n\n\\begin{block}{ETS(A,N,N) model}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Observation equation}&& y_t &= \\ell_{t-1} + \\varepsilon_t\\\\\n\\text{State equation}&& \\ell_t&=\\ell_{t-1}+\\alpha \\varepsilon_t\n\\end{align*}\n\\end{block}\nwhere $\\varepsilon_t\\sim\\text{NID}(0,\\sigma^2)$.\n\n * \"innovations\" or \"single source of error\" because equations have the same error process, $\\varepsilon_t$.\n * Measurement equation: relationship between observations and states.\n * State equation(s): evolution of the state(s) through time.\n\n## ETS(A,A,N)\n\nHolt's methods method with additive errors.\\phantom{p}\n\n\\begin{block}{}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation} && \\hat{y}_{t+h|t} &= \\ell_{t} + hb_{t}\\\\\n\\text{Observation equation}&& y_t&=\\ell_{t-1}+b_{t-1} + \\varepsilon_t\\\\\n\\text{State equations}&& \\ell_t&=\\ell_{t-1}+b_{t-1}+\\alpha \\varepsilon_t\\\\\n&& b_t&=b_{t-1}+\\beta \\varepsilon_t\n\\end{align*}\n\\end{block}\n\n* Forecast errors: $\\varepsilon_{t} = y_t - \\hat{y}_{t|t-1}$\n\n\\vspace*{10cm}\n\n## ETS(A,A,A)\n\nHolt-Winters additive method with additive errors.\\phantom{p}\n\n\\begin{block}{}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation} && \\hat{y}_{t+h|t} &= \\ell_{t} + hb_{t} + s_{t+h-m(k+1)}\\\\\n\\text{Observation equation}&& y_t&=\\ell_{t-1}+b_{t-1}+s_{t-m} + \\varepsilon_t\\\\\n\\text{State equations}&& \\ell_t&=\\ell_{t-1}+b_{t-1}+\\alpha \\varepsilon_t\\\\\n&& b_t&=b_{t-1}+\\beta \\varepsilon_t \\\\\n&&s_t &= s_{t-m} + \\gamma\\varepsilon_t\n\\end{align*}\n\\end{block}\n\n* Forecast errors: $\\varepsilon_{t} = y_t - \\hat{y}_{t|t-1}$\n* $k$ is integer part of $(h-1)/m$.\n\n## ETS(M,A,M)\n\nHolt-Winters multiplicative method with multiplicative errors.\n\n\\begin{block}{}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation} && \\hat{y}_{t+h|t} &= (\\ell_{t} + hb_{t}) s_{t+h-m(k+1)}\\\\\n\\text{Observation equation}&& y_t&= (\\ell_{t-1}+b_{t-1})s_{t-m}(1 + \\varepsilon_t)\\\\\n\\text{State equations}&& \\ell_t&=(\\ell_{t-1}+b_{t-1})(1+\\alpha \\varepsilon_t)\\\\\n&& b_t&=b_{t-1} +\\beta(\\ell_{t-1}+b_{t-1}) \\varepsilon_t \\\\\n&&s_t &= s_{t-m}(1 + \\gamma\\varepsilon_t)\n\\end{align*}\n\\end{block}\n\n* Forecast errors: $\\varepsilon_{t} = (y_t - \\hat{y}_{t|t-1})/\\hat{y}_{t|t-1}$\n* $k$ is integer part of $(h-1)/m$.\n\n\n## ETS 
model specification\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\nETS(y ~ error(\"A\") + trend(\"N\") + season(\"N\"))\n```\n:::\n\n\n\nBy default, optimal values for $\\alpha$, $\\beta$, $\\gamma$, and the states at time 0 are used.\n\nThe values for $\\alpha$, $\\beta$ and $\\gamma$ can be specified:\n\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntrend(\"A\", alpha = 0.5, beta = 0.2)\ntrend(\"A\", alpha_range = c(0.2, 0.8), beta_range = c(0.1, 0.4))\nseason(\"M\", gamma = 0.04)\nseason(\"M\", gamma_range = c(0, 0.3))\n```\n:::\n\n\n\n\n## Exponential smoothing methods\n\\fontsize{12}{13}\\sf\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n& &\\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n\\multicolumn{2}{c|}{\\bf Trend}& N & A & M\\\\\n\\multicolumn{2}{c|}{\\bf Component} & (None) & (Additive) & (Multiplicative)\\\\\n\\cline{3-5} &&&&\\\\[-0.4cm]\nN & (None) & (N,N) & (N,A) & (N,M)\\\\\n&&&&\\\\[-0.4cm]\nA & (Additive) & (A,N) & (A,A) & (A,M)\\\\\n&&&&\\\\[-0.4cm]\nA\\damped & (Additive damped) & (A\\damped,N) & (A\\damped,A) & (A\\damped,M)\n\\end{tabular}\n\\end{block}\\fontsize{12}{13}\\sf\n\n\\begin{tabular}{lp{9.7cm}}\n\\alert{(N,N)}: & Simple exponential smoothing\\\\\n\\alert{(A,N)}: & Holt's linear method\\\\\n\\alert{(A\\damped,N)}: & Additive damped trend method\\\\\n\\alert{(A,A)}:~~ & Additive Holt-Winters' method\\\\\n\\alert{(A,M)}: & Multiplicative Holt-Winters' method\\\\\n\\alert{(A\\damped,M)}: & Damped multiplicative Holt-Winters' method\n\\end{tabular}\n\n\\only<2>{\\begin{textblock}{5}(10,6)\n\\begin{alertblock}{}\\fontsize{12}{14}\\sf\nThere are also multiplicative trend methods (not recommended).\n\\end{alertblock}\n\\end{textblock}}\n\n## ETS models\n\\fontsize{11}{12}\\sf\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n \\multicolumn{2}{l}{\\alert{\\bf Additive Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & A,N,N & A,N,A & A,N,M \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & A,A,N & A,A,A & A,A,M \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & A,A\\damped,N & A,A\\damped,A & A,A\\damped,M\n\\end{tabular}\n\\end{block}\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n \\multicolumn{2}{l}{\\alert{\\bf Multiplicative Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & M,N,N & M,N,A & M,N,M \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & M,A,N & M,A,A & M,A,M \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & M,A\\damped,N & M,A\\damped,A & M,A\\damped,M\n\\end{tabular}\n\\end{block}\n\n## ETS models\n\\fontsize{11}{12}\\sf\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n \\multicolumn{2}{l}{\\alert{\\bf Additive Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & A,N,N & A,N,A & \\str{A,N,M} \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & A,A,N & A,A,A & \\str{A,A,M} \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & A,A\\damped,N & A,A\\damped,A & \\str{A,A\\damped,M}\n\\end{tabular}\n\\end{block}\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n 
\\multicolumn{2}{l}{\\alert{\\bf Multiplicative Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & M,N,N & M,N,A & M,N,M \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & M,A,N & M,A,A & M,A,M \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & M,A\\damped,N & M,A\\damped,A & M,A\\damped,M\n\\end{tabular}\n\\end{block}\n\n## AIC and cross-validation\n\n\\Large\n\n\\begin{alertblock}{}\nMinimizing the AIC assuming Gaussian residuals is asymptotically equivalent to minimizing one-step time series cross validation MSE.\n\\end{alertblock}\n\n## Automatic forecasting\n\n**From Hyndman et al.\\ (IJF, 2002):**\n\n* Apply each model that is appropriate to the data.\nOptimize parameters and initial values using MLE (or some other\ncriterion).\n* Select best method using AICc:\n* Produce forecasts using best method.\n* Obtain forecast intervals using underlying state space model.\n\nMethod performed very well in M3 competition.\n\n## Residuals\n\\vspace*{0.2cm}\n\n### Response residuals\n\\centerline{$\\hat{e}_t = y_t - \\hat{y}_{t|t-1}$}\n\n### Innovation residuals\nAdditive error model:\n\\centerline{$\\hat\\varepsilon_t = y_t - \\hat{y}_{t|t-1}$}\nMultiplicative error model:\n\\centerline{$\\hat\\varepsilon_t = \\frac{y_t - \\hat{y}_{t|t-1}}{\\hat{y}_{t|t-1}}$}\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week6/slides/execute-results/tex.json b/_freeze/week6/slides/execute-results/tex.json
deleted file mode 100644
index 35b7080..0000000
--- a/_freeze/week6/slides/execute-results/tex.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "7647147596abe371cd0c774d2a9889ae",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 6: Exponential smoothing\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## ETS models\n\n\\begin{block}{}\n\\hspace*{-0.25cm}\\begin{tabular}{l@{}p{2.3cm}@{}c@{}l}\n\\alert{General n\\rlap{otation}}\n & & ~E T S~ & ~:\\hspace*{0.3cm}\\textbf{E}xponen\\textbf{T}ial \\textbf{S}moothing \\\\ [-0.2cm]\n & \\hfill{$\\nearrow$\\hspace*{-0.1cm}} & {$\\uparrow$} & {\\hspace*{-0.2cm}$\\nwarrow$} \\\\\n & \\hfill{\\textbf{E}rror\\hspace*{0.2cm}} & {\\textbf{T}rend} & {\\hspace*{0.2cm}\\textbf{S}eason}\n\\end{tabular}\n\\end{block}\n\n\\alert{\\textbf{E}rror:} Additive (`\"A\"`) or multiplicative (`\"M\"`)\n\\pause\n\n\\alert{\\textbf{T}rend:} None (`\"N\"`), additive (`\"A\"`), multiplicative (`\"M\"`), or damped (`\"Ad\"` or `\"Md\"`).\n\\pause\n\n\\alert{\\textbf{S}easonality:} None (`\"N\"`), additive (`\"A\"`) or multiplicative (`\"M\"`)\n\n## ETS(A,N,N): SES with additive errors\n\\fontsize{14}{16}\\sf\\vspace*{0.2cm}\n\n\\begin{block}{ETS(A,N,N) model}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Observation equation}&& y_t &= \\ell_{t-1} + \\varepsilon_t\\\\\n\\text{State equation}&& \\ell_t&=\\ell_{t-1}+\\alpha \\varepsilon_t\n\\end{align*}\n\\end{block}\nwhere $\\varepsilon_t\\sim\\text{NID}(0,\\sigma^2)$.\n\n * \"innovations\" or \"single source of error\" because equations have the same error process, $\\varepsilon_t$.\n * Measurement equation: relationship between observations and states.\n * State equation(s): evolution of the state(s) through time.\n\n## ETS(A,A,N)\n\nHolt's methods method with additive errors.\\phantom{p}\n\n\\begin{block}{}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation} && \\hat{y}_{t+h|t} &= \\ell_{t} + hb_{t}\\\\\n\\text{Observation equation}&& y_t&=\\ell_{t-1}+b_{t-1} + \\varepsilon_t\\\\\n\\text{State equations}&& \\ell_t&=\\ell_{t-1}+b_{t-1}+\\alpha \\varepsilon_t\\\\\n&& b_t&=b_{t-1}+\\beta \\varepsilon_t\n\\end{align*}\n\\end{block}\n\n* Forecast errors: $\\varepsilon_{t} = y_t - \\hat{y}_{t|t-1}$\n\n\\vspace*{10cm}\n\n## ETS(A,A,A)\n\nHolt-Winters additive method with additive errors.\\phantom{p}\n\n\\begin{block}{}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation} && \\hat{y}_{t+h|t} &= \\ell_{t} + hb_{t} + s_{t+h-m(k+1)}\\\\\n\\text{Observation equation}&& y_t&=\\ell_{t-1}+b_{t-1}+s_{t-m} + \\varepsilon_t\\\\\n\\text{State equations}&& \\ell_t&=\\ell_{t-1}+b_{t-1}+\\alpha \\varepsilon_t\\\\\n&& b_t&=b_{t-1}+\\beta \\varepsilon_t \\\\\n&&s_t &= s_{t-m} + \\gamma\\varepsilon_t\n\\end{align*}\n\\end{block}\n\n* Forecast errors: $\\varepsilon_{t} = y_t - \\hat{y}_{t|t-1}$\n* $k$ is integer part of $(h-1)/m$.\n\n## ETS(M,A,M)\n\nHolt-Winters multiplicative method with multiplicative errors.\n\n\\begin{block}{}\\vspace*{-0.8cm}\n\\begin{align*}\n\\text{Forecast equation} && \\hat{y}_{t+h|t} &= (\\ell_{t} + hb_{t}) s_{t+h-m(k+1)}\\\\\n\\text{Observation equation}&& y_t&= (\\ell_{t-1}+b_{t-1})s_{t-m}(1 + \\varepsilon_t)\\\\\n\\text{State equations}&& \\ell_t&=(\\ell_{t-1}+b_{t-1})(1+\\alpha \\varepsilon_t)\\\\\n&& b_t&=b_{t-1} +\\beta(\\ell_{t-1}+b_{t-1}) \\varepsilon_t \\\\\n&&s_t &= s_{t-m}(1 + \\gamma\\varepsilon_t)\n\\end{align*}\n\\end{block}\n\n* Forecast errors: $\\varepsilon_{t} = (y_t - \\hat{y}_{t|t-1})/\\hat{y}_{t|t-1}$\n* $k$ is integer part of $(h-1)/m$.\n\n\n## ETS 
model specification\n\n\n::: {.cell}\n\n```{.r .cell-code}\nETS(y ~ error(\"A\") + trend(\"N\") + season(\"N\"))\n```\n:::\n\n\nBy default, optimal values for $\\alpha$, $\\beta$, $\\gamma$, and the states at time 0 are used.\n\nThe values for $\\alpha$, $\\beta$ and $\\gamma$ can be specified:\n\n\n::: {.cell}\n\n```{.r .cell-code}\ntrend(\"A\", alpha = 0.5, beta = 0.2)\ntrend(\"A\", alpha_range = c(0.2, 0.8), beta_range = c(0.1, 0.4))\nseason(\"M\", gamma = 0.04)\nseason(\"M\", gamma_range = c(0, 0.3))\n```\n:::\n\n\n\n## Exponential smoothing methods\n\\fontsize{12}{13}\\sf\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n& &\\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n\\multicolumn{2}{c|}{\\bf Trend}& N & A & M\\\\\n\\multicolumn{2}{c|}{\\bf Component} & (None) & (Additive) & (Multiplicative)\\\\\n\\cline{3-5} &&&&\\\\[-0.4cm]\nN & (None) & (N,N) & (N,A) & (N,M)\\\\\n&&&&\\\\[-0.4cm]\nA & (Additive) & (A,N) & (A,A) & (A,M)\\\\\n&&&&\\\\[-0.4cm]\nA\\damped & (Additive damped) & (A\\damped,N) & (A\\damped,A) & (A\\damped,M)\n\\end{tabular}\n\\end{block}\\fontsize{12}{13}\\sf\n\n\\begin{tabular}{lp{9.7cm}}\n\\alert{(N,N)}: & Simple exponential smoothing\\\\\n\\alert{(A,N)}: & Holt's linear method\\\\\n\\alert{(A\\damped,N)}: & Additive damped trend method\\\\\n\\alert{(A,A)}:~~ & Additive Holt-Winters' method\\\\\n\\alert{(A,M)}: & Multiplicative Holt-Winters' method\\\\\n\\alert{(A\\damped,M)}: & Damped multiplicative Holt-Winters' method\n\\end{tabular}\n\n\\only<2>{\\begin{textblock}{5}(10,6)\n\\begin{alertblock}{}\\fontsize{12}{14}\\sf\nThere are also multiplicative trend methods (not recommended).\n\\end{alertblock}\n\\end{textblock}}\n\n## ETS models\n\\fontsize{11}{12}\\sf\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n \\multicolumn{2}{l}{\\alert{\\bf Additive Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & A,N,N & A,N,A & A,N,M \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & A,A,N & A,A,A & A,A,M \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & A,A\\damped,N & A,A\\damped,A & A,A\\damped,M\n\\end{tabular}\n\\end{block}\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n \\multicolumn{2}{l}{\\alert{\\bf Multiplicative Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & M,N,N & M,N,A & M,N,M \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & M,A,N & M,A,A & M,A,M \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & M,A\\damped,N & M,A\\damped,A & M,A\\damped,M\n\\end{tabular}\n\\end{block}\n\n## ETS models\n\\fontsize{11}{12}\\sf\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n \\multicolumn{2}{l}{\\alert{\\bf Additive Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & A,N,N & A,N,A & \\str{A,N,M} \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & A,A,N & A,A,A & \\str{A,A,M} \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & A,A\\damped,N & A,A\\damped,A & \\str{A,A\\damped,M}\n\\end{tabular}\n\\end{block}\n\n\\begin{block}{}\n\\begin{tabular}{ll|ccc}\n 
\\multicolumn{2}{l}{\\alert{\\bf Multiplicative Error}} & \\multicolumn{3}{c}{\\bf Seasonal Component} \\\\\n \\multicolumn{2}{c|}{\\bf Trend} & N & A & M \\\\\n \\multicolumn{2}{c|}{\\bf Component} & ~(None)~ & (Additive) & (Multiplicative) \\\\ \\cline{3-5}\n & & & & \\\\[-0.4cm]\n N & (None) & M,N,N & M,N,A & M,N,M \\\\\n & & & & \\\\[-0.4cm]\n A & (Additive) & M,A,N & M,A,A & M,A,M \\\\\n & & & & \\\\[-0.4cm]\n A\\damped & (Additive damped) & M,A\\damped,N & M,A\\damped,A & M,A\\damped,M\n\\end{tabular}\n\\end{block}\n\n## AIC and cross-validation\n\n\\Large\n\n\\begin{alertblock}{}\nMinimizing the AIC assuming Gaussian residuals is asymptotically equivalent to minimizing one-step time series cross validation MSE.\n\\end{alertblock}\n\n## Automatic forecasting\n\n**From Hyndman et al.\\ (IJF, 2002):**\n\n* Apply each model that is appropriate to the data.\nOptimize parameters and initial values using MLE (or some other\ncriterion).\n* Select best method using AICc:\n* Produce forecasts using best method.\n* Obtain forecast intervals using underlying state space model.\n\nMethod performed very well in M3 competition.\n\n## Residuals\n\\vspace*{0.2cm}\n\n### Response residuals\n\\centerline{$\\hat{e}_t = y_t - \\hat{y}_{t|t-1}$}\n\n### Innovation residuals\nAdditive error model:\n\\centerline{$\\hat\\varepsilon_t = y_t - \\hat{y}_{t|t-1}$}\nMultiplicative error model:\n\\centerline{$\\hat\\varepsilon_t = \\frac{y_t - \\hat{y}_{t|t-1}}{\\hat{y}_{t|t-1}}$}\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
diff --git a/_freeze/week7/.DS_Store b/_freeze/week7/.DS_Store
deleted file mode 100644
index 809e0f3..0000000
Binary files a/_freeze/week7/.DS_Store and /dev/null differ
diff --git a/_freeze/week7/index/execute-results/html.json b/_freeze/week7/index/execute-results/html.json
deleted file mode 100644
index 441be2a..0000000
--- a/_freeze/week7/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "085c6d94c7c80f1cb54a652446ac16aa",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 7: ARIMA models\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Stationarity and differencing\n* Random Walk Models\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 5-7, 10-15 from [Section 8.8 of the book](https://otexts.com/fpp3/expsmooth-exercises.html).\n\n\n## Exercise solutions\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Sections 9.1-9.2 of the textbook](https://otexts.com/fpp3/arima.html) and watch all embedded videos.\n\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n1. What sorts of transformations and differencing are needed to make the `Cement` series from `aus_production` stationary? Do the tests agree with your decisions? \n\n2. Repeat the exercise for the `Gas` series. \n\n3. Generate the `a10` and the `h02` series from the `PBS` tsibble we explored earlier using the code below.\n\n ```r\n a10 <- PBS |>\n filter(ATC2 == \"A10\") |>\n summarise(Cost = sum(Cost))\n \n h02 <- PBS |>\n filter(ATC2 == \"H02\") |>\n summarise(Cost = sum(Cost))\n\n ```\nWhat are the features of these series in terms of stationarity and what operations do you need to take to turn them into stationary series. \n\n4. Explore the the Algerian exports series from the `global_economy` tsibble. Is the series stationary? Is the series white noise?\n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week7.R](Seminar_code_week7.R){download=\"Seminar_code_week7.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [GA2](../assignments/G2.qmd) is due on Monday 14 April.\n* [IA3](../assignments/A3.qmd) is due on Monday 28 April.\n* [GA3](../assignments/G3.qmd) is due on Monday 05 May.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
diff --git a/_freeze/week7/slides/execute-results/html.json b/_freeze/week7/slides/execute-results/html.json
deleted file mode 100644
index f7a9831..0000000
--- a/_freeze/week7/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "918e58ebd9db87aafdb50d10fe1b75fe",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 7: ARIMA models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## ARIMA models\n\n\\begin{tabular}{@{}rl}\n\\textbf{AR}: & autoregressive (lagged observations as inputs)\\\\\n\\textbf{I}: & integrated (differencing to make series stationary)\\\\\n\\textbf{MA}: & moving average (lagged errors as inputs)\n\\end{tabular}\n\n###\nAn ARIMA model is rarely interpretable in terms of visible data structures like trend and seasonality. But it can capture a huge range of time series patterns.\n\n## Stationarity\n\n\\vspace*{0.2cm}\n\\begin{block}{Definition}\nIf $\\{y_t\\}$ is a stationary time series, then for all $s$, the distribution of $(y_t,\\dots,y_{t+s})$ does not depend on $t$.\n\\end{block}\\pause\\vspace*{-0.3cm}\n\nTransformations help to **stabilize the variance**.\\newline\nFor ARIMA modelling, we also need to **stabilize the mean**.\n\n### Differencing\n\n* Differencing helps to **stabilize the mean**.\n* First differencing: *change* between consecutive observations: $y'_t = y_t - y_{t-1}$.\n* Seasonal differencing: *change* between years: $y'_t = y_t - y_{t-m}$.\n\n\n## Automatic differencing\n\\vspace*{0.2cm}\n\n### Using unit root tests for first differencing\n\n 1. Augmented Dickey Fuller test: null hypothesis is that the data are non-stationary and non-seasonal.\n 2. Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test: null hypothesis is that the data are stationary and non-seasonal.\n\n### Seasonal strength\n\nSTL decomposition: $y_t = T_t+S_t+R_t$\n\nSeasonal strength $F_s = \\max\\big(0, 1-\\frac{\\text{Var}(R_t)}{\\text{Var}(S_t+R_t)}\\big)$\n\nIf $F_s > 0.64$, do one seasonal difference.\n\n\n## Random walk model\n\nIf differenced series is white noise with zero mean:\n\n\\begin{block}{}\n\\centerline{$y_t-y_{t-1}=\\varepsilon_t$ \\hspace{0.4cm} or \\hspace{0.4cm} $y_t=y_{t-1}+\\varepsilon_t$}\n\\end{block}\\vspace*{-0.3cm}\nwhere $\\varepsilon_t \\sim NID(0,\\sigma^2)$.\n\n* Model behind the \\alert{naïve method}.\n* Forecast are equal to the last observation (future movements up or down are equally likely).\n\n\\vspace*{10cm}\n\n## Random walk with drift model\n\nIf differenced series is white noise with non-zero mean:\n\n\\begin{block}{}\n\\centerline{$y_t-y_{t-1}=c+\\varepsilon_t$ \\hspace{0.4cm} or \\hspace{0.4cm} $y_t=c+y_{t-1}+\\varepsilon_t$}\n\\end{block}\\vspace*{-0.3cm}\nwhere $\\varepsilon_t \\sim NID(0,\\sigma^2)$.\n\n* $c$ is the \\alert{average change} between consecutive observations.\n* Model behind the \\alert{drift method}.\n\n\\vspace*{10cm}\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
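
The random walk slides removed above map directly onto fable's naïve and drift models. A small simulation sketch (assuming only the fpp3 package) showing that the fitted drift term recovers the average change $c$:

```r
library(fpp3)

set.seed(1)
# Simulate y_t = 0.2 + y_{t-1} + e_t: a random walk with drift c = 0.2.
rw <- tsibble(t = 1:200, y = cumsum(0.2 + rnorm(200)), index = t)

fit <- rw |> model(RW(y ~ drift()))
report(fit)              # the estimated drift should be close to 0.2
fit |> forecast(h = 10)  # point forecasts follow a straight line with slope c
```
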
diff --git a/_freeze/week7/slides/execute-results/tex.json b/_freeze/week7/slides/execute-results/tex.json
deleted file mode 100644
index ca77fa7..0000000
--- a/_freeze/week7/slides/execute-results/tex.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "9dfbca08301998ef40df32018413b27f",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 7: ARIMA models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n## ARIMA models\n\n\\begin{tabular}{@{}rl}\n\\textbf{AR}: & autoregressive (lagged observations as inputs)\\\\\n\\textbf{I}: & integrated (differencing to make series stationary)\\\\\n\\textbf{MA}: & moving average (lagged errors as inputs)\n\\end{tabular}\n\n###\nAn ARIMA model is rarely interpretable in terms of visible data structures like trend and seasonality. But it can capture a huge range of time series patterns.\n\n## Stationarity\n\n\\vspace*{0.2cm}\n\\begin{block}{Definition}\nIf $\\{y_t\\}$ is a stationary time series, then for all $s$, the distribution of $(y_t,\\dots,y_{t+s})$ does not depend on $t$.\n\\end{block}\\pause\\vspace*{-0.3cm}\n\nTransformations help to **stabilize the variance**.\\newline\nFor ARIMA modelling, we also need to **stabilize the mean**.\n\n### Differencing\n\n* Differencing helps to **stabilize the mean**.\n* First differencing: *change* between consecutive observations: $y'_t = y_t - y_{t-1}$.\n* Seasonal differencing: *change* between years: $y'_t = y_t - y_{t-m}$.\n\n\n## Automatic differencing\n\\vspace*{0.2cm}\n\n### Using unit root tests for first differencing\n\n 1. Augmented Dickey Fuller test: null hypothesis is that the data are non-stationary and non-seasonal.\n 2. Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test: null hypothesis is that the data are stationary and non-seasonal.\n\n### Seasonal strength\n\nSTL decomposition: $y_t = T_t+S_t+R_t$\n\nSeasonal strength $F_s = \\max\\big(0, 1-\\frac{\\text{Var}(R_t)}{\\text{Var}(S_t+R_t)}\\big)$\n\nIf $F_s > 0.64$, do one seasonal difference.\n\n\n## Random walk model\n\nIf differenced series is white noise with zero mean:\n\n\\begin{block}{}\n\\centerline{$y_t-y_{t-1}=\\varepsilon_t$ \\hspace{0.4cm} or \\hspace{0.4cm} $y_t=y_{t-1}+\\varepsilon_t$}\n\\end{block}\\vspace*{-0.3cm}\nwhere $\\varepsilon_t \\sim NID(0,\\sigma^2)$.\n\n* Model behind the \\alert{naïve method}.\n* Forecast are equal to the last observation (future movements up or down are equally likely).\n\n\\vspace*{10cm}\n\n## Random walk with drift model\n\nIf differenced series is white noise with non-zero mean:\n\n\\begin{block}{}\n\\centerline{$y_t-y_{t-1}=c+\\varepsilon_t$ \\hspace{0.4cm} or \\hspace{0.4cm} $y_t=c+y_{t-1}+\\varepsilon_t$}\n\\end{block}\\vspace*{-0.3cm}\nwhere $\\varepsilon_t \\sim NID(0,\\sigma^2)$.\n\n* $c$ is the \\alert{average change} between consecutive observations.\n* Model behind the \\alert{drift method}.\n\n\\vspace*{10cm}\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
diff --git a/_freeze/week8/.DS_Store b/_freeze/week8/.DS_Store
deleted file mode 100644
index 43c73f4..0000000
Binary files a/_freeze/week8/.DS_Store and /dev/null differ
diff --git a/_freeze/week8/index/execute-results/html.json b/_freeze/week8/index/execute-results/html.json
deleted file mode 100644
index 46636c7..0000000
--- a/_freeze/week8/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "bbb5e7da37fa0a17e1eeddfd909a91f2",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 8: ARIMA models\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* AR, MA, ARMA and ARIMA models\n* Selecting model orders manually and automatically\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercise 1-4 from [Section 9.11 of the book](https://otexts.com/fpp3/arima-exercises.html)\n\n## Exercise solutions\n\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Sections 9.3-9.8 of the textbook](https://otexts.com/fpp3/arima.html) and watch all embedded videos\n\n\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n1. Explore the Egyptian exports series. Is data stationary? What ARIMA model?\n\n\n2. Run through the code for the rest of the examples. Explore and understand what the code is doing. \n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week8.R](Seminar_code_week8.R){download=\"Seminar_code_week8.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [IA3](../assignments/A3.qmd) is due on Monday 28 April.\n* [GA3](../assignments/G3.qmd) is due on Monday 05 May.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
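
For seminar activity 1 on the deleted week 8 page, one plausible workflow is to let `ARIMA()` search the orders automatically and compare the result with what the ACF suggests. A sketch assuming fpp3 and the `global_economy` data it ships:

```r
library(fpp3)

egypt <- global_economy |> filter(Code == "EGY")

egypt |> autoplot(Exports)           # fluctuates around a stable mean level
egypt |> ACF(Exports) |> autoplot()  # damped, wave-like decay suggests AR terms

fit <- egypt |> model(ARIMA(Exports))
report(fit)  # automatic order search; the fpp3 book obtains an ARIMA(2,0,1) here
```
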
diff --git a/_freeze/week8/slides/execute-results/html.json b/_freeze/week8/slides/execute-results/html.json
deleted file mode 100644
index 980bfc7..0000000
--- a/_freeze/week8/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "9bd7f77504e08288ded57cf28605df18",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 8: ARIMA models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n\n## Backshift operator notation\n\n* $B$ shifts the data back one period. $B y_{t} = y_{t - 1}$\n* $B^2$ shifts the data back two periods: $B(By_{t}) = B^{2}y_{t} = y_{t-2}$\n* A difference can be written as $(1 - B) y_{t}$\n* A $d$th-order difference can be written as $(1 - B)^{d} y_{t}$\n* A seasonal difference followed by a first difference can be written as\n$(1-B)(1-B^m)y_t$\n\n## AR(1) model\n\n\\begin{block}{}\n \\centerline{$y_{t} = c + \\phi_1 y_{t - 1} + \\varepsilon_{t}$}\n\\end{block}\n\n* When $\\phi_1=0$, $y_t$ is **equivalent to WN**\n* When $\\phi_1=1$ and $c=0$, $y_t$ is **equivalent to a RW**\n* When $\\phi_1=1$ and $c\\ne0$, $y_t$ is **equivalent to a RW with drift**\n* When $\\phi_1<0$, $y_t$ tends to **oscillate between positive and negative values**.\n\n## Autoregressive models\n\nA multiple regression with \\textbf{lagged values} of $y_t$ as predictors.\n\n\\vspace*{-1.2cm}\n\\begin{align*}\ny_t &= c + \\phi_{1}y_{t - 1} + \\phi_{2}y_{t - 2} + \\cdots + \\phi_{p}y_{t - p} + \\varepsilon_{t} \\\\\n&= c + (\\phi_1 B + \\phi_2 B^2 + \\cdots + \\phi_p B^p)y_t + \\varepsilon_t\n\\end{align*}\\pause\\vspace*{-1.2cm}\n\\begin{align*}\n(1 - \\phi_1 B - \\phi_2 B^2 - \\cdots - \\phi_p B^p)y_t &= c + \\varepsilon_t \\\\\n\\phi(B) y_t &= c+\\varepsilon_t\n\\end{align*}\n\n* $\\varepsilon_t$ is white noise.\n* $\\phi(B) = (1 - \\phi_1 B - \\phi_2 B^2 - \\cdots - \\phi_p B^p)$\n\n## Stationarity conditions\n\nWe normally restrict autoregressive models to stationary data, and then some constraints on the values of the parameters are required.\n\n\\begin{block}{General condition for stationarity}\n Complex roots of $\\phi(z) = 1-\\phi_1 z - \\phi_2 z^2 - \\dots - \\phi_pz^p$ lie outside the unit circle on the complex plane.\n\\end{block}\\pause\\vspace*{-0.3cm}\n\n* For $p=1$: $-1<\\phi_1<1$.\n* For $p=2$: $-1<\\phi_2<1\\qquad \\phi_2+\\phi_1 < 1 \\qquad \\phi_2 -\\phi_1 < 1$.\n* More complicated conditions hold for $p\\ge3$.\n* fable takes care of this.\n\n## Moving Average (MA) models\nA multiple regression with \\textbf{past \\emph{errors}} as predictors.\n\n\n\\vspace*{-1.2cm}\n\\begin{align*}\n y_{t} &= c + \\varepsilon_t + \\theta_{1}\\varepsilon_{t - 1} + \\theta_{2}\\varepsilon_{t - 2} + \\cdots + \\theta_{q}\\varepsilon_{t - q}\\\\\n&= c + (1 + \\theta_1 B + \\theta_2 B^2 + \\cdots + \\theta_q B^q)\\varepsilon_t \\\\\n&= c + \\theta(B)\\varepsilon_t\n\\end{align*}\\pause\\vspace*{-1.2cm}\n\n* $\\varepsilon_t$ is white noise.\n* $\\theta(B) = (1 + \\theta_1 B + \\theta_2 B^2 + \\cdots + \\theta_q B^q)$\n\n## Invertibility\n\n\\begin{block}{General condition for invertibility}\n Complex roots of $\\theta(z) = 1+\\theta_1 z + \\theta_2 z^2 + \\dots + \\theta_qz^q$ lie outside the unit circle on the complex plane.\n\\end{block}\\pause\n\n* For $q=1$: $-1<\\theta_1<1$.\n* For $q=2$: $-1<\\theta_2<1\\qquad \\theta_2+\\theta_1 >-1 \\qquad \\theta_1 -\\theta_2 < 1$.\n* More complicated conditions hold for $q\\ge3$.\n* fable takes care of this.\n\n## ARIMA models\n\n\\fontsize{14}{14.5}\\sf\\vspace*{0.2cm}\n\\begin{block}{ARIMA($p, d, q$) model:\\qquad $\\phi(B)(1-B)^dy_{t} = c + \\theta(B)\\varepsilon_{t}$}\n\\begin{tabular}{rl}\nAR:& $p =$ order of the 
autoregressive part\\\\\nI: & $d =$ degree of first differencing involved\\\\\nMA:& $q =$ order of the moving average part.\n\\end{tabular}\n\\end{block}\\pause\\vspace*{-0.2cm}\n\n* Conditions on AR coefficients ensure stationarity.\n* Conditions on MA coefficients ensure invertibility.\n* White noise model: ARIMA(0,0,0)\n* Random walk: ARIMA(0,1,0) with no constant\n* Random walk with drift: ARIMA(0,1,0) with \\rlap{const.}\n* AR($p$): ARIMA($p$,0,0)\n* MA($q$): ARIMA(0,0,$q$)\n\n\n## R model\n\\vspace*{0.2cm}\n\n\\begin{block}{Intercept form}\n \\centerline{$(1-\\phi_1B - \\cdots - \\phi_p B^p) y_t' = c + (1 + \\theta_1 B + \\cdots + \\theta_q B^q)\\varepsilon_t$}\n\\end{block}\n\n\\begin{block}{Mean form}\n \\centerline{$(1-\\phi_1B - \\cdots - \\phi_p B^p)(y_t' - \\mu) = (1 + \\theta_1 B + \\cdots + \\theta_q B^q)\\varepsilon_t$}\n\\end{block}\n\n * $y_t' = (1-B)^d y_t$\n * $\\mu$ is the mean of $y_t'$.\n * $c = \\mu(1-\\phi_1 - \\cdots - \\phi_p )$.\n * fable uses intercept form\n\n## Understanding ARIMA models\n\\fontsize{13}{14}\\sf\n\n* If $c=0$ and $d=0$, the long-term forecasts will go to zero.\n* If $c=0$ and $d=1$, the long-term forecasts will go to a non-zero constant.\n* If $c=0$ and $d=2$, the long-term forecasts will follow a straight line.\n* If $c\\ne0$ and $d=0$, the long-term forecasts will go to the mean of the data.\n* If $c\\ne0$ and $d=1$, the long-term forecasts will follow a straight line.\n* If $c\\ne0$ and $d=2$, the long-term forecasts will follow a quadratic trend.\n\n## Understanding ARIMA models\n\\fontsize{14}{16}\\sf\\vspace*{0.2cm}\n\n### Forecast variance and $d$\n * The higher the value of $d$, the more rapidly the prediction intervals increase in size.\n * For $d=0$, the long-term forecast standard deviation will go to the standard deviation of the historical data.\n\n### Cyclic behaviour\n * For cyclic forecasts, $p\\ge2$ and some restrictions on coefficients are required.\n * If $p=2$, we need $\\phi_1^2+4\\phi_2<0$. Then average cycle of length\n\n\\centerline{$(2\\pi)/\\left[\\text{arc cos}(-\\phi_1(1-\\phi_2)/(4\\phi_2))\\right].$}\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
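
The stationarity condition on the slides above ($-1<\phi_1<1$ for AR(1); roots of $\phi(z)$ outside the unit circle in general) can be checked numerically on a fitted model. A sketch assuming fpp3 and base R's `arima.sim()`:

```r
library(fpp3)

set.seed(42)
# Simulate a stationary AR(1) with phi_1 = 0.8, i.e. inside (-1, 1).
sim <- tsibble(
  t = 1:300,
  y = as.numeric(arima.sim(list(ar = 0.8), n = 300)),
  index = t
)

fit <- sim |> model(ARIMA(y ~ pdq(1, 0, 0)))
report(fit)                      # estimated phi_1 should be near 0.8
glance(fit) |> select(ar_roots)  # roots of phi(z) lie outside the unit circle
```
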
diff --git a/_freeze/week8/slides/execute-results/tex.json b/_freeze/week8/slides/execute-results/tex.json
deleted file mode 100644
index f611199..0000000
--- a/_freeze/week8/slides/execute-results/tex.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "0d0feffa6e738b673d48f9b68adcb542",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 8: ARIMA models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## Backshift operator notation\n\n* $B$ shifts the data back one period. $B y_{t} = y_{t - 1}$\n* $B^2$ shifts the data back two periods: $B(By_{t}) = B^{2}y_{t} = y_{t-2}$\n* A difference can be written as $(1 - B) y_{t}$\n* A $d$th-order difference can be written as $(1 - B)^{d} y_{t}$\n* A seasonal difference followed by a first difference can be written as\n$(1-B)(1-B^m)y_t$\n\n## AR(1) model\n\n\\begin{block}{}\n \\centerline{$y_{t} = c + \\phi_1 y_{t - 1} + \\varepsilon_{t}$}\n\\end{block}\n\n* When $\\phi_1=0$, $y_t$ is **equivalent to WN**\n* When $\\phi_1=1$ and $c=0$, $y_t$ is **equivalent to a RW**\n* When $\\phi_1=1$ and $c\\ne0$, $y_t$ is **equivalent to a RW with drift**\n* When $\\phi_1<0$, $y_t$ tends to **oscillate between positive and negative values**.\n\n## Autoregressive models\n\nA multiple regression with \\textbf{lagged values} of $y_t$ as predictors.\n\n\\vspace*{-1.2cm}\n\\begin{align*}\ny_t &= c + \\phi_{1}y_{t - 1} + \\phi_{2}y_{t - 2} + \\cdots + \\phi_{p}y_{t - p} + \\varepsilon_{t} \\\\\n&= c + (\\phi_1 B + \\phi_2 B^2 + \\cdots + \\phi_p B^p)y_t + \\varepsilon_t\n\\end{align*}\\pause\\vspace*{-1.2cm}\n\\begin{align*}\n(1 - \\phi_1 B - \\phi_2 B^2 - \\cdots - \\phi_p B^p)y_t &= c + \\varepsilon_t \\\\\n\\phi(B) y_t &= c+\\varepsilon_t\n\\end{align*}\n\n* $\\varepsilon_t$ is white noise.\n* $\\phi(B) = (1 - \\phi_1 B - \\phi_2 B^2 - \\cdots - \\phi_p B^p)$\n\n## Stationarity conditions\n\nWe normally restrict autoregressive models to stationary data, and then some constraints on the values of the parameters are required.\n\n\\begin{block}{General condition for stationarity}\n Complex roots of $\\phi(z) = 1-\\phi_1 z - \\phi_2 z^2 - \\dots - \\phi_pz^p$ lie outside the unit circle on the complex plane.\n\\end{block}\\pause\\vspace*{-0.3cm}\n\n* For $p=1$: $-1<\\phi_1<1$.\n* For $p=2$: $-1<\\phi_2<1\\qquad \\phi_2+\\phi_1 < 1 \\qquad \\phi_2 -\\phi_1 < 1$.\n* More complicated conditions hold for $p\\ge3$.\n* fable takes care of this.\n\n## Moving Average (MA) models\nA multiple regression with \\textbf{past \\emph{errors}} as predictors.\n\n\n\\vspace*{-1.2cm}\n\\begin{align*}\n y_{t} &= c + \\varepsilon_t + \\theta_{1}\\varepsilon_{t - 1} + \\theta_{2}\\varepsilon_{t - 2} + \\cdots + \\theta_{q}\\varepsilon_{t - q}\\\\\n&= c + (1 + \\theta_1 B + \\theta_2 B^2 + \\cdots + \\theta_q B^q)\\varepsilon_t \\\\\n&= c + \\theta(B)\\varepsilon_t\n\\end{align*}\\pause\\vspace*{-1.2cm}\n\n* $\\varepsilon_t$ is white noise.\n* $\\theta(B) = (1 + \\theta_1 B + \\theta_2 B^2 + \\cdots + \\theta_q B^q)$\n\n## Invertibility\n\n\\begin{block}{General condition for invertibility}\n Complex roots of $\\theta(z) = 1+\\theta_1 z + \\theta_2 z^2 + \\dots + \\theta_qz^q$ lie outside the unit circle on the complex plane.\n\\end{block}\\pause\n\n* For $q=1$: $-1<\\theta_1<1$.\n* For $q=2$: $-1<\\theta_2<1\\qquad \\theta_2+\\theta_1 >-1 \\qquad \\theta_1 -\\theta_2 < 1$.\n* More complicated conditions hold for $q\\ge3$.\n* fable takes care of this.\n\n## ARIMA models\n\n\\fontsize{14}{14.5}\\sf\\vspace*{0.2cm}\n\\begin{block}{ARIMA($p, d, q$) model:\\qquad $\\phi(B)(1-B)^dy_{t} = c + \\theta(B)\\varepsilon_{t}$}\n\\begin{tabular}{rl}\nAR:& $p =$ order of the 
autoregressive part\\\\\nI: & $d =$ degree of first differencing involved\\\\\nMA:& $q =$ order of the moving average part.\n\\end{tabular}\n\\end{block}\\pause\\vspace*{-0.2cm}\n\n* Conditions on AR coefficients ensure stationarity.\n* Conditions on MA coefficients ensure invertibility.\n* White noise model: ARIMA(0,0,0)\n* Random walk: ARIMA(0,1,0) with no constant\n* Random walk with drift: ARIMA(0,1,0) with \\rlap{const.}\n* AR($p$): ARIMA($p$,0,0)\n* MA($q$): ARIMA(0,0,$q$)\n\n\n## R model\n\\vspace*{0.2cm}\n\n\\begin{block}{Intercept form}\n \\centerline{$(1-\\phi_1B - \\cdots - \\phi_p B^p) y_t' = c + (1 + \\theta_1 B + \\cdots + \\theta_q B^q)\\varepsilon_t$}\n\\end{block}\n\n\\begin{block}{Mean form}\n \\centerline{$(1-\\phi_1B - \\cdots - \\phi_p B^p)(y_t' - \\mu) = (1 + \\theta_1 B + \\cdots + \\theta_q B^q)\\varepsilon_t$}\n\\end{block}\n\n * $y_t' = (1-B)^d y_t$\n * $\\mu$ is the mean of $y_t'$.\n * $c = \\mu(1-\\phi_1 - \\cdots - \\phi_p )$.\n * fable uses intercept form\n\n## Understanding ARIMA models\n\\fontsize{13}{14}\\sf\n\n* If $c=0$ and $d=0$, the long-term forecasts will go to zero.\n* If $c=0$ and $d=1$, the long-term forecasts will go to a non-zero constant.\n* If $c=0$ and $d=2$, the long-term forecasts will follow a straight line.\n* If $c\\ne0$ and $d=0$, the long-term forecasts will go to the mean of the data.\n* If $c\\ne0$ and $d=1$, the long-term forecasts will follow a straight line.\n* If $c\\ne0$ and $d=2$, the long-term forecasts will follow a quadratic trend.\n\n## Understanding ARIMA models\n\\fontsize{14}{16}\\sf\\vspace*{0.2cm}\n\n### Forecast variance and $d$\n * The higher the value of $d$, the more rapidly the prediction intervals increase in size.\n * For $d=0$, the long-term forecast standard deviation will go to the standard deviation of the historical data.\n\n### Cyclic behaviour\n * For cyclic forecasts, $p\\ge2$ and some restrictions on coefficients are required.\n * If $p=2$, we need $\\phi_1^2+4\\phi_2<0$. Then average cycle of length\n\n\\centerline{$(2\\pi)/\\left[\\text{arc cos}(-\\phi_1(1-\\phi_2)/(4\\phi_2))\\right].$}\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
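
The backshift identities on the slides above translate directly into code: `difference()` applies $(1-B)$ and `difference(x, m)` applies $(1-B^m)$. A sketch assuming fpp3 and the quarterly `aus_production` data:

```r
library(fpp3)

aus_production |>
  transmute(
    d1   = difference(Beer),                # (1 - B) y_t
    d4   = difference(Beer, 4),             # (1 - B^4) y_t
    d4d1 = difference(difference(Beer, 4))  # (1 - B)(1 - B^4) y_t
  )
```
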
diff --git a/_freeze/week9/.DS_Store b/_freeze/week9/.DS_Store
deleted file mode 100644
index 031808b..0000000
Binary files a/_freeze/week9/.DS_Store and /dev/null differ
diff --git a/_freeze/week9/index/execute-results/html.json b/_freeze/week9/index/execute-results/html.json
deleted file mode 100644
index 503a38e..0000000
--- a/_freeze/week9/index/execute-results/html.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "hash": "0ea7eb644f4e62602bcd982472b06ff2",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: \"Week 9: ARIMA models\"\n---\n\n::: {.cell}\n\n:::\n\n\n\n\n## What you will learn this week\n\n* Seasonal ARIMA models\n* Computing forecasts for ARIMA models\n* ARIMA vs ETS models\n\n## Exercises (on your own or in tutorial)\n\nComplete Exercises 6-8, 15-16 from [Section 9.11 of the book](https://otexts.com/fpp3/arima-exercises.html).\n\n\n## Exercise solutions\n\n\n\n\n\n\n\n## Pre-seminar activities\n\nRead [Sections 9.8-9.10 of the textbook](https://otexts.com/fpp3/arima.html) and watch all embedded videos\n\n\n\n\n\n## Slides for seminar\n\n\nDownload pdf\n\n\n\n\n## Seminar activities\n\n\n\n\n\n1. Identify, estimate and generate forecasts from ARIMA models for the `usmelec`, `leisure` and `h02` as specified below:\n\n ```r\n usmelec <- as_tsibble(fpp2::usmelec) |>\n rename(Month = index, Generation = value)\n \n leisure <- us_employment |>\n filter(Title == \"Leisure and Hospitality\", year(Month) > 2000) |>\n mutate(Employed = Employed/1000) |> select(Month, Employed)\n \n h02 <- PBS |>\n filter(ATC2 == \"H02\") |>\n summarise(Cost = sum(Cost))\n ```\n2. Identify, estimate and generate forecasts from ARIMA and ETS models for the `aus_economy` and `cement` as specified below:\n\n ```r\n aus_economy <- global_economy |> filter(Code == \"AUS\") |>\n mutate(Population = Population/1e6)\n \n cement <- aus_production |>\n select(Cement) |>\n filter_index(\"1988 Q1\" ~ .)\n ```\n\n\n\n\n\n## Seminar code\n\n::: {.callout appearance=\"minimal\"}\n [Seminar_code_week9.R](Seminar_code_week9.R){download=\"Seminar_code_week9.R\"}\n:::\n\n\n\n\n\n\n\n## Assignments\n\n* [GA3](../assignments/G3.qmd) is due on Monday 05 May.\n* [IA4](../assignments/A4.qmd) is due on Monday 19 May.\n",
- "supporting": [],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
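
Seminar activity 2 on the deleted week 9 page compares ARIMA and ETS; a simple train/test split is one common way to do it. A sketch assuming fpp3, with an arbitrarily chosen split point:

```r
library(fpp3)

cement <- aus_production |>
  select(Cement) |>
  filter_index("1988 Q1" ~ .)

# Hold out the last ten quarters as a test set.
train <- cement |> filter_index(. ~ "2007 Q4")

fit <- train |> model(arima = ARIMA(Cement), ets = ETS(Cement))

fit |>
  forecast(h = 10) |>
  accuracy(cement)  # compare RMSE/MAE on the held-out quarters
```
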
diff --git a/_freeze/week9/slides/execute-results/html.json b/_freeze/week9/slides/execute-results/html.json
deleted file mode 100644
index fde4659..0000000
--- a/_freeze/week9/slides/execute-results/html.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "hash": "d8382295613a156f43bf310b321bdfa7",
- "result": {
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 9: ARIMA models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n\n## Point forecasts\n\n1. Rearrange ARIMA equation so $y_t$ is on LHS.\n2. Rewrite equation by replacing $t$ by $T+h$.\n3. On RHS, replace future observations by their forecasts, future errors by zero, and past errors by corresponding residuals.\n\nStart with $h=1$. Repeat for $h=2,3,\\dots$.\n\n## Prediction intervals\n\\vspace*{0.2cm}\\fontsize{14}{15}\\sf\n\n\\begin{block}{95\\% prediction interval}\n$$\\hat{y}_{T+h|T} \\pm 1.96\\sqrt{v_{T+h|T}}$$\nwhere $v_{T+h|T}$ is estimated forecast variance.\n\\end{block}\\pause\\vspace*{-0.3cm}\n\n* $v_{T+1|T}=\\hat{\\sigma}^2$ for all ARIMA models regardless of parameters and orders.\\pause\n* Multi-step prediction intervals for ARIMA(0,0,$q$):\n\\centerline{$\\displaystyle y_t = \\varepsilon_t + \\sum_{i=1}^q \\theta_i \\varepsilon_{t-i}.$}\n\\centerline{$\\displaystyle\nv_{T|T+h} = \\hat{\\sigma}^2 \\left[ 1 + \\sum_{i=1}^{h-1} \\theta_i^2\\right], \\qquad\\text{for~} h=2,3,\\dots.$}\n\n## Prediction intervals\n\n* Prediction intervals **increase in size with forecast horizon**.\n* Prediction intervals can be difficult to calculate by hand\n* Calculations assume residuals are **uncorrelated** and **normally distributed**.\n* Prediction intervals tend to be too narrow.\n * the uncertainty in the parameter estimates has not been accounted for.\n * the ARIMA model assumes historical patterns will not change during the forecast period.\n * the ARIMA model assumes uncorrelated future \\rlap{errors}\n\n\n## Seasonal ARIMA models\n\n| ARIMA | $~\\underbrace{(p, d, q)}$ | $\\underbrace{(P, D, Q)_{m}}$ |\n| ----: | :-----------------------: | :--------------------------: |\n| | ${\\uparrow}$ | ${\\uparrow}$ |\n| | Non-seasonal part | Seasonal part of |\n| | of the model | of the model |\n\nwhere $m =$ number of observations per year.\n\n## Seasonal ARIMA models\n\nE.g., ARIMA$(1, 1, 1)(1, 1, 1)_{4}$ model (without constant)\\pause\n$$(1 - \\phi_{1}B)(1 - \\Phi_{1}B^{4}) (1 - B) (1 - B^{4})y_{t} ~= ~\n(1 + \\theta_{1}B) (1 + \\Theta_{1}B^{4})\\varepsilon_{t}.\n$$\\pause\\vspace*{-1cm}\n\n\\setlength{\\unitlength}{1mm}\n\\begin{footnotesize}\n\\begin{picture}(100,25)(-5,0)\n\\thinlines\n{\\put(15,22){\\vector(0,1){6}}}\n{\\put(32,10){\\vector(0,1){18}}}\n{\\put(48,22){\\vector(0,1){6}}}\n{\\put(66,10){\\vector(0,1){18}}}\n{\\put(97,22){\\vector(0,1){6}}}\n{\\put(115,10){\\vector(0,1){18}}}\n{\\put(0,17){$\\left(\\begin{array}{@{}c@{}} \\text{Non-seasonal} \\\\ \\text{AR(1)}\n \\end{array}\\right)$}}\n{\\put(22,5){$\\left(\\begin{array}{@{}c@{}} \\text{Seasonal} \\\\ \\text{AR(1)}\n \\end{array}\\right)$}}\n{\\put(35,17){$\\left(\\begin{array}{@{}c@{}} \\text{Non-seasonal} \\\\ \\text{difference}\n \\end{array}\\right)$}}\n{\\put(55,5){$\\left(\\begin{array}{@{}c@{}} \\text{Seasonal} \\\\ \\text{difference}\n \\end{array}\\right)$}}\n{\\put(85,17){$\\left(\\begin{array}{@{}c@{}} \\text{Non-seasonal} \\\\ \\text{MA(1)}\n \\end{array}\\right)$}}\n{\\put(105,5){$\\left(\\begin{array}{@{}c@{}} \\text{Seasonal} \\\\ \\text{MA(1)}\n \\end{array}\\right)$}}\n\\end{picture}\n\\end{footnotesize}\n\n\\vspace*{10cm}\n\n## Seasonal ARIMA models\n\nE.g., ARIMA$(1, 1, 1)(1, 1, 1)_{4}$ model (without constant)\n$$(1 - \\phi_{1}B)(1 - 
\\Phi_{1}B^{4}) (1 - B) (1 - B^{4})y_{t} ~= ~\n(1 + \\theta_{1}B) (1 + \\Theta_{1}B^{4})\\varepsilon_{t}.\n$$\\vspace*{-1cm}\n\nAll the factors can be multiplied out and the general model\nwritten as follows:\\vspace*{-0.7cm}\n\\begin{align*}\ny_{t} &= (1 + \\phi_{1})y_{t - 1} - \\phi_1y_{t-2} + (1 + \\Phi_{1})y_{t - 4}\\\\\n&\\text{}\n - (1 + \\phi_{1} + \\Phi_{1} + \\phi_{1}\\Phi_{1})y_{t - 5}\n + (\\phi_{1} + \\phi_{1} \\Phi_{1}) y_{t - 6} \\\\\n& \\text{} - \\Phi_{1} y_{t - 8} + (\\Phi_{1} + \\phi_{1} \\Phi_{1}) y_{t - 9}\n - \\phi_{1} \\Phi_{1} y_{t - 10}\\\\\n &\\text{}\n + \\varepsilon_{t} + \\theta_{1}\\varepsilon_{t - 1} + \\Theta_{1}\\varepsilon_{t - 4} + \\theta_{1}\\Theta_{1}\\varepsilon_{t - 5}.\n\\end{align*}\n\\vspace*{10cm}\n\n## Seasonal ARIMA models\nThe seasonal part of an AR or MA model will be seen in the seasonal lags of\nthe PACF and ACF.\n\n\\alert{ARIMA(0,0,0)(0,0,1)$_{12}$ will show:}\\vspace*{-0.2cm}\n\n * a spike at lag 12 in the ACF but no other significant spikes.\n * The PACF will show exponential decay in the seasonal lags; that is, at lags 12, 24, 36, \\dots.\n\n\\alert{ARIMA(0,0,0)(1,0,0)$_{12}$ will show:}\\vspace*{-0.2cm}\n\n * exponential decay in the seasonal lags of the ACF\n * a single significant spike at lag 12 in the PACF.\n\n\n## ARIMA vs ETS\n\\fontsize{14}{16}\\sf\n\n * Myth that ARIMA models are more general than exponential smoothing.\n * Linear exponential smoothing models all special cases of ARIMA models.\n * Non-linear exponential smoothing models have no equivalent ARIMA counterparts.\n * Many ARIMA models have no exponential smoothing counterparts.\n * ETS models all non-stationary. Models with seasonality or non-damped trend (or both) have two unit roots; all other models have one unit \\rlap{root.}\n\n\\vspace*{10cm}\n\n## ARIMA vs ETS\n\n\n\n::: {.cell}\n::: {.cell-output-display}\n{width=480}\n:::\n:::\n\n\n\n## Equivalences\n\n\\fontsize{13}{15}\\sf\n\n|**ETS model** | **ARIMA model** | **Parameters** |\n| :------------ | :-------------------------- | :----------------------------------- |\n| ETS(A,N,N) | ARIMA(0,1,1) | $\\theta_1 = \\alpha-1$ |\n| ETS(A,A,N) | ARIMA(0,2,2) | $\\theta_1 = \\alpha+\\beta-2$ |\n| | | $\\theta_2 = 1-\\alpha$ |\n| ETS(A,A\\damped,N) | ARIMA(1,1,2) | $\\phi_1=\\phi$ |\n| | | $\\theta_1 = \\alpha+\\phi\\beta-1-\\phi$ |\n| | | $\\theta_2 = (1-\\alpha)\\phi$ |\n| ETS(A,N,A) | ARIMA(0,0,$m$)(0,1,0)$_m$ | |\n| ETS(A,A,A) | ARIMA(0,1,$m+1$)(0,1,0)$_m$ | |\n| ETS(A,A\\damped,A) | ARIMA(1,0,$m+1$)(0,1,0)$_m$ | |\n",
- "supporting": [
- "slides_files\\figure-html"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": {},
- "postProcess": true
- }
-}
\ No newline at end of file
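
The first row of the equivalences table above (ETS(A,N,N) vs ARIMA(0,1,1) with $\theta_1 = \alpha - 1$) can be verified numerically. A sketch assuming fpp3; the ARIMA constant is suppressed so the two models coincide exactly:

```r
library(fpp3)

algeria <- global_economy |> filter(Code == "DZA")

fit <- algeria |>
  model(
    ets   = ETS(Exports ~ error("A") + trend("N") + season("N")),
    arima = ARIMA(Exports ~ 0 + pdq(0, 1, 1))  # no constant, matching ETS(A,N,N)
  )

tidy(fit)  # the ma1 estimate should sit close to alpha - 1
```
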
diff --git a/_freeze/week9/slides/execute-results/tex.json b/_freeze/week9/slides/execute-results/tex.json
deleted file mode 100644
index 5de3093..0000000
--- a/_freeze/week9/slides/execute-results/tex.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "hash": "f1a1e8d03fe0ec1914accb13c7eace54",
- "result": {
- "engine": "knitr",
- "markdown": "---\ntitle: ETC3550/ETC5550 Applied forecasting\nauthor: \"Week 9: ARIMA models\"\nformat:\n beamer:\n aspectratio: 169\n fontsize: 14pt\n section-titles: false\n knitr:\n opts_chunk:\n dev: \"cairo_pdf\"\n pdf-engine: pdflatex\n fig-width: 7.5\n fig-height: 3.5\n include-in-header: ../header.tex\n---\n\n\n\n\n\n## Point forecasts\n\n1. Rearrange ARIMA equation so $y_t$ is on LHS.\n2. Rewrite equation by replacing $t$ by $T+h$.\n3. On RHS, replace future observations by their forecasts, future errors by zero, and past errors by corresponding residuals.\n\nStart with $h=1$. Repeat for $h=2,3,\\dots$.\n\n## Prediction intervals\n\\vspace*{0.2cm}\\fontsize{14}{15}\\sf\n\n\\begin{block}{95\\% prediction interval}\n$$\\hat{y}_{T+h|T} \\pm 1.96\\sqrt{v_{T+h|T}}$$\nwhere $v_{T+h|T}$ is estimated forecast variance.\n\\end{block}\\pause\\vspace*{-0.3cm}\n\n* $v_{T+1|T}=\\hat{\\sigma}^2$ for all ARIMA models regardless of parameters and orders.\\pause\n* Multi-step prediction intervals for ARIMA(0,0,$q$):\n\\centerline{$\\displaystyle y_t = \\varepsilon_t + \\sum_{i=1}^q \\theta_i \\varepsilon_{t-i}.$}\n\\centerline{$\\displaystyle\nv_{T|T+h} = \\hat{\\sigma}^2 \\left[ 1 + \\sum_{i=1}^{h-1} \\theta_i^2\\right], \\qquad\\text{for~} h=2,3,\\dots.$}\n\n## Prediction intervals\n\n* Prediction intervals **increase in size with forecast horizon**.\n* Prediction intervals can be difficult to calculate by hand\n* Calculations assume residuals are **uncorrelated** and **normally distributed**.\n* Prediction intervals tend to be too narrow.\n * the uncertainty in the parameter estimates has not been accounted for.\n * the ARIMA model assumes historical patterns will not change during the forecast period.\n * the ARIMA model assumes uncorrelated future \\rlap{errors}\n\n\n## Seasonal ARIMA models\n\n| ARIMA | $~\\underbrace{(p, d, q)}$ | $\\underbrace{(P, D, Q)_{m}}$ |\n| ----: | :-----------------------: | :--------------------------: |\n| | ${\\uparrow}$ | ${\\uparrow}$ |\n| | Non-seasonal part | Seasonal part of |\n| | of the model | of the model |\n\nwhere $m =$ number of observations per year.\n\n## Seasonal ARIMA models\n\nE.g., ARIMA$(1, 1, 1)(1, 1, 1)_{4}$ model (without constant)\\pause\n$$(1 - \\phi_{1}B)(1 - \\Phi_{1}B^{4}) (1 - B) (1 - B^{4})y_{t} ~= ~\n(1 + \\theta_{1}B) (1 + \\Theta_{1}B^{4})\\varepsilon_{t}.\n$$\\pause\\vspace*{-1cm}\n\n\\setlength{\\unitlength}{1mm}\n\\begin{footnotesize}\n\\begin{picture}(100,25)(-5,0)\n\\thinlines\n{\\put(15,22){\\vector(0,1){6}}}\n{\\put(32,10){\\vector(0,1){18}}}\n{\\put(48,22){\\vector(0,1){6}}}\n{\\put(66,10){\\vector(0,1){18}}}\n{\\put(97,22){\\vector(0,1){6}}}\n{\\put(115,10){\\vector(0,1){18}}}\n{\\put(0,17){$\\left(\\begin{array}{@{}c@{}} \\text{Non-seasonal} \\\\ \\text{AR(1)}\n \\end{array}\\right)$}}\n{\\put(22,5){$\\left(\\begin{array}{@{}c@{}} \\text{Seasonal} \\\\ \\text{AR(1)}\n \\end{array}\\right)$}}\n{\\put(35,17){$\\left(\\begin{array}{@{}c@{}} \\text{Non-seasonal} \\\\ \\text{difference}\n \\end{array}\\right)$}}\n{\\put(55,5){$\\left(\\begin{array}{@{}c@{}} \\text{Seasonal} \\\\ \\text{difference}\n \\end{array}\\right)$}}\n{\\put(85,17){$\\left(\\begin{array}{@{}c@{}} \\text{Non-seasonal} \\\\ \\text{MA(1)}\n \\end{array}\\right)$}}\n{\\put(105,5){$\\left(\\begin{array}{@{}c@{}} \\text{Seasonal} \\\\ \\text{MA(1)}\n \\end{array}\\right)$}}\n\\end{picture}\n\\end{footnotesize}\n\n\\vspace*{10cm}\n\n## Seasonal ARIMA models\n\nE.g., ARIMA$(1, 1, 1)(1, 1, 1)_{4}$ model (without constant)\n$$(1 - \\phi_{1}B)(1 - 
\\Phi_{1}B^{4}) (1 - B) (1 - B^{4})y_{t} ~= ~\n(1 + \\theta_{1}B) (1 + \\Theta_{1}B^{4})\\varepsilon_{t}.\n$$\\vspace*{-1cm}\n\nAll the factors can be multiplied out and the general model\nwritten as follows:\\vspace*{-0.7cm}\n\\begin{align*}\ny_{t} &= (1 + \\phi_{1})y_{t - 1} - \\phi_1y_{t-2} + (1 + \\Phi_{1})y_{t - 4}\\\\\n&\\text{}\n - (1 + \\phi_{1} + \\Phi_{1} + \\phi_{1}\\Phi_{1})y_{t - 5}\n + (\\phi_{1} + \\phi_{1} \\Phi_{1}) y_{t - 6} \\\\\n& \\text{} - \\Phi_{1} y_{t - 8} + (\\Phi_{1} + \\phi_{1} \\Phi_{1}) y_{t - 9}\n - \\phi_{1} \\Phi_{1} y_{t - 10}\\\\\n &\\text{}\n + \\varepsilon_{t} + \\theta_{1}\\varepsilon_{t - 1} + \\Theta_{1}\\varepsilon_{t - 4} + \\theta_{1}\\Theta_{1}\\varepsilon_{t - 5}.\n\\end{align*}\n\\vspace*{10cm}\n\n## Seasonal ARIMA models\nThe seasonal part of an AR or MA model will be seen in the seasonal lags of\nthe PACF and ACF.\n\n\\alert{ARIMA(0,0,0)(0,0,1)$_{12}$ will show:}\\vspace*{-0.2cm}\n\n * a spike at lag 12 in the ACF but no other significant spikes.\n * The PACF will show exponential decay in the seasonal lags; that is, at lags 12, 24, 36, \\dots.\n\n\\alert{ARIMA(0,0,0)(1,0,0)$_{12}$ will show:}\\vspace*{-0.2cm}\n\n * exponential decay in the seasonal lags of the ACF\n * a single significant spike at lag 12 in the PACF.\n\n\n## ARIMA vs ETS\n\\fontsize{14}{16}\\sf\n\n * Myth that ARIMA models are more general than exponential smoothing.\n * Linear exponential smoothing models all special cases of ARIMA models.\n * Non-linear exponential smoothing models have no equivalent ARIMA counterparts.\n * Many ARIMA models have no exponential smoothing counterparts.\n * ETS models all non-stationary. Models with seasonality or non-damped trend (or both) have two unit roots; all other models have one unit \\rlap{root.}\n\n\\vspace*{10cm}\n\n## ARIMA vs ETS\n\n\n::: {.cell}\n::: {.cell-output-display}\n\n:::\n:::\n\n\n## Equivalences\n\n\\fontsize{13}{15}\\sf\n\n|**ETS model** | **ARIMA model** | **Parameters** |\n| :------------ | :-------------------------- | :----------------------------------- |\n| ETS(A,N,N) | ARIMA(0,1,1) | $\\theta_1 = \\alpha-1$ |\n| ETS(A,A,N) | ARIMA(0,2,2) | $\\theta_1 = \\alpha+\\beta-2$ |\n| | | $\\theta_2 = 1-\\alpha$ |\n| ETS(A,A\\damped,N) | ARIMA(1,1,2) | $\\phi_1=\\phi$ |\n| | | $\\theta_1 = \\alpha+\\phi\\beta-1-\\phi$ |\n| | | $\\theta_2 = (1-\\alpha)\\phi$ |\n| ETS(A,N,A) | ARIMA(0,0,$m$)(0,1,0)$_m$ | |\n| ETS(A,A,A) | ARIMA(0,1,$m+1$)(0,1,0)$_m$ | |\n| ETS(A,A\\damped,A) | ARIMA(1,0,$m+1$)(0,1,0)$_m$ | |\n",
- "supporting": [
- "slides_files"
- ],
- "filters": [
- "rmarkdown/pagebreak.lua"
- ],
- "includes": {},
- "engineDependencies": {},
- "preserve": null,
- "postProcess": false
- }
-}
\ No newline at end of file
diff --git a/_freeze/week9/slides/figure-beamer/venn-1.pdf b/_freeze/week9/slides/figure-beamer/venn-1.pdf
deleted file mode 100644
index 5ff1770..0000000
Binary files a/_freeze/week9/slides/figure-beamer/venn-1.pdf and /dev/null differ
diff --git a/_freeze/week9/slides/figure-html/venn-1.png b/_freeze/week9/slides/figure-html/venn-1.png
deleted file mode 100644
index d74652a..0000000
Binary files a/_freeze/week9/slides/figure-html/venn-1.png and /dev/null differ
diff --git a/assignments.csv b/assignments.csv
index 0aebe63..d8dd237 100644
--- a/assignments.csv
+++ b/assignments.csv
@@ -1,9 +1,9 @@
-Assignment, Due, Moodle, File
-IA1, 2025-03-10,3877940,A1.qmd
-IA2, 2025-04-07,3444670,A2.qmd
-IA3, 2025-04-28,3444674,A3.qmd
-IA4, 2025-05-19,3444678,A4.qmd
-GA1, 2025-03-24,3877946,G1.qmd
-GA2, 2025-04-14,3444683,G2.qmd
-GA3, 2025-05-05,3444684,G3.qmd
-GA4, 2025-05-26,3444685,G4.qmd
+Assignment, Due, Moodle, Moodle3231, File
+IA1, 2025-03-10,3877940,3877918,A1.qmd
+IA2, 2025-04-07,3444670,3444669,A2.qmd
+IA3, 2025-04-28,3444674,3444673,A3.qmd
+IA4, 2025-05-19,3444678,3444677,A4.qmd
+GA1, 2025-03-24,3877946,,G1.qmd
+GA2, 2025-04-14,3444683,,G2.qmd
+GA3, 2025-05-05,3444684,,G3.qmd
+GA4, 2025-05-26,3444685,,G4.qmd
\ No newline at end of file
diff --git a/course_info.R b/course_info.R
index 388e8e7..2869653 100644
--- a/course_info.R
+++ b/course_info.R
@@ -7,10 +7,6 @@ start_semester <- "2025-03-03"
# Week of mid-semester break
mid_semester_break <- "2025-04-21"
-# List of php id for postgrad submission
-php_url <- paste0("https://learning.monash.edu/mod/assign/view.php?id=",
- c("3877918", "3444669","3444684", "3444685"))
-
# Schedule
schedule <- tibble(
Week = seq(12),
@@ -91,6 +87,10 @@ assignments <- read_csv(here::here("assignments.csv")) |>
"https://learning.monash.edu/mod/",
c("quiz", rep("assign", 3)), "/view.php?id=", Moodle
),
+ Moodle3231 = paste0(
+ "https://learning.monash.edu/mod/",
+ c("quiz", rep("assign", 3)), "/view.php?id=", Moodle3231
+ ),
File = paste0("assignments/", File)
)
@@ -166,7 +166,7 @@ submit <- function(schedule, assignment) {
" Submit (ETF5231) ")
cat(button1)
if (str_detect(ass$Assignment, "IA")) {
- url <- php_url[as.numeric(str_sub(assignment, 3))]
+ url <- ass$Moodle3231
button2 <- paste0("