setwd("~/Github/bigdata/predictions/")
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.0 ──
## ✓ ggplot2 3.3.2     ✓ purrr   0.3.4
## ✓ tibble  3.0.4     ✓ dplyr   1.0.2
## ✓ tidyr   1.1.2     ✓ stringr 1.4.0
## ✓ readr   1.4.0     ✓ forcats 0.5.0
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag()    masks stats::lag()
#Data from CDC trend page.

The most recent data is from Wednesday, May 19th.

I’m going to try to improve on the model from last time. Last time I used a log diff-in-diff model: predict this Sunday as last Sunday times (this Thursday / last Thursday). There are a few of these you could use, though: a Thursday one, a Monday one, a Tuesday one…
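
In rough R terms, one of these models is just a ratio rescaling (a sketch only; last_sun, this_thu, and last_thu are placeholder names for illustration, not columns in the data below):

pred_sun = last_sun * (this_thu / last_thu) #scale last Sunday by the week-over-week growth observed on Thursday
#In logs, this is the diff-in-diff form: the predicted week-over-week log change for Sunday equals the observed one for Thursday.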

I’m going to build out 3 (Monday, Tuesday, Wednesday) [I don’t have the data for Thursday yet]. Then I’m going to consider a few different ensembles: the plain mean prediction, the MSPE variance-weighted mean prediction, and the MSPE covariance-weighted mean prediction. (When I build the weighted models, I’m going to include the ‘naive’ mean as one of the submodels.) Then I’ll test out of sample, to get some sense of which is best. Then I’ll rebuild using the full sample.
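
As a rough sketch of those three weightings (hypothetical names: sub_preds is a matrix with one column of predictions per submodel, err_cov is the estimated covariance of their percent errors; the actual construction follows further down):

k = ncol(sub_preds)
w_mean = rep(1/k, k)                  #naive mean: equal weights
w_var  = diag(solve(err_cov))         #variance weighting: diagonal of the inverse error covariance
w_var  = w_var/sum(w_var)
w_cov  = (err_cov %*% rep(1, k))[, 1] #covariance weighting: row sums of the error covariance
w_cov  = w_cov/sum(w_cov)
ens_pred = sub_preds %*% w_var        #an ensemble prediction is the weighted sum of the submodel predictions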

cases = read_csv("cdc_cases_may_21.csv",skip=2)
## 
## ── Column specification ────────────────────────────────────────────────────────
## cols(
##   State = col_character(),
##   Date = col_character(),
##   `New Cases` = col_double(),
##   `7-Day Moving Avg` = col_double()
## )
cases = cases %>% select(-State)

# Need to make Date more usable.
library(lubridate)
## 
## Attaching package: 'lubridate'
## The following objects are masked from 'package:base':
## 
##     date, intersect, setdiff, union
cases = cases %>% 
  mutate(Date=mdy(Date)) %>% #Convert to Date from Chr
  mutate(dow = wday(Date)) #Day of week.
#Fix names
cases = cases %>% 
  rename(cases = `New Cases`, 
         avg = `7-Day Moving Avg`,
         date=Date)

#Drop everything on or before July 5, 2020
cases = cases %>% 
  filter(date > mdy("July 05, 2020"))

cases = cases %>%
  mutate(week = isoweek(date))

#2020 had 53 ISO weeks, so weeks from 2021 continue the count at 54+
cases$week[cases$week < 24] = cases$week[cases$week < 24] + 53
#Re-index so the first week in the sample is week 1
cases$week = cases$week - 27

cases = cases %>% 
  select(-avg,-date)
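#Reshape to one row per week with columns d1..d7 (wday() default: 1 = Sunday ... 7 = Saturday), most recent week first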
cases = cases %>% 
  arrange(dow) %>%
  mutate(dow = paste0("d",dow)) %>%
  pivot_wider(names_from=dow,values_from=cases) %>%
  arrange(desc(week)) 

#ld* = previous week's counts (rows are sorted by descending week, so lead() looks one week back)
cases = cases %>% mutate(ld1 = lead(d1),
                         ld2 = lead(d2),
                         ld3 = lead(d3),
                         ld4 = lead(d4),
                         ld5 = lead(d5))

#pd* = predicted Sunday count (d1) implied by each reference day:
#last week's Sunday scaled by this week's / last week's count on that day
cases = cases %>% mutate(pd2 = ld1*d2/ld2,
                         pd3 = ld1*d3/ld3,
                         pd4 = ld1*d4/ld4,
                         pd5 = ld1*d5/ld5)

mod1 = lm(d1~.-week-1-d6-d7,data=cases)
#Gibberish.

mod2 = lm(d1~pd2+pd3+pd4+pd5-1,data=cases)


# Okay. Still not interpretable. 
# Let me explain myself a touch. Above, I've created 4 models. All based on my 
# model from last week -- but using a different day of the week as reference.
# Monday, Tuesday, Wednesday, and Thursday. 
# I want to combine these models usefully. Ideally some kind of mean. Let's start there.

#Build the naive mean (pd5/Thursday is left out -- no Thursday data for the current week yet)
cases = cases %>% mutate(mp = (pd2+pd3+pd4)/3)
#Look at performance (in sample?) compared to last week. 
mod3 = lm(d1~mp-1,data=cases)
mod4 = lm(d1~pd4-1,data=cases)

#Let's look at logs real quick...
mod3a = lm(log(d1)~log(mp)-1,data=cases)
mod4a = lm(log(d1)~log(pd4)-1,data=cases)

#Hmmm... just realized I'm still estimating coefficients. Drop them: regress the log ratio on an intercept alone (i.e., estimate the mean log ratio of actual to predicted).
mod3b = lm(log(d1/mp) ~1,data=cases)
mod4b = lm(log(d1/pd4)~1,data=cases)

#Really just extremely high performance -- though I haven't looked at anything rigorously yet. 

#So that's the naive mean. It looks to _marginally_ outperform the single model. 

#Let's... ratchet up slightly. Estimate OOS errors.

cases = cases %>% mutate(
  errs2 = d1-pd2,
  errs3 = d1-pd3,
  errs4 = d1-pd4,
 # errs5 = d1-pd5,
  errsmp = d1-mp
)

Wprime = cov(cases %>% select(starts_with("errs")),use="pairwise.complete.obs")
#Possibly not positive semi-definite with pairwise-complete observations...
#Also... damn. It's not percent-weighted. These are raw errors.

cases = cases %>% mutate(
  perr2 = errs2/pd2,
  perr3 = errs3/pd3,
  perr4 = errs4/pd4,
 # perr5 = errs5/pd5,
  perrmp = errsmp/mp
)
#Subsetting to weeks < 30 so that weeks 30+ remain as an OOS section...
Wprime = cov(cases %>% filter(week < 30) %>%
            #   select(-perr5) %>%
               select(starts_with("perr")) ,use="complete")
sqrt(diag(Wprime))
##     perr2     perr3     perr4    perrmp 
## 0.1532518 0.1954849 0.1023942 0.1320249
#Looks like day 4 and the naive mean do noticeably better than days 2/3. 

#Using the diagonal of the inverse covariance (precision) matrix for weights.
weights = diag(solve(Wprime))[1:3] #dropping mean
weights = weights/sum(weights)
weights
##     perr2     perr3     perr4 
## 0.3469068 0.2647508 0.3883424
#reintroducing mean
weights = diag(solve(Wprime))
weights = weights/sum(weights)
weights #mean gets ~74% of the weight. But not all.
##      perr2      perr3      perr4     perrmp 
## 0.09067971 0.06920454 0.10151078 0.73860496
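#Variance-weighted ensemble prediction for each week: weighted sum of pd2, pd3, pd4, and the naive mean mp (columns ordered to match the weights)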
preds = as.matrix(cases %>% drop_na() %>% select(-pd5) %>%
                    select(starts_with("pd"),mp)) %*% weights
preds = cbind(cases$week[c(-1,-46)],preds,cases$d1[c(-1,-46)]) 
colnames(preds) = c("week","pred.var","outcome")
preds = as_tibble(preds)
preds %>% ggplot(aes(pred.var,outcome,col=week>30)) + geom_point() + scale_x_log10()+scale_y_log10()

#Weeks 30+ are "oos" for the weight-estimation routine.

#Let's try the full covariance weighting: weights proportional to the row sums of the error covariance matrix.
weights.cov = Wprime %*% (rep(1,4))
weights.cov = weights.cov[,1]/sum(weights.cov)
weights.cov
##     perr2     perr3     perr4    perrmp 
## 0.2713484 0.3378363 0.1457483 0.2450670
#This is very different. Why?
#It severely downweights the mean.
#Even though the mean has the lowest MSPE,
#it is also positively correlated with every other entry,
#so it doesn't "hedge".

#Covariance-weighted ensemble prediction, using the same submodel columns as above (pd2, pd3, pd4, mp)
preds.cov = as.matrix(cases %>% drop_na() %>% select(-pd5) %>%
                        select(starts_with("pd"),mp)) %*% weights.cov
preds.cov = cbind(cases$week[c(-1,-46)],preds.cov,cases$d1[c(-1,-46)]) 
colnames(preds.cov) = c("week","pred.cov","outcome")
preds.cov = as_tibble(preds.cov)
preds.cov %>% ggplot(aes(pred.cov,outcome,col=week>30)) + geom_point() + scale_x_log10()+scale_y_log10()

#Comparing OOS performance of 
# 1) naive mean
# 2) variance weighted mean
# 3) covariance weighted mean
#on weeks 30+

#Join the covariance-weighted, variance-weighted, and naive-mean predictions by week
trip_preds = preds.cov %>% 
  left_join(preds,by=c(week="week",outcome="outcome")) %>% 
  left_join(cases %>% select(week,mp,d1),by=c(week="week",outcome="d1"))

trip_errs = trip_preds %>% 
  mutate(errs.cov  = (outcome-pred.cov)/pred.cov,
         errs.var  = (outcome-pred.var)/pred.var,
         errs.mean = (outcome-mp      )/mp) %>%
  filter(week >= 30) 
trip_errs %>% summarize(mspe.cov  = mean(errs.cov^2 ),
                        mspe.var  = mean(errs.var^2 ),
                        mspe.mean = mean(errs.mean^2))
## # A tibble: 1 x 3
##   mspe.cov mspe.var mspe.mean
##      <dbl>    <dbl>     <dbl>
## 1   0.0139   0.0132    0.0134
#They look... very similar. The covariance-weighted version is the worst of the three. But I might... just take an average of these three models. 

trip_errs %>% 
  summarize(cov.20  = mean(errs.cov  < -0.2 | errs.cov  > 0.2),
            var.20  = mean(errs.var  < -0.2 | errs.var  > 0.2),
            mean.20 = mean(errs.mean < -0.2 | errs.mean > 0.2))
## # A tibble: 1 x 3
##   cov.20 var.20 mean.20
##    <dbl>  <dbl>   <dbl>
## 1 0.0625 0.0625  0.0625
# Their OOS 20% error rate is ~6% for each of them. That represents
# exactly 1 occurrence in the 16 OOS weeks (1/16 = 6.25%).
# To be safe, let's add 50% to the odds: 1.5 * 6.25% is roughly 9%.

#Re-estimate on full data.
Wprime = cov(cases %>%
          #     select(-perr5) %>%
               select(starts_with("perr")) ,use="complete")
weights = diag(solve(Wprime))
weights = weights/sum(weights)
weights.cov = Wprime %*% (rep(1,4))
weights.cov = weights.cov[,1]/sum(weights.cov)

#Final weights
weights
##      perr2      perr3      perr4     perrmp 
## 0.07925136 0.07515780 0.10410056 0.74149028
weights.cov
##     perr2     perr3     perr4    perrmp 
## 0.2786640 0.3239662 0.1515349 0.2458348
weights.cov/weights #ratio of covariance weights to variance weights
##     perr2     perr3     perr4    perrmp 
## 3.5162048 4.3104804 1.4556590 0.3315416
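#Apply the full-sample weights to the most recent week's submodel predictions (row 1; rows are sorted by descending week)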
pred.mean = cases$mp[1]
pred.cov = (as.matrix(cases %>% select(pd2,pd3,pd4,mp))[1,] %*% weights.cov)[1,]
pred.var = (as.matrix(cases %>% select(pd2,pd3,pd4,mp))[1,] %*% weights)[1,]

pred.mean
## [1] 15055.32
pred.cov
## [1] 15080.04
pred.var
## [1] 15049.37
mean.preds = mean(c(pred.mean,pred.cov,pred.var))

mean.preds
## [1] 15061.58

#Final prediction

mean.preds
## [1] 15061.58

Final guess at P[20% error] = 9%.