1 Load data

## Read
filename <- "Data_Rezidential.RDS"   
Data <- readRDS(filename)  

2 Modify ACE Score to account for the fact that all children are institutionalized

Data$CYW <- ifelse(Data$CYW == 0, 0, Data$CYW - 1) 

3 Descriptive for ACE Score

## Bar Plot
cat("### Frequencies for ACE Score")
### Frequencies for ACE Score
Data %>%
  ggplot(aes(x = as.factor(CYW))) +
  geom_bar(aes(y = (..count..)/sum(..count..))) +
  geom_text(aes(y = ((..count..)/sum(..count..)), label = scales::percent((..count..)/sum(..count..))), stat = "count", vjust = -0.25) +
  scale_y_continuous(labels = percent) +
  labs(title = "ACE Score frequency", y = "Percent", x = "ACE Score")

## Is ACE Score Poisson distributed?
cat("### Does ACE Score follow a Poisson distribution?")
### Does ACE Score follow a Poisson distribution?
# Note: if the p-value is larger than 0.05, we cannot reject H0 that the counts follow a Poisson distribution;
# otherwise, the Poisson distribution is rejected.
gf <- vcd::goodfit(Data$CYW, type = "poisson", method = "ML")    # based on load the vcd package
# plot(gf, main = "Poisson", shade = TRUE, legend = FALSE)
# summary(gf)                                                    # Likelihood Ratio Test
## automatically extract the p-value from the printed summary
gf.summary <- capture.output(summary(gf))[[7]]                   # if the p-value is very small, pick element [5] instead
pvalue <- unlist(strsplit(gf.summary, split = " "))
pvalue <- as.numeric(pvalue[length(pvalue)]); 
cat("Goodness-of-fit test for poisson distribution p-value: ", round(pvalue, 3))
Goodness-of-fit test for poisson distribution p-value:  0
if(pvalue > .05) cat("Yes, it is Poisson") else cat("No, it is not Poisson")
No, it is not Poisson
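The string parsing above is fragile; a less brittle sketch, assuming the standard components of the vcd "goodfit" object (observed, fitted, df), recomputes the likelihood-ratio statistic directly:

gf_obs <- gf$observed
gf_fit <- gf$fitted
G2 <- 2 * sum(ifelse(gf_obs > 0, gf_obs * log(gf_obs / gf_fit), 0))   # likelihood-ratio (G2) statistic
pvalue_alt <- pchisq(G2, df = gf$df, lower.tail = FALSE)              # upper-tail chi-square p-value
round(pvalue_alt, 3)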
## Rootograms
hroot_plot1 <- plot(gf, type = "hanging", shade = TRUE, main = "Hanging Rootogram", return_grob = TRUE)       # hanging rootogram

droot_plot1 <- plot(gf, type = "deviation", shade = TRUE, main = "Deviation Rootogram", return_grob = TRUE)   # deviation rootogram

subtext1 <- "Left: hanging rootogram; Right: deviation rootogram. Hanging rootogram Color reflects the sign and magnitude of the contributions
to lack of fit. moves the rootogram bars so their tops are at the expected frequencies for poisson distribution."
vcd::mplot(hroot_plot1, droot_plot1, sub = subtext1, gp_sub = grid::gpar(fontsize = 11))

## Poissonness plots
dist_plot1 <- vcd::distplot(Data$CYW, type = "poisson", xlab = "ACE", return_grob = TRUE)

subtext2 <- "The fitted line is not within the confidence intervals, indicating the Poisson model is not adequate for these data"
vcd::mplot(dist_plot1, sub = subtext2, gp_sub = grid::gpar(fontsize = 11))

## Negative Binomial?
cat("### Does ACE Scoare follow a Negative Binomial distribution?")
### Does ACE Scoare follow a Negative Binomial distribution?
gf2 <- vcd::goodfit(Data$CYW, type = "nbinomial")
# summary(gf2)                                                                 # Likelihood Ratio Test
# plot(gf2, main = "Negative binomial", shade = TRUE, legend = FALSE)
dist_plot2 <- vcd::distplot(Data$CYW, type = "nbinomial", xlab = "ACE", return_grob = TRUE)

subtext3 <- "The fitted line is within the confidence intervals, indicating the adequacy of the Poisson model for these data"
vcd::mplot(dist_plot2, sub = subtext3, gp_sub = grid::gpar(fontsize = 11))

## Ord plots: Diagnostic slope and intercept for four discrete distributions
vcd::Ord_plot(Data$CYW, main = "Ord plot", gp = grid::gpar(cex = 1), pch = 16)

4 Test Linear Regression - ACE Score

cat("#### Test a good liniar model")
#### Test a good liniar model
## Data for Linear Regression Step
Data_lm <-
  Data %>%    # recode v_mama_nastere to binary
    mutate(v_mama_nastere_d = fct_recode(v_mama_nastere, "1" = "<19" , "0" = "20-25", "0" = "26–34", "0" = "35>")) %>%
    mutate_at(vars(v_mama_nastere_d), funs(as.numeric(as.character(.)))) %>%
    select(CYW, varsta, gen, 9:29, v_mama_nastere_d) %>%
    mutate_at(vars(expunere_tox:comunit), funs(replace_na(., 0)))
library(gvlma)
library(olsrr)
## Linear Regression for Test - full model
mod_lm_full <- lm(CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom +  neglijare + varsta + boli, data = Data_lm)
moderndive::get_regression_table(mod_lm_full) 
moderndive::get_regression_summaries(mod_lm_full)
par(mfrow = c(2, 2)); plot(mod_lm_full)

gvlma::gvlma(mod_lm_full)

Call:
lm(formula = CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom + 
    neglijare + varsta + boli, data = Data_lm)

Coefficients:
 (Intercept)  expunere_tox   varsta_inst      tras_dez    schimb_dom     neglijare        varsta          boli  
      1.8865        0.9103        0.0810        1.1212        0.7154        0.5677        0.0968        0.6371  


ASSESSMENT OF THE LINEAR MODEL ASSUMPTIONS
USING THE GLOBAL TEST ON 4 DEGREES-OF-FREEDOM:
Level of Significance =  0.05 

Call:
 gvlma::gvlma(x = mod_lm_full) 
# Influential Observations -- Cook's D plot
# identify D values > 4/(n-k-1) 
cutoff <- 4/((nrow(Data)-length(mod_lm_full$coefficients)-2)) 
plot(mod_lm_full, which = 4, cook.levels=cutoff)
# Influence Plot 
car::influencePlot(mod_lm_full, main = "Influence Plot", sub = "Circle size is proportional to Cook's Distance")

# Evaluate Collinearity
olsrr::ols_coll_diag(mod_lm_full) # VIF and tolerance from olsrr
Tolerance and Variance Inflation Factor / Eigenvalue and Condition Index tables (values not captured in this output).
car::vif(mod_lm_full) # variance inflation factors 
expunere_tox  varsta_inst     tras_dez   schimb_dom    neglijare       varsta         boli 
    1.085937     1.128838     1.082916     1.067616     1.047941     1.138849     1.074788 
sqrt(car::vif(mod_lm_full)) > 2 # any TRUE would flag a collinearity problem
expunere_tox  varsta_inst     tras_dez   schimb_dom    neglijare       varsta         boli 
       FALSE        FALSE        FALSE        FALSE        FALSE        FALSE        FALSE 
# Evaluate Nonlinearity
# component + residual plot 
car::crPlots(mod_lm_full, ask = FALSE)

# Ceres plots 
# car::ceresPlots(mod_lm_full, ask = FALSE)

5 Mother's age at birth (v_mama_nastere) is not a predictor for ACE score

Data_var_imp <-
  Data %>%    # keep outcome, demographics, candidate predictors, v_mama_nastere and tip_chestionar
    select(CYW, varsta, gen, 9:29, v_mama_nastere, tip_chestionar) %>%
    mutate_at(vars(expunere_tox:comunit), funs(replace_na(., 0)))
with(Data_var_imp,
     by(CYW, INDICES = v_mama_nastere, FUN = summarytools::descr, transpose = TRUE,
        stats = c("n.valid", "mean", "sd", "min", "med", "max", "skewness", "kurtosis"), plain.ascii = FALSE, headings = FALSE))  %>%  
          view(method = "render", style = "rmarkdown", footnote = NA)

v_mama_nastere  N.Valid  Mean  Std.Dev  Min   Median  Max    Skewness  Kurtosis
<19             157      4.61  3.44     0.00  4.00    17.00   0.76      0.16
20-25           497      4.84  3.24     0.00  5.00    18.00   0.46     -0.15
26–34           369      4.18  2.84     0.00  4.00    13.00   0.33     -0.53
35>             115      4.60  2.66     0.00  5.00    13.00   0.50      0.06

ggplot(Data_var_imp, aes(x = v_mama_nastere, y = CYW)) +
  geom_boxplot() +
  stat_summary(fun.data = mean_se,  colour = "darkred") +
  ggpubr::stat_compare_means(method = "t.test", 
                             label = "p.signif",                                         # to avoid scientific notation of very small p-values
                             paired = FALSE, 
                             comparisons = list(c("<19", "20-25"),
                                                c("<19", "26–34"),
                                                c("20-25", "26–34"),
                                                c("20-25", "35>"),
                                                c("26–34", "35>"))) 

ggplot(Data_var_imp, aes(x = gen, y = CYW)) +
  facet_wrap(~v_mama_nastere) + 
  geom_boxplot() +
  stat_summary(fun.data = mean_se,  colour = "darkred") +
  ggpubr::stat_compare_means(method = "t.test", 
                             label = "p.signif",                                         # to avoid scientific notation of very small p-values
                             paired = FALSE, 
                             comparisons = list(c("m", "f"))) 

6 Gender (gen) is not a predictor for ACE score

with(Data_var_imp,
     by(CYW, INDICES = gen, FUN = summarytools::descr, transpose = TRUE,
        stats = c("n.valid", "mean", "sd", "min", "med", "max", "skewness", "kurtosis"), plain.ascii = FALSE, headings = FALSE))  %>%  
          view(method = "render", style = "rmarkdown", footnote = NA)

gen  N.Valid  Mean  Std.Dev  Min   Median  Max    Skewness  Kurtosis
f    666      4.64  3.20     0.00  4.00    18.00   0.50     -0.13
m    609      4.54  3.13     0.00  4.00    15.00   0.48     -0.32

tadaatoolbox::tadaa_t.test(data = Data_var_imp,
                            response = CYW, group = gen, paired = FALSE)   # , print = "markdown"
ggplot(Data_var_imp, aes(x = gen, y = CYW)) +
  geom_boxplot() +
  stat_summary(fun.data = mean_se,  colour = "darkred") +
  ggpubr::stat_compare_means(method = "t.test", 
                             label = "p.signif",                                         # to avoid scientific notation of very small p-values
                             paired = FALSE, 
                             comparisons = list(c("f", "m")))  

7 Descriptives by tip_chestionar

with(Data_var_imp,
     by(CYW, INDICES = tip_chestionar, FUN = summarytools::descr, transpose = TRUE,
        stats = c("n.valid", "mean", "sd", "min", "med", "max", "skewness", "kurtosis"), plain.ascii = FALSE, headings = FALSE))  %>%  
          view(method = "render", style = "rmarkdown", footnote = NA)

tip_chestionar  N.Valid  Mean  Std.Dev  Min   Median  Max    Skewness  Kurtosis
5-8ani           152     3.78  2.66     0.00  3.50    12.00   0.39     -0.45
5-8intarziere     78     6.13  3.20     0.00  6.00    13.00  -0.32     -0.39
9-18ani         1045     4.60  3.19     0.00  4.00    18.00   0.54     -0.14

t.test(CYW ~ tip_chestionar, data = Data_var_imp[Data_var_imp$tip_chestionar %in% c("5-8ani", "5-8intarziere"),])

    Welch Two Sample t-test

data:  CYW by tip_chestionar
t = -5.5818, df = 132.89, p-value = 0.0000001286
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -3.185314 -1.518465
sample estimates:
       mean in group 5-8ani mean in group 5-8intarziere 
                   3.776316                    6.128205 
tadaatoolbox::tadaa_t.test(data = Data_var_imp[Data_var_imp$tip_chestionar %in% c("5-8ani", "5-8intarziere"),],
                            response = CYW, group = tip_chestionar, paired = FALSE)   # , print = "markdown"
t.test(CYW ~ tip_chestionar, data = Data_var_imp[Data_var_imp$tip_chestionar %in% c("5-8intarziere", "9-18ani"),])  # this works, tadaatoolbox bug

    Welch Two Sample t-test

data:  CYW by tip_chestionar
t = 4.0759, df = 88.871, p-value = 0.00009951
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 0.7836904 2.2746337
sample estimates:
mean in group 5-8intarziere       mean in group 9-18ani 
                   6.128205                    4.599043 
# tadaatoolbox::tadaa_t.test(data = Data_var_imp[Data_var_imp$tip_chestionar %in% c("5-8intarziere", "9-18ani"),],
#                             response = CYW, group = tip_chestionar, paired = FALSE)   # , print = "markdown"
# 
t.test(CYW ~ tip_chestionar, data = Data_var_imp[Data_var_imp$tip_chestionar %in% c("5-8ani", "9-18ani"),])  # this works, tadaatoolbox bug

    Welch Two Sample t-test

data:  CYW by tip_chestionar
t = -3.467, df = 219.6, p-value = 0.0006332
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -1.2904142 -0.3550403
sample estimates:
 mean in group 5-8ani mean in group 9-18ani 
             3.776316              4.599043 
# tadaatoolbox::tadaa_t.test(data = Data_var_imp[Data_var_imp$tip_chestionar %in% c("5-8ani", "9-18ani"),],
#                             response = CYW, group = tip_chestionar, paired = FALSE)   # , print = "markdown"
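The three pairwise comparisons above can also be obtained in a single call with base R; a minimal sketch (the Holm adjustment and non-pooled SDs are assumptions, chosen to mirror the separate Welch tests):

pairwise.t.test(Data_var_imp$CYW, Data_var_imp$tip_chestionar,
                p.adjust.method = "holm", pool.sd = FALSE)        # Welch-type pairwise t-tests with multiplicity correction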
ggplot(Data_var_imp, aes(x = tip_chestionar, y = CYW)) +
  geom_boxplot() +
  stat_summary(fun.data = mean_se,  colour = "darkred") +
  ggpubr::stat_compare_means(method = "t.test", 
                             label = "p.signif",                                         # to avoid scientific notation of very small p-values
                             paired = FALSE, 
                             comparisons = list(c("5-8ani", "5-8intarziere"),
                                                c("5-8intarziere", "9-18ani"),
                                                c("5-8ani", "9-18ani")))  

8 Poisson Regression Model

## Poisson Regression Step
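Data_lm_step is not constructed in this excerpt; a plausible sketch (an assumption, paralleling Data_var_imp above but without tip_chestionar and keeping complete cases only):

Data_lm_step <-
  Data %>%
    select(CYW, varsta, gen, 9:29, v_mama_nastere) %>%                  # outcome plus all candidate predictors
    mutate_at(vars(expunere_tox:comunit), funs(replace_na(., 0))) %>%   # missing event indicators treated as 0
    drop_na()                                                           # step() needs a fixed set of complete cases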
step_pois_null <- glm(CYW ~ 1, family = poisson, data = Data_lm_step)
step_pois_full <- glm(CYW ~ ., family = poisson, data = Data_lm_step)
cat("#### Poisson - Bidirectional step by BIC")
#### Poisson - Bidirectional step by BIC
mod_pois_BIC <- step(step_pois_null, scope=list(lower=formula(step_pois_null), upper=formula(step_pois_full)), direction="both", k = log(nrow(Data_lm_step)), trace = FALSE)
summary(mod_pois_BIC)

Call:
glm(formula = CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom + 
    neglijare + varsta + boli + comunit + nr_frati, family = poisson, 
    data = Data_lm_step)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-3.9623  -1.0943  -0.1427   0.8560   3.4029  

Coefficients:
             Estimate Std. Error z value             Pr(>|z|)    
(Intercept)  0.817902   0.073330  11.154 < 0.0000000000000002 ***
expunere_tox 0.159382   0.033891   4.703        0.00000256711 ***
varsta_inst  0.024757   0.004187   5.913        0.00000000335 ***
tras_dez     0.189401   0.052076   3.637             0.000276 ***
schimb_dom   0.146103   0.035932   4.066        0.00004779879 ***
neglijare    0.130465   0.031784   4.105        0.00004047885 ***
varsta       0.017343   0.005047   3.437             0.000589 ***
boli         0.150929   0.045275   3.334             0.000857 ***
comunit      0.105935   0.036907   2.870             0.004101 ** 
nr_frati     0.015987   0.005643   2.833             0.004609 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for poisson family taken to be 1)

    Null deviance: 2165.1  on 929  degrees of freedom
Residual deviance: 1938.7  on 920  degrees of freedom
AIC: 4776.7

Number of Fisher Scoring iterations: 5
# summary(step(step_pois_full, ~.^2,  direction = "both", k = log(nrow(Data_lm_step)), trace = FALSE))   # step for all terms and all interactions !!!
cat("#### Poisson - Bidirectional step by AIC")
#### Poisson - Bidirectional step by AIC
mod_pois_AIC <- step(step_pois_null, scope=list(lower=formula(step_pois_null), upper=formula(step_pois_full)), direction="both", trace = FALSE)
summary(mod_pois_AIC) 

Call:
glm(formula = CYW ~ expunere_tox + varsta_inst + schimb_dom + 
    neglijare + varsta + boli + comunit + nr_frati + v_mama_nastere + 
    TCC + gen + temperam + scoala_spec + intarziere + asfixie + 
    tulb_cond + abuz_sub, family = poisson, data = Data_lm_step)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-4.2716  -1.0910  -0.1607   0.8802   3.4389  

Coefficients:
                     Estimate Std. Error z value             Pr(>|z|)    
(Intercept)          0.786797   0.085342   9.219 < 0.0000000000000002 ***
expunere_tox         0.136271   0.034537   3.946         0.0000795897 ***
varsta_inst          0.023153   0.004238   5.463         0.0000000469 ***
schimb_dom           0.137522   0.036335   3.785             0.000154 ***
neglijare            0.143642   0.032056   4.481         0.0000074308 ***
varsta               0.020166   0.005173   3.898         0.0000969802 ***
boli                 0.131460   0.046740   2.813             0.004914 ** 
comunit              0.085488   0.037782   2.263             0.023657 *  
nr_frati             0.019418   0.005775   3.362             0.000773 ***
v_mama_nastere20-25  0.065748   0.049539   1.327             0.184444    
v_mama_nastere26–34 -0.054876   0.053270  -1.030             0.302933    
v_mama_nastere35>   -0.040525   0.066041  -0.614             0.539458    
TCC                  0.179279   0.101340   1.769             0.076880 .  
genm                -0.063241   0.031126  -2.032             0.042177 *  
temperam             0.079530   0.046975   1.693             0.090448 .  
scoala_spec         -0.237303   0.075600  -3.139             0.001696 ** 
intarziere           0.106579   0.047019   2.267             0.023409 *  
asfixie              0.223835   0.105030   2.131             0.033077 *  
tulb_cond            0.120407   0.050427   2.388             0.016952 *  
abuz_sub            -0.117746   0.072784  -1.618             0.105715    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for poisson family taken to be 1)

    Null deviance: 2165.1  on 929  degrees of freedom
Residual deviance: 1900.5  on 910  degrees of freedom
AIC: 4758.6

Number of Fisher Scoring iterations: 5
# summary(step(step_pois_full, ~.^2,  direction = "both", trace = FALSE))   # step for all terms and all interactions !!!
## Data for GLMs
Data_glm <-
  Data %>%    # recode v_mama_nastere to binary
    mutate(v_mama_nastere_d = fct_recode(v_mama_nastere, "1" = "<19" , "0" = "20-25", "0" = "26–34", "0" = "35>")) %>%
    mutate_at(vars(v_mama_nastere_d), funs(as.numeric(as.character(.)))) %>%
    select(CYW, varsta, gen, 9:29, v_mama_nastere_d) %>%
    mutate_at(vars(expunere_tox:comunit), funs(replace_na(., 0)))
## GLM - Poisson
cat("#### Test a good poisson model")
#### Test a good poisson model
# mod_pois <- glm(CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom + boli + neglijare + varsta + 
#                       nr_frati + TCC, family = poisson, data = Data_glm)                         # first decent model
mod_pois <- glm(CYW ~ expunere_tox + varsta_inst +  schimb_dom + boli + varsta +
                      nr_frati + gen + comunit + intarziere + TCC + neglijare + 
                      scoala_spec + tulb_cond, family = poisson, data = Data_glm)                  # best possible model 
summary(mod_pois) # tidy(mod_pois) %>% mutate_if(is.numeric, round, 2)  %>% xlsx::write.xlsx(., file = "pois.xlsx") 

Call:
glm(formula = CYW ~ expunere_tox + varsta_inst + schimb_dom + 
    boli + varsta + nr_frati + gen + comunit + intarziere + TCC + 
    neglijare + scoala_spec + tulb_cond, family = poisson, data = Data_glm)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-4.1038  -1.1869  -0.1411   0.8357   4.4377  

Coefficients:
              Estimate Std. Error z value             Pr(>|z|)    
(Intercept)   0.874702   0.070492  12.409 < 0.0000000000000002 ***
expunere_tox  0.139297   0.031750   4.387          0.000011479 ***
varsta_inst   0.015875   0.003743   4.241          0.000022227 ***
schimb_dom    0.130945   0.033140   3.951          0.000077725 ***
boli          0.098082   0.043801   2.239             0.025140 *  
varsta        0.023857   0.004688   5.089          0.000000359 ***
nr_frati      0.011953   0.005202   2.298             0.021579 *  
genm         -0.059882   0.028346  -2.113             0.034643 *  
comunit       0.085733   0.034140   2.511             0.012032 *  
intarziere    0.128884   0.043780   2.944             0.003241 ** 
TCC           0.219910   0.083426   2.636             0.008389 ** 
neglijare     0.113752   0.028729   3.959          0.000075111 ***
scoala_spec  -0.217585   0.065682  -3.313             0.000924 ***
tulb_cond     0.148534   0.040335   3.683             0.000231 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for poisson family taken to be 1)

    Null deviance: 2666.1  on 1106  degrees of freedom
Residual deviance: 2416.2  on 1093  degrees of freedom
  (168 observations deleted due to missingness)
AIC: 5800.8

Number of Fisher Scoring iterations: 5
plot(mod_pois, ask = FALSE)

# pr <- sum(residuals(mod_pois, type="pearson")^2)                  # Pearson Chi2
# pr/mod_pois$df.residual                                           # dispersion statistic
msme:::P__disp(mod_pois)                                            # replaces the two commented lines above: reports both the Pearson Chi2 and the dispersion statistic
pearson.chi2   dispersion 
  2145.03392      1.96252 
dev <- deviance(mod_pois)
df <- df.residual(mod_pois)
p_value <- 1-pchisq(dev,df)
print(matrix(c("Deviance GOF"," ","D",round(dev,4),"df",df,     # the deviance GOF test, a Chi2 p < 0.05 indicates that the model is considered well fit
     "p_value",p_value), ncol=2))
     [,1]           [,2]     
[1,] "Deviance GOF" "df"     
[2,] " "            "1093"   
[3,] "D"            "p_value"
[4,] "2416.2099"    "0"      
# These assess the overall performance of a model in reproducing the data. Commonly used measures include the Pearson chi-square
# and the likelihood-ratio (deviance) statistics, which can be seen as weighted sums of residuals.
COUNT::modelfit(mod_pois)
$AIC
[1] 5800.816

$AICn
[1] 5.240123

$BIC
[1] 5870.948

$BICqh
[1] 5.28158
# cnt <- table(Data_lm$CYW)
# dataf <- data.frame(prop.table(table(Data_lm$CYW) ) )
# dataf$cumulative <- cumsum(dataf$Freq)
# datafall <- data.frame(cnt, dataf$Freq*100, dataf$cumulative * 100)
mod_pois$aic / (mod_pois$df.null+1)                                # AIC/n
[1] 5.240123
exp(coef(mod_pois))                                                # IRR
 (Intercept) expunere_tox  varsta_inst   schimb_dom         boli       varsta     nr_frati         genm      comunit   intarziere          TCC 
   2.3981606    1.1494650    1.0160019    1.1399049    1.1030528    1.0241442    1.0120251    0.9418756    1.0895156    1.1375582    1.2459646 
   neglijare  scoala_spec    tulb_cond 
   1.1204746    0.8044595    1.1601325 
exp(coef(mod_pois))*sqrt(diag(vcov(mod_pois)))                     # delta method
 (Intercept) expunere_tox  varsta_inst   schimb_dom         boli       varsta     nr_frati         genm      comunit   intarziere          TCC 
 0.169051093  0.036495835  0.003802940  0.037775996  0.048315120  0.004800822  0.005264885  0.026698830  0.037196372  0.049802209  0.103946225 
   neglijare  scoala_spec    tulb_cond 
 0.032190189  0.052838615  0.046793765 
exp(confint.default(mod_pois))                                     # CI of IRR
                 2.5 %    97.5 %
(Intercept)  2.0886967 2.7534751
expunere_tox 1.0801146 1.2232680
varsta_inst  1.0085755 1.0234829
schimb_dom   1.0682186 1.2164019
boli         1.0123078 1.2019324
varsta       1.0147779 1.0335970
nr_frati     1.0017585 1.0223968
genm         0.8909740 0.9956853
comunit      1.0189977 1.1649136
intarziere   1.0440182 1.2394789
TCC          1.0580181 1.4672979
neglijare    1.0591264 1.1853763
scoala_spec  0.7072866 0.9149827
tulb_cond    1.0719500 1.2555693
## Test for overdispersion
# Z-score test (assumptions: the data set is large; z is t-distributed). A significant intercept indicates overdispersion.
mu <-predict(mod_pois, type="response")
z <- ((Data_glm$CYW - mu)^2 - Data_glm$CYW)/ (mu * sqrt(2))
summary(zscore <- lm(z ~ 1))         # the hypothesis of no overdispersion is rejected, i.e. real overdispersion likely exists in the data

Call:
lm(formula = z ~ 1)

Residuals:
    Min      1Q  Median      3Q     Max 
-1.7800 -1.5225 -0.7561  0.8374 19.2377 

Coefficients:
            Estimate Std. Error t value            Pr(>|t|)    
(Intercept)  1.02212    0.06267   16.31 <0.0000000000000002 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 2.238 on 1274 degrees of freedom
# Lagrange multiplier test: a significant chi-square indicates overdispersion
obs <- nrow(Data_glm)   # continue from Table 3.2
mmu <- mean(mu); nybar <- obs*mmu; musq <- mu*mu
mu2 <- mean(musq)*obs
chival <- (mu2 - nybar)^2/(2*mu2); chival 
[1] 9217.919
pchisq(chival,1,lower.tail = FALSE)      # the hypothesis of no overdispersion is again rejected
[1] 0
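An equivalent score test for overdispersion is available in the AER package (not attached in this session, so this is an assumption):

AER::dispersiontest(mod_pois)            # H0: equidispersion; a small p-value indicates overdispersion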
# Many statisticians argue that robust standard errors should be the default standard errors for all count response regression models.
# A robust variance estimator adjusts standard errors for correlation in the data. That is, robust standard
# errors should be used when the data are not independent, perhaps gathered
# over different households, hospitals, schools, cities, litters, and so forth.
# Robust variance estimators have also been referred to as sandwich variance
# estimators or heteroskedastic robust estimators.
# Lack of fit in a GLM for count data can result either from a mis-specified model for the systematic
# component (omitted or unmeasured predictors, nonlinear relations, etc.) or from failure of the
# Poisson mean = variance assumption. Thus, use of these methods requires some high degree of
# confidence that the systematic part of the model has been correctly specified, so that any lack of fit
# can be attributed to overdispersion.
# One way of dealing with this is to base inference on so-called sandwich covariance estimators
# that are robust against some types of model mis-specification.
require("sandwich")                      # over-dispersion is present in this data set, we re-compute the Wald tests using sandwich standard errors
sandw_coefse <- lmtest::coeftest(mod_pois, vcov = sandwich)       # sandwich-adjusted Poisson
sandw_coefse

z test of coefficients:

               Estimate Std. Error z value              Pr(>|z|)    
(Intercept)   0.8747020  0.0964586  9.0682 < 0.00000000000000022 ***
expunere_tox  0.1392966  0.0424894  3.2784             0.0010440 ** 
varsta_inst   0.0158752  0.0056217  2.8239             0.0047442 ** 
schimb_dom    0.1309448  0.0430544  3.0414             0.0023549 ** 
boli          0.0980816  0.0596660  1.6438             0.1002080    
varsta        0.0238574  0.0063528  3.7554             0.0001731 ***
nr_frati      0.0119533  0.0072897  1.6398             0.1010544    
genm         -0.0598821  0.0387525 -1.5452             0.1222875    
comunit       0.0857332  0.0437573  1.9593             0.0500791 .  
intarziere    0.1288840  0.0616466  2.0907             0.0365559 *  
TCC           0.2199100  0.1162442  1.8918             0.0585185 .  
neglijare     0.1137524  0.0400693  2.8389             0.0045271 ** 
scoala_spec  -0.2175847  0.1003456 -2.1684             0.0301319 *  
tulb_cond     0.1485342  0.0554158  2.6804             0.0073543 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
sandw_ci <- lmtest::coefci(mod_pois, vcov = sandwich)
exp(sandw_ci)                             # CI of sandwich-adjusted IRR    
                 2.5 %    97.5 %
(Intercept)  1.9850552 2.8972365
expunere_tox 1.0576177 1.2492885
varsta_inst  1.0048687 1.0272585
schimb_dom   1.0476607 1.2402709
boli         0.9813153 1.2398926
varsta       1.0114714 1.0369759
nr_frati     0.9976686 1.0265882
genm         0.8729861 1.0162013
comunit      0.9999704 1.1870794
intarziere   1.0080914 1.2836521
TCC          0.9921069 1.5647787
neglijare    1.0358455 1.2120180
scoala_spec  0.6608301 0.9793063
tulb_cond    1.0407289 1.2932354
library(effects)
plot(allEffects(mod_pois), band.colors = "blue", lwd = 3, ylab = "ACE Score", main = "", rows=5, cols=3)  # plot meta-array: rows=5, cols=3

9 Quasi-Poisson Regression Model

## GLM - Quasi Poisson
# In R, Poisson models with scaled standard errors are called quasipoisson:
# A Pearson dispersion in excess of 1.0 indicates likely Poisson model
# overdispersion. Whether the overdispersion is significant depends on
# (1) the value of the dispersion statistic, (2) the number of observations
# in the model, and (3) the structure of the data; for example, if the data
# are highly unbalanced. 
# mod_qpois <- glm(CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom + boli + neglijare + varsta + 
#                       nr_frati + TCC, family = quasipoisson, data = Data_glm)                           # first decent model
mod_qpois <- glm(CYW ~ expunere_tox + varsta_inst +  schimb_dom + boli + varsta +
                      nr_frati + gen + comunit + intarziere + TCC + neglijare + 
                      scoala_spec + tulb_cond, family = quasipoisson, data = Data_glm)                  # best possible model 
summary(mod_qpois)  # Dispersion parameter for quasipoisson family taken to be 1.96252 -- greater than 1, confirming overdispersion

Call:
glm(formula = CYW ~ expunere_tox + varsta_inst + schimb_dom + 
    boli + varsta + nr_frati + gen + comunit + intarziere + TCC + 
    neglijare + scoala_spec + tulb_cond, family = quasipoisson, 
    data = Data_glm)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-4.1038  -1.1869  -0.1411   0.8357   4.4377  

Coefficients:
              Estimate Std. Error t value             Pr(>|t|)    
(Intercept)   0.874702   0.098752   8.858 < 0.0000000000000002 ***
expunere_tox  0.139297   0.044479   3.132             0.001784 ** 
varsta_inst   0.015875   0.005244   3.028             0.002523 ** 
schimb_dom    0.130945   0.046425   2.821             0.004881 ** 
boli          0.098082   0.061361   1.598             0.110236    
varsta        0.023857   0.006567   3.633             0.000293 ***
nr_frati      0.011953   0.007288   1.640             0.101261    
genm         -0.059882   0.039711  -1.508             0.131853    
comunit       0.085733   0.047827   1.793             0.073319 .  
intarziere    0.128884   0.061331   2.101             0.035831 *  
TCC           0.219910   0.116872   1.882             0.060151 .  
neglijare     0.113752   0.040247   2.826             0.004793 ** 
scoala_spec  -0.217585   0.092014  -2.365             0.018219 *  
tulb_cond     0.148534   0.056505   2.629             0.008692 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for quasipoisson family taken to be 1.96252)

    Null deviance: 2666.1  on 1106  degrees of freedom
Residual deviance: 2416.2  on 1093  degrees of freedom
  (168 observations deleted due to missingness)
AIC: NA

Number of Fisher Scoring iterations: 5
# Over-dispersion can be confirmed by comparing the log-likelihoods of the Poisson and negative binomial models (see the sketch below)
# tidy(mod_qpois) %>% mutate_if(is.numeric, round, 2)  %>% xlsx::write.xlsx(., file = "qpois.xlsx")
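A sketch of that comparison, refitting the same linear predictor as a negative binomial (mod_nb_same is a hypothetical helper object):

mod_nb_same <- MASS::glm.nb(formula(mod_pois), data = Data_glm)     # NB fit with the same predictors as mod_pois
lmtest::lrtest(mod_pois, mod_nb_same)                               # a significant test favours the NB model, i.e. overdispersion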
exp(coef(mod_qpois))                                                # IRR
 (Intercept) expunere_tox  varsta_inst   schimb_dom         boli       varsta     nr_frati         genm      comunit   intarziere          TCC 
   2.3981606    1.1494650    1.0160019    1.1399049    1.1030528    1.0241442    1.0120251    0.9418756    1.0895156    1.1375582    1.2459646 
   neglijare  scoala_spec    tulb_cond 
   1.1204746    0.8044595    1.1601325 
exp(coef(mod_qpois))*sqrt(diag(vcov(mod_qpois)))                    # delta method
 (Intercept) expunere_tox  varsta_inst   schimb_dom         boli       varsta     nr_frati         genm      comunit   intarziere          TCC 
 0.236823632  0.051127006  0.005327538  0.052920383  0.067684638  0.006725471  0.007375576  0.037402384  0.052108388  0.069767902  0.145618239 
   neglijare  scoala_spec    tulb_cond 
 0.045095227  0.074021602  0.065553372 
exp(confint.default(mod_qpois))                                     # CI of IRR
                 2.5 %    97.5 %
(Intercept)  1.9761515 2.9102902
expunere_tox 1.0535015 1.2541697
varsta_inst  1.0056136 1.0264975
schimb_dom   1.0407618 1.2484923
boli         0.9780602 1.2440191
varsta       1.0110470 1.0374111
nr_frati     0.9976720 1.0265847
genm         0.8713485 1.0181112
comunit      0.9920257 1.1965861
intarziere   1.0087146 1.2828589
TCC          0.9908871 1.5667050
neglijare    1.0354857 1.2124391
scoala_spec  0.6717097 0.9634445
tulb_cond    1.0385093 1.2959994
## Test if there is overdispersion in BIC step selected model (best fitting) --- overdispersion still present
# mod_qpoisBIC <- glm(CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom + neglijare +
#                           varsta + boli + comunit, family = quasipoisson, data = Data_glm)                  # best BIC
# summary(mod_qpoisBIC)

10 NB Regression Model

# Step selection NB Regression
step_glm_nb_full <- MASS::glm.nb(CYW ~ ., data = Data_lm_step)
cat("#### NB GLM - Bidirectional step by AIC")
#### NB GLM - Bidirectional step by AIC
mod_nb_AIC <- MASS::stepAIC(step_glm_nb_full, direction = "both", trace = FALSE)
summary(mod_nb_AIC)

Call:
MASS::glm.nb(formula = CYW ~ varsta + gen + varsta_inst + nr_frati + 
    expunere_tox + boli + TCC + intarziere + tras_dez + neglijare + 
    temperam + scoala_spec + schimb_dom + comunit, data = Data_lm_step, 
    init.theta = 4.675848344, link = log)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-3.1972  -0.8322  -0.0907   0.6144   2.1442  

Coefficients:
              Estimate Std. Error z value           Pr(>|z|)    
(Intercept)   0.792959   0.106074   7.476 0.0000000000000769 ***
varsta        0.021459   0.007186   2.986           0.002824 ** 
genm         -0.064499   0.043807  -1.472           0.140931    
varsta_inst   0.023296   0.006062   3.843           0.000122 ***
nr_frati      0.015695   0.008152   1.925           0.054193 .  
expunere_tox  0.149244   0.049391   3.022           0.002514 ** 
boli          0.132322   0.068679   1.927           0.054022 .  
TCC           0.243798   0.145721   1.673           0.094317 .  
intarziere    0.116584   0.068857   1.693           0.090432 .  
tras_dez      0.142578   0.085173   1.674           0.094134 .  
neglijare     0.131975   0.044996   2.933           0.003357 ** 
temperam      0.096056   0.063181   1.520           0.128426    
scoala_spec  -0.206528   0.104283  -1.980           0.047651 *  
schimb_dom    0.133804   0.052676   2.540           0.011081 *  
comunit       0.091172   0.054269   1.680           0.092958 .  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for Negative Binomial(4.6758) family taken to be 1)

    Null deviance: 1231.5  on 929  degrees of freedom
Residual deviance: 1106.3  on 915  degrees of freedom
AIC: 4556.5

Number of Fisher Scoring iterations: 1

              Theta:  4.676 
          Std. Err.:  0.497 

 2 x log-likelihood:  -4524.514 
cat("#### NB GLM - Bidirectional step by BIC")
#### NB GLM - Bidirectional step by BIC
mod_nb_BIC <- MASS::stepAIC(step_glm_nb_full, direction = "both", k = log(nrow(Data_lm_step)), trace = FALSE)
summary(mod_nb_BIC)

Call:
MASS::glm.nb(formula = CYW ~ varsta + varsta_inst + expunere_tox + 
    tras_dez + neglijare + schimb_dom, data = Data_lm_step, init.theta = 4.368457033, 
    link = log)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-3.0604  -0.8216  -0.1146   0.5956   2.2701  

Coefficients:
             Estimate Std. Error z value             Pr(>|z|)    
(Intercept)  0.877921   0.099373   8.835 < 0.0000000000000002 ***
varsta       0.020118   0.007127   2.823             0.004761 ** 
varsta_inst  0.023193   0.006071   3.821             0.000133 ***
expunere_tox 0.210956   0.047733   4.420           0.00000989 ***
tras_dez     0.235963   0.079463   2.969             0.002983 ** 
neglijare    0.135866   0.045429   2.991             0.002783 ** 
schimb_dom   0.155043   0.052708   2.942             0.003266 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for Negative Binomial(4.3685) family taken to be 1)

    Null deviance: 1198.2  on 929  degrees of freedom
Residual deviance: 1102.6  on 923  degrees of freedom
AIC: 4566.2

Number of Fisher Scoring iterations: 1

              Theta:  4.368 
          Std. Err.:  0.448 

 2 x log-likelihood:  -4550.245 
# mod_nb <- MASS::glm.nb(CYW ~ varsta + varsta_inst  +  expunere_tox + TCC  +  intarziere + 
#                        neglijare + temperam  + scoala_spec  + schimb_dom, data = Data_glm)       # a good model on Poisson and NB          
# summary(mod_nb)             
## Negative Binomial GLM - a good model
cat("#### Test a good NB model")
#### Test a good NB model
# NB2
mod_nb2 <- MASS::glm.nb(CYW ~ expunere_tox + varsta_inst + schimb_dom + 
                              varsta + intarziere + TCC + neglijare + scoala_spec + tulb_cond,
                              data = Data_glm)
summary(mod_nb2) # tidy(mod_nb2) %>% mutate_if(is.numeric, round, 2)  %>% xlsx::write.xlsx(., file = "mod_nb2.xlsx") 

Call:
MASS::glm.nb(formula = CYW ~ expunere_tox + varsta_inst + schimb_dom + 
    varsta + intarziere + TCC + neglijare + scoala_spec + tulb_cond, 
    data = Data_glm, init.theta = 3.853972983, link = log)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-3.0243  -0.8670  -0.1277   0.5529   2.6456  

Coefficients:
              Estimate Std. Error z value             Pr(>|z|)    
(Intercept)   0.858096   0.093796   9.149 < 0.0000000000000002 ***
expunere_tox  0.182474   0.046301   3.941            0.0000811 ***
varsta_inst   0.015630   0.005415   2.886              0.00390 ** 
schimb_dom    0.140069   0.049631   2.822              0.00477 ** 
varsta        0.026321   0.006679   3.941            0.0000812 ***
intarziere    0.178918   0.065683   2.724              0.00645 ** 
TCC           0.296167   0.136563   2.169              0.03010 *  
neglijare     0.129852   0.041878   3.101              0.00193 ** 
scoala_spec  -0.231707   0.094070  -2.463              0.01377 *  
tulb_cond     0.155874   0.060333   2.584              0.00978 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for Negative Binomial(3.854) family taken to be 1)

    Null deviance: 1483.7  on 1157  degrees of freedom
Residual deviance: 1370.0  on 1148  degrees of freedom
  (117 observations deleted due to missingness)
AIC: 5736.4

Number of Fisher Scoring iterations: 1

              Theta:  3.854 
          Std. Err.:  0.333 

 2 x log-likelihood:  -5714.372 
exp(coef(mod_nb2)); # exp(coef(mod_nb2))*sqrt(diag(vcov(mod_nb2)))    # delta-method SEs, not needed here
 (Intercept) expunere_tox  varsta_inst   schimb_dom       varsta   intarziere          TCC    neglijare  scoala_spec    tulb_cond 
   2.3586654    1.2001827    1.0157523    1.1503535    1.0266700    1.1959221    1.3446951    1.1386603    0.7931786    1.1686793 
exp(confint.default(mod_nb2))
                 2.5 %    97.5 %
(Intercept)  1.9625789 2.8346899
expunere_tox 1.0960647 1.3141911
varsta_inst  1.0050279 1.0265911
schimb_dom   1.0437240 1.2678765
varsta       1.0133183 1.0401977
intarziere   1.0514624 1.3602291
TCC          1.0289180 1.7573847
neglijare    1.0489332 1.2360627
scoala_spec  0.6596274 0.9537693
tulb_cond    1.0383405 1.3153791
mod_pois_new <- glm(formula = CYW ~ expunere_tox + varsta_inst + schimb_dom + 
                                    varsta + intarziere + TCC + neglijare + scoala_spec + tulb_cond,
                                    family = poisson, data = Data_glm)
summary(mod_pois_new) # tidy(mod_pois_new) %>% mutate_if(is.numeric, round, 2)  %>% xlsx::write.xlsx(., file = "mod_pois_new.xlsx") 

Call:
glm(formula = CYW ~ expunere_tox + varsta_inst + schimb_dom + 
    varsta + intarziere + TCC + neglijare + scoala_spec + tulb_cond, 
    family = poisson, data = Data_glm)

Deviance Residuals: 
    Min       1Q   Median       3Q      Max  
-4.1411  -1.2151  -0.1879   0.8585   4.5745  

Coefficients:
              Estimate Std. Error z value             Pr(>|z|)    
(Intercept)   0.869963   0.064547  13.478 < 0.0000000000000002 ***
expunere_tox  0.176596   0.030316   5.825         0.0000000057 ***
varsta_inst   0.015592   0.003594   4.338         0.0000143585 ***
schimb_dom    0.144852   0.032228   4.495         0.0000069713 ***
varsta        0.025715   0.004551   5.651         0.0000000160 ***
intarziere    0.165831   0.042378   3.913         0.0000911183 ***
TCC           0.269712   0.081106   3.325             0.000883 ***
neglijare     0.129210   0.028203   4.582         0.0000046162 ***
scoala_spec  -0.220913   0.063488  -3.480             0.000502 ***
tulb_cond     0.151754   0.038747   3.916         0.0000898524 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for poisson family taken to be 1)

    Null deviance: 2833.2  on 1157  degrees of freedom
Residual deviance: 2582.8  on 1148  degrees of freedom
  (117 observations deleted due to missingness)
AIC: 6094

Number of Fisher Scoring iterations: 5
exp(coef(mod_pois_new))
 (Intercept) expunere_tox  varsta_inst   schimb_dom       varsta   intarziere          TCC    neglijare  scoala_spec    tulb_cond 
   2.3868231    1.1931493    1.0157145    1.1558685    1.0260483    1.1803735    1.3095875    1.1379295    0.8017862    1.1638733 
exp(confint.default(mod_pois_new))
                 2.5 %    97.5 %
(Intercept)  2.1031870 2.7087105
expunere_tox 1.1243206 1.2661915
varsta_inst  1.0085846 1.0228947
schimb_dom   1.0851148 1.2312357
varsta       1.0169375 1.0352406
intarziere   1.0862928 1.2826022
TCC          1.1171117 1.5352265
neglijare    1.0767362 1.2026006
scoala_spec  0.7079749 0.9080281
tulb_cond    1.0787575 1.2557050
lmtest::lrtest(mod_pois_new, mod_nb2)    #  likelihood ratio test
Likelihood ratio test

Model 1: CYW ~ expunere_tox + varsta_inst + schimb_dom + varsta + intarziere + 
    TCC + neglijare + scoala_spec + tulb_cond
Model 2: CYW ~ expunere_tox + varsta_inst + schimb_dom + varsta + intarziere + 
    TCC + neglijare + scoala_spec + tulb_cond
  #Df  LogLik Df  Chisq            Pr(>Chisq)    
1  10 -3037.0                                    
2  11 -2857.2  1 359.65 < 0.00000000000000022 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
## NOTE on the dispersion parameter: msme::nbinomial reports alpha (directly related to the dispersion), while MASS::glm.nb reports theta (its inverse, alpha = 1/theta)
# NOTE: when using na.omit, apply it to a data set holding only the model's outcome and predictors, so cases are not dropped for missingness in unused variables
# NB2 with alpha dispersion parameter instead of theta
    # mod_nb2 <- msme::nbinomial(CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom + boli + neglijare + varsta +
    #                       nr_frati + TCC, data = na.omit(Data_glm))
    # summary(mod_nb2)
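For reference, the two parameterisations convert directly (alpha = 1/theta); a minimal check using the NB2 fit above:

1 / mod_nb2$theta                                                   # theta = 3.854 corresponds to alpha of about 0.26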
## NB1
# library(gamlss)
# mod_nb1 <- gamlss::gamlss(formula = CYW ~ expunere_tox + varsta_inst + tras_dez + schimb_dom + boli + neglijare + varsta +
#                       nr_frati + TCC, family = NBI, data = na.omit(Data_glm))
# summary(mod_nb1); plot(mod_nb1)
# lmtest::lrtest(mod_nb1, mod_nb2)    #  likelihood ratio test
# https://data.library.virginia.edu/getting-started-with-negative-binomial-regression-modeling/
# install.packages("countreg", repos="http://R-Forge.R-project.org")       # Zeileis and Kleiber, 2014  not on CRAN
library(countreg)
countreg::rootogram(mod_pois_new, ylim = c(-7, 18), main = "Poisson")     # rootogram on the fitted model objects (not possible in vcd::rootogram)

countreg::rootogram(mod_nb2, ylim = c(-7, 18), main = "Negative Binomial") 

# For the negative binomial, the underfitting of the zero count and the overfitting of counts 1–2 are characteristic of data with excess zeros.
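A quick numeric check of the excess-zero claim, assuming the fitted NB2 model above (mu_hat and the zero-count objects are illustrative helpers):

mu_hat   <- predict(mod_nb2, type = "response")                                # fitted means on the response scale
exp_zero <- sum(dnbinom(0, mu = mu_hat, size = mod_nb2$theta))                 # zeros expected under the NB2 fit
obs_zero <- sum(model.response(model.frame(mod_nb2)) == 0)                     # zeros actually observed
c(observed = obs_zero, expected = round(exp_zero))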

11 ZIP and Hurdle Regression Models

# Data without NAs, for functions that don't exclude NAs by default
Data_glm_nona <- 
  Data_glm %>%
  dplyr::select(CYW, expunere_tox, varsta_inst, schimb_dom, varsta,
          intarziere, TCC, neglijare, scoala_spec, tulb_cond) %>%
  drop_na()
# Formula for the 9-predictor best model (Poisson and NB)
formula_model <- as.formula("CYW ~ expunere_tox + varsta_inst + schimb_dom + 
                                   varsta + intarziere + TCC + neglijare + scoala_spec + tulb_cond")
library(pscl)
mod_hurd_pois <- hurdle(formula_model, data = Data_glm , dist = "poisson")
mod_hurd_nb <- hurdle(formula_model, data = Data_glm , dist = "negbin")
mod_zip <- zeroinfl(formula_model, data = Data_glm , dist = "poisson")
mod_znb <- zeroinfl(formula_model, data = Data_glm , dist = "negbin")
mod_zpig <- gamlss::gamlss(formula_model, data = Data_glm_nona , family = "ZIPIG")
GAMLSS-RS iteration 1: Global Deviance = 5691.461 
GAMLSS-RS iteration 2: Global Deviance = 5702.617 
GAMLSS-RS iteration 3: Global Deviance = 5707.807 
GAMLSS-RS iteration 4: Global Deviance = 5709.887 
GAMLSS-RS iteration 5: Global Deviance = 5710.591 
GAMLSS-RS iteration 6: Global Deviance = 5710.963 
GAMLSS-RS iteration 7: Global Deviance = 5711.088 
GAMLSS-RS iteration 8: Global Deviance = 5711.162 
GAMLSS-RS iteration 9: Global Deviance = 5711.193 
GAMLSS-RS iteration 10: Global Deviance = 5711.206 
GAMLSS-RS iteration 11: Global Deviance = 5711.213 
GAMLSS-RS iteration 12: Global Deviance = 5711.212 
GAMLSS-RS iteration 13: Global Deviance = 5711.215 
GAMLSS-RS iteration 14: Global Deviance = 5711.213 
GAMLSS-RS iteration 15: Global Deviance = 5711.214 
GAMLSS-RS iteration 16: Global Deviance = 5711.214 
countreg::rootogram(mod_hurd_pois, max = 50, main = "Hurdle Poisson")

countreg::rootogram(mod_hurd_nb, max = 50, main = "Hurdle Negative Binomial")

countreg::rootogram(mod_zip, max = 50, main = "Zero-inflated Poisson")

countreg::rootogram(mod_znb, max = 50, main = "Zero-inflated Negative Binomial")

vcdExtra::LRstats(mod_pois_new, mod_nb2, mod_hurd_pois, mod_hurd_nb, mod_zip, mod_znb, mod_zpig, sortby = "AIC") #%>% xlsx::write.xlsx(., file = "LRtest.xlsx")
Likelihood summary table:
                 AIC    BIC LR Chisq   Df            Pr(>Chisq)    
mod_pois_new  6094.0 6144.6   6074.0 1148 < 0.00000000000000022 ***
mod_hurd_pois 5846.2 5947.3   5806.2 1138 < 0.00000000000000022 ***
mod_zip       5845.4 5946.5   5805.4 1138 < 0.00000000000000022 ***
mod_nb2       5736.4 5792.0   5714.4 1148 < 0.00000000000000022 ***
mod_zpig      5735.2 5795.9   5711.2 1146 < 0.00000000000000022 ***
mod_hurd_nb   5696.6 5802.7   5654.6 1137 < 0.00000000000000022 ***
mod_znb       5694.5 5800.7   5652.5 1137 < 0.00000000000000022 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
lmtest::lrtest(mod_hurd_nb, mod_znb)       
Likelihood ratio test

Model 1: CYW ~ expunere_tox + varsta_inst + schimb_dom + varsta + intarziere + 
    TCC + neglijare + scoala_spec + tulb_cond
Model 2: CYW ~ expunere_tox + varsta_inst + schimb_dom + varsta + intarziere + 
    TCC + neglijare + scoala_spec + tulb_cond
  #Df  LogLik Df  Chisq            Pr(>Chisq)    
1  21 -2827.3                                    
2  21 -2826.3  0 2.0362 < 0.00000000000000022 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
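Because the hurdle NB and zero-inflated NB models are not nested, the LR test above is only indicative; a Vuong test for non-nested models is the more appropriate comparison, sketched here:

pscl::vuong(mod_hurd_nb, mod_znb)                                   # a positive statistic favours the first model, a negative one the second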
# Best-fitting model (lowest AIC) - Zero-inflated Negative Binomial
summary(mod_znb)  

Call:
zeroinfl(formula = formula_model, data = Data_glm, dist = "negbin")

Pearson residuals:
    Min      1Q  Median      3Q     Max 
-1.8238 -0.7583 -0.1148  0.6524  4.0239 

Count model coefficients (negbin with log link):
              Estimate Std. Error z value             Pr(>|z|)    
(Intercept)   0.974121   0.091627  10.631 < 0.0000000000000002 ***
expunere_tox  0.181976   0.042720   4.260            0.0000205 ***
varsta_inst   0.013161   0.005181   2.540              0.01108 *  
schimb_dom    0.100949   0.045208   2.233              0.02555 *  
varsta        0.026730   0.006530   4.094            0.0000425 ***
intarziere    0.182133   0.059343   3.069              0.00215 ** 
TCC           0.236223   0.118234   1.998              0.04572 *  
neglijare     0.060959   0.039219   1.554              0.12011    
scoala_spec  -0.193458   0.088012  -2.198              0.02794 *  
tulb_cond     0.160337   0.054983   2.916              0.00354 ** 
Log(theta)    1.848631   0.123964  14.913 < 0.0000000000000002 ***

Zero-inflation model coefficients (binomial with logit link):
               Estimate Std. Error z value Pr(>|z|)   
(Intercept)   -1.869657   0.785924  -2.379  0.01736 * 
expunere_tox   0.052760   0.382341   0.138  0.89025   
varsta_inst   -0.046119   0.043698  -1.055  0.29125   
schimb_dom    -1.084011   0.639696  -1.695  0.09016 . 
varsta         0.001132   0.057044   0.020  0.98417   
intarziere     0.032617   0.516929   0.063  0.94969   
TCC          -13.943212 904.443543  -0.015  0.98770   
neglijare     -1.349598   0.447732  -3.014  0.00258 **
scoala_spec    0.477429   0.576483   0.828  0.40757   
tulb_cond      0.086141   0.447328   0.193  0.84730   
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 

Theta = 6.3511 
Number of iterations in BFGS optimization: 31 
Log-likelihood: -2826 on 21 Df
exp(coef(mod_znb)) #  %>% as.data.frame() %>% mutate_if(is.numeric, round, 2)  %>% xlsx::write.xlsx(., file = "mod_znb.xlsx")
 count_(Intercept) count_expunere_tox  count_varsta_inst   count_schimb_dom       count_varsta   count_intarziere          count_TCC 
   2.6488366843609    1.1995853584739    1.0132478388526    1.1062199094659    1.0270903997878    1.1997741031525    1.2664572338295 
   count_neglijare  count_scoala_spec    count_tulb_cond   zero_(Intercept)  zero_expunere_tox   zero_varsta_inst    zero_schimb_dom 
   1.0628551671389    0.8241040929909    1.1739061865554    0.1541765294175    1.0541760856719    0.9549287565637    0.3382360724541 
       zero_varsta    zero_intarziere           zero_TCC     zero_neglijare   zero_scoala_spec     zero_tulb_cond 
   1.0011324796663    1.0331549499947    0.0000008801157    0.2593444676465    1.6119250943723    1.0899595920461 
exp(confint.default(mod_znb)) #  %>% as.data.frame() %>% mutate_if(is.numeric, round, 2)  %>% xlsx::write.xlsx(., file = "mod_znb.xlsx")
                        2.5 %    97.5 %
count_(Intercept)  2.21341150 3.1699193
count_expunere_tox 1.10323438 1.3043511
count_varsta_inst  1.00301080 1.0235894
count_schimb_dom   1.01241962 1.2087108
count_varsta       1.01402968 1.0403193
count_intarziere   1.06803869 1.3477582
count_TCC          1.00449834 1.5967313
count_neglijare    0.98421686 1.1477766
count_scoala_spec  0.69353203 0.9792591
count_tulb_cond    1.05397803 1.3074805
zero_(Intercept)   0.03303989 0.7194456
zero_expunere_tox  0.49826880 2.2302966
zero_varsta_inst   0.87654607 1.0403206
zero_schimb_dom    0.09654044 1.1850333
zero_varsta        0.89523247 1.1195598
zero_intarziere    0.37510626 2.8456180
zero_TCC           0.00000000       Inf
zero_neglijare     0.10783677 0.6237163
zero_scoala_spec   0.52076440 4.9894012
zero_tulb_cond     0.45357021 2.6192459
pred <- round(colSums(predict(mod_znb, type="prob")[,1:18])) # expected counts
obs <- table(Data_glm$CYW)[1:18]                           # observed counts        
rbind(obs, pred)
       0   1   2   3   4   5   6   7  8  9 10 11 12 13 14 15 17 18
obs  119 128 144 142 121 130 147 105 81 68 34 31 14  6  2  1  1  1
pred 101  87 133 155 153 135 111  85 63 45 31 21 14  9  6  4  3  2
# Other Tests
mod_znb2 <- zeroinfl(CYW ~ expunere_tox + varsta_inst + schimb_dom + 
                           varsta + intarziere + TCC + neglijare + scoala_spec + tulb_cond |  neglijare , data = Data_glm , dist = "negbin")
summary(mod_znb2)

Call:
zeroinfl(formula = CYW ~ expunere_tox + varsta_inst + schimb_dom + varsta + intarziere + TCC + neglijare + scoala_spec + tulb_cond | 
    neglijare, data = Data_glm, dist = "negbin")

Pearson residuals:
    Min      1Q  Median      3Q     Max 
-1.8155 -0.7717 -0.1139  0.6484  3.9855 

Count model coefficients (negbin with log link):
              Estimate Std. Error z value             Pr(>|z|)    
(Intercept)   0.964337   0.089380  10.789 < 0.0000000000000002 ***
expunere_tox  0.181433   0.042378   4.281            0.0000186 ***
varsta_inst   0.014176   0.005077   2.792              0.00523 ** 
schimb_dom    0.113385   0.045010   2.519              0.01177 *  
varsta        0.026448   0.006335   4.175            0.0000298 ***
intarziere    0.180370   0.059126   3.051              0.00228 ** 
TCC           0.235590   0.118591   1.987              0.04697 *  
neglijare     0.064806   0.039276   1.650              0.09894 .  
scoala_spec  -0.204271   0.087691  -2.329              0.01984 *  
tulb_cond     0.161198   0.054905   2.936              0.00333 ** 
Log(theta)    1.835746   0.123039  14.920 < 0.0000000000000002 ***

Zero-inflation model coefficients (binomial with logit link):
            Estimate Std. Error z value            Pr(>|z|)    
(Intercept)  -2.3071     0.1855 -12.439 <0.0000000000000002 ***
neglijare    -1.3395     0.4244  -3.156              0.0016 ** 
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 

Theta = 6.2698 
Number of iterations in BFGS optimization: 25 
Log-likelihood: -2831 on 13 Df
lmtest::lrtest(mod_znb, mod_znb2)   # the full zero-inflation model is not significantly better than predicting the zero counts by neglijare alone
Likelihood ratio test

Model 1: CYW ~ expunere_tox + varsta_inst + schimb_dom + varsta + intarziere + 
    TCC + neglijare + scoala_spec + tulb_cond
Model 2: CYW ~ expunere_tox + varsta_inst + schimb_dom + varsta + intarziere + 
    TCC + neglijare + scoala_spec + tulb_cond | neglijare
  #Df  LogLik Df  Chisq Pr(>Chisq)
1  21 -2826.3                     
2  13 -2831.3 -8 10.021     0.2636
# NB2 again -- nobs and results are the same as mod_nb2 --- THE DATA IS UNDERDISPERSED HERE: Disp = 0.933
mod_nbnb <- msme::nbinomial(formula_model, data = Data_glm_nona)
summary(mod_nbnb)

# HNB
mod_hnb <- msme::nbinomial(formula1 = formula_model, 
                            formula2 =~ expunere_tox + varsta_inst + schimb_dom + varsta +          # modelling predictors of dispersion
                                        intarziere + TCC + neglijare + scoala_spec + tulb_cond,
                            family = "negBinomial", mean.link = "log", scale.link = "log_s",  data = Data_glm_nona)
summary(mod_hnb)
exp(coef(mod_hnb))


# NB1 -- appears to fit slightly better than NB2, but the evidence is not conclusive
mod_nb1 <- gamlss::gamlss(formula = formula_model, family = NBI, data = Data_glm_nona)
summary(mod_nb1); # plot(mod_nb1)
countreg::rootogram(mod_nb1, max = 50, main = "NB1")
vcdExtra::LRstats(mod_nb2, mod_nb1)


12 Session Info

R version 3.5.2 (2018-12-20)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows >= 8 x64 (build 9200)

Matrix products: default

locale:
[1] LC_COLLATE=Romanian_Romania.1250  LC_CTYPE=Romanian_Romania.1250    LC_MONETARY=Romanian_Romania.1250 LC_NUMERIC=C                     
[5] LC_TIME=Romanian_Romania.1250    

attached base packages:
[1] stats     graphics  grDevices utils     datasets  methods   base     

other attached packages:
 [1] pscl_1.5.2                 countreg_0.2-1             MASS_7.3-51.1              effects_4.1-0              sandwich_2.5-1            
 [6] olsrr_0.5.2                gvlma_1.0.0.2              bindrcpp_0.2.2             car_3.0-2                  carData_3.0-2             
[11] RColorBrewer_1.1-2         corrplot_0.84              GGally_1.4.0               Hmisc_4.1-1                Formula_1.2-3             
[16] survival_2.43-3            lattice_0.20-38            rio_0.5.16                 scales_1.0.0               ggpubr_0.2                
[21] magrittr_1.5               PerformanceAnalytics_1.5.2 xts_0.11-2                 zoo_1.8-4                  tadaatoolbox_0.16.1       
[26] summarytools_0.9.3         broom_0.5.1                psycho_0.4.0               psych_1.8.10               plyr_1.8.4                
[31] forcats_0.3.0              stringr_1.3.1              dplyr_0.7.8                purrr_0.2.5                readr_1.3.0               
[36] tidyr_0.8.2                tibble_1.4.2               ggplot2_3.1.0              tidyverse_1.2.1            papaja_0.1.0.9842         
[41] kableExtra_1.0.1           knitr_1.21                 pacman_0.5.0              

loaded via a namespace (and not attached):
  [1] statnet.common_4.1.4       vcd_1.4-4                  corpcor_1.6.9              class_7.3-14               formula.tools_1.7.1       
  [6] assertive.properties_0.0-4 ps_1.2.1                   d3Network_0.5.2.1          relimp_1.0-5               lmtest_0.9-36             
 [11] crayon_1.3.4               nonnest2_0.5-2             nlme_3.1-137               backports_1.1.3            infer_0.4.0.1             
 [16] ggcorrplot_0.1.2           ellipse_0.4.1              colourpicker_1.0           huge_1.2.7                 rlang_0.3.0.1             
 [21] readxl_1.1.0               SparseM_1.77               nloptr_1.2.1               callr_3.1.1                ca_0.71                   
 [26] rjson_0.2.20               glue_1.3.1                 loo_2.0.0                  rstan_2.18.2               parallel_3.5.2            
 [31] processx_3.2.1             moderndive_0.2.0           tcltk_3.5.2                mcmc_0.9-5                 haven_2.1.0               
 [36] tidyselect_0.2.5           assertive.types_0.0-3      blavaan_0.3-4              xtable_1.8-3               MatrixModels_0.4-1        
 [41] ggm_2.3                    evaluate_0.12              cli_1.0.1                  rstudioapi_0.8             miniUI_0.1.1.1            
 [46] whisker_0.3-2              rpart_4.1-13               shinystan_2.5.0            shiny_1.2.0                xfun_0.4                  
 [51] inline_0.3.15              pkgbuild_1.0.2             cluster_2.0.7-1            nFactors_2.3.3             expm_0.999-3              
 [56] quantreg_5.38              assertive.sets_0.0-3       threejs_0.3.1              png_0.1-7                  reshape_0.8.8             
 [61] ipred_0.9-8                withr_2.1.2                bitops_1.0-6               cellranger_1.1.0           assertive.base_0.0-7      
 [66] survey_3.35                assertive.models_0.0-2     coda_0.19-2                pillar_1.3.1               multcomp_1.4-10           
 [71] assertive.matrices_0.0-2   msme_0.5.3                 assertive.reflection_0.0-4 BDgraph_2.53               gamlss.data_5.1-4         
 [76] pbivnorm_0.6.0             generics_0.0.2             dygraphs_1.1.1.6           gh_1.0.1                   nortest_1.0-4             
 [81] lava_1.6.4                 tools_3.5.2                foreign_0.8-71             munsell_0.5.0              emmeans_1.3.1             
 [86] compiler_3.5.2             abind_1.4-5                httpuv_1.4.5               manipulate_1.0.1           assertive.data.uk_0.0-2   
 [91] DescTools_0.99.27          prodlim_2018.04.18         gridExtra_2.3              MCMCpack_1.4-4             ppcor_1.1                 
 [96] assertive.data.us_0.0-2    later_0.7.5                recipes_0.1.4              vcdExtra_0.7-1             jsonlite_1.6              
[101] arm_1.10-1                 pbapply_1.3-4              estimability_1.3           lazyeval_0.2.1             promises_1.0.1            
[106] latticeExtra_0.6-28        goftest_1.1-1              sna_2.4                    checkmate_1.8.5            rapportools_1.0           
[111] rmarkdown_1.11             openxlsx_4.1.0             webshot_0.5.1              pander_0.6.3               igraph_1.2.2              
[116] numDeriv_2016.8-1          rsconnect_0.8.13           yaml_2.2.0                 bayesplot_1.6.0            htmltools_0.3.6           
[121] rstantools_1.5.1           lavaan_0.6-3               quadprog_1.5-5             viridisLite_0.3.0          digest_0.6.18             
[126] assertthat_0.2.1           mime_0.6                   MuMIn_1.42.1               assertive.code_0.0-3       assertive.strings_0.0-3   
[131] data.table_1.12.2          gnm_1.1-0                  shinythemes_1.1.2          splines_3.5.2              labeling_0.3              
[136] RCurl_1.95-4.11            assertive.numbers_0.0-2    hms_0.4.2                  modelr_0.1.2               colorspace_1.3-2          
[141] base64enc_0.1-3            mnormt_1.5-5               operator.tools_1.6.3       assertive.files_0.0-2      nnet_7.3-12               
[146] Rcpp_1.0.1                 mvtnorm_1.0-10             matrixcalc_1.0-3           R6_2.4.0                   grid_3.5.2                
[151] ggridges_0.5.1             acepack_1.4.1              StanHeaders_2.18.0-1       zip_1.0.0                  BayesFactor_0.9.12-4.2    
[156] qvcalc_1.0.0               curl_3.2                   ggsignif_0.4.0             pryr_0.1.4                 minqa_1.2.4               
[161] mi_1.0                     snakecase_0.9.2            qgraph_1.5                 Matrix_1.2-15              assertive.data_0.0-3      
[166] glasso_1.10                TH.data_1.0-9              pixiedust_0.8.6            gower_0.1.2                htmlwidgets_1.3           
[171] markdown_0.9               network_1.13.0.1           crosstalk_1.0.0            gamlss_5.1-4               COUNT_1.3.4               
[176] rvest_0.3.2                rstanarm_2.18.2            htmlTable_1.12             codetools_0.2-15           matrixStats_0.54.0        
[181] lubridate_1.7.4            gtools_3.8.1               prettyunits_1.0.2          gtable_0.2.0               stats4_3.5.2              
[186] httr_1.4.0                 stringi_1.2.4              reshape2_1.4.3             viridis_0.5.1              fdrtool_1.2.15            
[191] magick_2.0                 timeDate_3043.102          DT_0.5                     xml2_1.2.0                 assertive_0.3-5           
[196] assertive.datetimes_0.0-2  boot_1.3-20                shinyjs_1.0                lme4_1.1-19                sem_3.1-9                 
[201] pwr_1.2-2                  CompQuadForm_1.4.3         jpeg_0.1-8                 janitor_1.1.1              pkgconfig_2.0.2           
[206] gamlss.dist_5.1-4          lmerTest_3.0-1             bindr_0.1.1               
 

A work by Claudiu Papasteri

claudiu.papasteri@gmail.com

 
