# One-sample t-tests against a hypothesized mean (pmean)
t.test(data, mu=pmean)                         # H1: mean != pmean (two-sided)
t.test(data, mu=pmean, alternative="greater")  # H1: mean > pmean
t.test(data, mu=pmean, alternative="less")     # H1: mean < pmean
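# Minimal runnable sketch (hypothetical example, using the built-in mtcars
# data): test whether mean mpg differs from 20.
t.test(mtcars$mpg, mu=20)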
# Two-sample t-tests, pooled variance (assumes equal variances)
t.test(data1, data2, var.equal=TRUE)
t.test(data1, data2, var.equal=TRUE, alternative="greater")
t.test(data1, data2, var.equal=TRUE, alternative="less")
# Welch two-sample t-tests (default; does not assume equal variances)
t.test(data1, data2)
t.test(data1, data2, alternative="greater")
t.test(data1, data2, alternative="less")
# Paired t-tests
t.test(data1, data2, paired=TRUE)
t.test(data1, data2, paired=TRUE, alternative="greater")
t.test(data1, data2, paired=TRUE, alternative="less")
# Wilcoxon rank-sum (Mann-Whitney) tests
wilcox.test(data1, data2)
wilcox.test(data1, data2, alternative="greater")
wilcox.test(data1, data2, alternative="less")
# Wilcoxon signed-rank tests (paired)
wilcox.test(data1, data2, paired=TRUE)
wilcox.test(data1, data2, paired=TRUE, alternative="greater")
wilcox.test(data1, data2, paired=TRUE, alternative="less")
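# Runnable sketch with simulated data (hypothetical example): the same calls
# apply once data1/data2 hold real samples.
set.seed(1)
data1 <- rnorm(30, mean=10, sd=2)
data2 <- rnorm(30, mean=11, sd=2)
t.test(data1, data2)       # Welch two-sample t-test
wilcox.test(data1, data2)  # rank-based alternative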
# Kruskal-Wallis test: compare four independent groups
A <- c(257, 205, 206, 231, 190, 214, 228, 203)
B <- c(201, 164, 197, 185)
C <- c(248, 165, 187, 220, 212, 215, 281)
D <- c(202, 276, 207, 204, 230, 227)
kruskal.test(list(A, B, C, D))
Kruskal-Wallis rank sum test
data: list(A, B, C, D)
Kruskal-Wallis chi-squared = 6.839, df = 3, p-value = 0.07721
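# A common follow-up when the Kruskal-Wallis test is significant: pairwise
# rank-sum tests with a multiplicity correction (sketch; here p = 0.077, so
# this is shown only for illustration).
scores <- c(A, B, C, D)
groups <- factor(rep(c("A", "B", "C", "D"),
                     c(length(A), length(B), length(C), length(D))))
pairwise.wilcox.test(scores, groups, p.adjust.method="bonferroni")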
# Sign test: is the median equal to theta?
theta <- 118
data <- c(123, 105, 117, 117, 109, 118, 122)
library(BSDA)
SIGN.test(data, md=theta, alternative="two.sided")
# Manual computation of the two-sided sign-test p-value
S <- sum(data > theta)   # observations above theta
n <- sum(data != theta)  # ties with theta are dropped
s <- min(n - S, S)
pbinom(s, n, 0.5, lower.tail=TRUE) * 2
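# Equivalent exact test: under H0 the signs are Binomial(n, 0.5), so
# binom.test reproduces the p-value computed above.
binom.test(S, n, p=0.5)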
library(MASS)    # Boston housing data
library(caret)
set.seed(123)
# 70/30 train/test split; call the index 'idx' so it does not shadow caret::train()
idx <- createDataPartition(y=Boston$medv, p=0.7, list=FALSE)
head(idx)
Boston.train <- Boston[idx, ]
Boston.test <- Boston[-idx, ]
set.seed(123)
ctrl <- trainControl(method="cv", number=10)  # 10-fold cross-validation, shared by all models
Boston.gnet <- train(medv ~ ., data=Boston.train, method="glmnet",
                     trControl=ctrl, tuneLength=10)
Boston.rf <- train(medv ~ ., data=Boston.train, method="rf",
                   trControl=ctrl, tuneLength=10)
Boston.svm <- train(medv ~ ., data=Boston.train, method="svmRadialSigma",
                    trControl=ctrl, tuneLength=10)
gnet.pred <- predict(Boston.gnet, Boston.test)
postResample(pred=gnet.pred, obs=Boston.test$medv)  # RMSE, R-squared, MAE on the test set
rf.pred <- predict(Boston.rf, Boston.test)
postResample(pred=rf.pred, obs=Boston.test$medv)
svm.pred <- predict(Boston.svm, Boston.test)
postResample(pred=svm.pred, obs=Boston.test$medv)
# Compare cross-validation resamples across the three models
models <- list(gnet=Boston.gnet, rf=Boston.rf, svm=Boston.svm)
summary(resamples(models))
summary(resamples(models), metric="RMSE")
summary(diff(resamples(models), metric="RMSE"))  # pairwise differences in RMSE
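# Optional visual comparison (sketch): caret provides lattice-based plots
# for resamples objects.
bwplot(resamples(models), metric="RMSE")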
# Chi-squared test of independence: score range x subject area
m <- matrix(c(15, 60, 25, 25, 69, 6, 10, 77, 13), ncol=3)
dimnames(m) <- list(score=c("1.5-2.5", "2.5-3.5", "3.5-4.5"),
                    subject=c("Social Science", "Natural Science", "Engineering"))
m
addmargins(m)
Xsq <- chisq.test(m)
Xsq$expected  # expected counts under independence
Xsq           # test result (same as chisq.test(m))
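# Standardized residuals show which cells drive the result
# (values beyond roughly +/-2 are noteworthy).
Xsq$stdres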
# t distribution
pt(q=1, df=1, lower.tail=TRUE)     # P(T <= 1)
qt(p=0.75, df=1, lower.tail=TRUE)  # 75th percentile
rt(50, df=1)                       # 50 random draws
# Normal distribution (templates)
pnorm(q, mean=0, sd=1, lower.tail=TRUE/FALSE)
qnorm(p, mean=0, sd=1, lower.tail=TRUE/FALSE)
rnorm(n, mean=0, sd=1)
# F distribution (templates; the quantile/probability argument comes first)
pf(q, df1, df2, lower.tail=TRUE/FALSE)
qf(p, df1, df2, lower.tail=TRUE/FALSE)
rf(n, df1, df2)
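# Concrete sketch: two-sided 5% critical values from each distribution.
qnorm(0.975)              # ~1.96
qt(0.975, df=10)          # ~2.23
qf(0.975, df1=3, df2=20)  # upper critical value for an F test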
library(TTR)
library(forecast)
library(tseries)
# Ages at death of successive kings of England
king <- scan("https://robjhyndman.com/tsdldata/misc/kings.dat", skip=3)
king.ts <- ts(king)
king.ts
# Hold out the last 4 observations as a test set
train <- subset(king.ts, end=length(king.ts)-4)
test <- subset(king.ts, start=length(king.ts)-3)
adf.test(train)        # ADF unit-root test on the raw series
ndiffs(train)          # suggested number of differences
adf.test(diff(train))  # ADF test after first differencing
par(mfrow=c(1, 2))
acf(diff(train)); pacf(diff(train))  # identify AR/MA orders
# Candidate ARIMA models, fit on the training set (not the full series,
# so the held-out test points can score the forecasts)
m1 <- arima(train, order=c(3, 1, 0))
m2 <- arima(train, order=c(0, 1, 1))
m3 <- arima(train, order=c(3, 1, 1))
library(dplyr)
# Compare in-sample accuracy; cbind() coerces everything to character,
# so convert RMSE back to numeric before sorting
rbind(cbind(m='m1', accuracy(m1)),
      cbind(m='m2', accuracy(m2)),
      cbind(m='m3', accuracy(m3))) %>%
  data.frame() %>%
  select(m, RMSE) %>%
  mutate(RMSE=as.numeric(RMSE)) %>%
  arrange(RMSE)
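# A cross-check (sketch): forecast::auto.arima() searches the order space
# automatically and can be compared with the manually chosen models.
auto.arima(train)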
qqnorm(m3$residuals, pch=21, col="black",
       bg="gold", main="Q-Q Plot of Residuals")
qqline(m3$residuals, col="royalblue", lwd=2)
# Ljung-Box test for residual autocorrelation; fitdf = p + q = 4 for ARIMA(3,1,1)
Box.test(m3$residuals, lag=10, type="Ljung-Box", fitdf=4)
m3.pred <- forecast(m3, 4)  # forecast the 4 held-out years
plot(m3.pred)
plot(m3.pred,
     col="darkgreen",
     lwd=2,
     flty=1, flwd=2,
     fcol="royalblue", shadecols=c("mistyrose", "salmon"),
     xlab="Time",
     ylab="Age at death",
     main="Forecast"
)
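# Score the 4-step forecast against the held-out test set (RMSE, MAE, ...)
accuracy(m3.pred, test)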
library(dplyr)
# Simulate defect counts for 40 lots of 200 items each
set.seed(3312)
round(rnorm(40, mean = 10, sd = 4)) %>%
  data.frame(errors=.) %>%
  mutate(lot=row_number()) %>%
  mutate(errors=ifelse(errors < 0, 0, errors)) %>%
  select(2, 1) -> x
x
summary(x)
plot(x)
sum(x$errors)
# p-chart control limits: center line plus/minus 3 standard errors
pbar <- sum(x$errors) / (200 * 40)      # overall defect proportion
sigma <- sqrt(pbar * (1 - pbar) / 200)  # SE of a lot proportion (n = 200)
CL <- pbar
UCL <- pbar + 3 * sigma
LCL <- pbar - 3 * sigma
library(ggplot2)
x %>% mutate(r=(errors / 200)) %>%
  ggplot(aes(lot, r)) +
  geom_line(linewidth=1, color='blue') +
  geom_line(aes(y = UCL), col='red') +
  geom_line(aes(y = CL), col='red') +
  geom_line(aes(y = LCL), col='red')
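# The qcc package draws the same p-chart directly (sketch, assuming qcc
# is installed).
library(qcc)
qcc(x$errors, type="p", sizes=200)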
install.packages("fastDummies")
library(fastDummies)
# One-hot encode the 'group' factor in PlantGrowth
data <- PlantGrowth
data <- dummy_cols(data, select_columns="group")
# pandas equivalent in Python: X = pd.get_dummies(X, columns=['group'])
# One-sample proportion test; p must be the hypothesized proportion under H0,
# not the sample estimate x/n (testing against x/n always gives p-value 1)
n <- 100
x <- 80
p0 <- 0.5  # illustrative null value
prop.test(x, n, p=p0, alternative="two.sided")
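# Exact alternative (sketch): binom.test runs the exact binomial test
# of the same hypothesis.
binom.test(x, n, p=p0)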