最後我寫了自己的滯後實現。這是hacky,並不漂亮,但速度更快。它可以在4分鐘內在我的蹩腳筆記本電腦上處理1000行。
# lags is a data.frame, eg:
# var amount
# y 1
# y 2
# Add lagged copies of columns to a data.frame.
#
# dataset: the data.frame to augment.
# lags:    a data.frame describing which columns to lag, e.g.:
#            var amount
#            y   1
#            y   2
#
# For each row of `lags`, a new column named "<var>_<amount>" is added,
# containing the source column shifted down by `amount` rows and
# zero-padded at the top. Assumes amount < nrow(dataset).
# Returns the augmented data.frame.
addLags <- function(dataset, lags) {
  N <- nrow(dataset)
  # seq_len() yields an empty sequence when lags has no rows, so no
  # explicit guard is needed (1:0 would wrongly iterate twice).
  for (j in seq_len(nrow(lags))) {
    sourcename <- as.character(lags[j, "var"])
    k <- lags[j, "amount"]
    lagcolname <- sprintf("%s_%d", sourcename, k)
    # Shift down by k, filling the first k rows with 0.
    dataset[, lagcolname] <- c(rep(0, k), dataset[1:(N - k), sourcename])
  }
  dataset
}
# Fit a linear model on training data augmented with lag columns.
#
# formula: an lm formula which may reference the generated lag columns
#          (e.g. y ~ x + y_1 + y_2).
# train:   training data.frame.
# lags:    lag specification as used by addLags().
#
# The first `skip` rows (where `skip` is the largest lag) contain
# zero-padding rather than real lagged values, so they are excluded
# from the fit. Returns the fitted lm object.
lmLagged <- function(formula, train, lags) {
  N <- nrow(train)
  # Largest lag; an empty lags frame means nothing needs skipping.
  skip <- if (nrow(lags) > 0) max(lags$amount) else 0
  train <- addLags(train, lags)
  lm(formula, train[(skip + 1):N, ])
}
# pass in training data, test data,
# it will step through one by one
# need to give dependent var name
# lags is a data.frame, eg:
# var amount
# y 1
# y 2
# Step through the test rows one at a time, predicting each row and
# feeding the prediction back into the lag columns of later rows.
#
# model:            a fitted model accepted by predict() (e.g. from lmLagged).
# train:            training data.frame (provides history for the first lags).
# test:             test data.frame; its dependent variable is ignored.
# dependentvarname: name of the dependent variable column, e.g. "y".
# lags:             lag specification as used by addLags(), e.g.:
#                     var amount
#                     y   1
#                     y   2
#
# Returns a numeric vector of predictions, one per test row.
predictLagged <- function(model, train, test, dependentvarname, lags) {
  Ntrain <- nrow(train)
  Ntest <- nrow(test)
  Ntotal <- Ntrain + Ntest
  # The dependent variable is unknown in the test rows; blank it out,
  # then stack train+test so lag columns can span the boundary.
  test[, dependentvarname] <- NA
  testtraindata <- rbind(train, test)
  testtraindata <- addLags(testtraindata, lags)
  for (i in seq_len(Ntest)) {
    thistestdata <- testtraindata[Ntrain + i, ]
    result <- predict(model, newdata = thistestdata)
    # Propagate the prediction into every lag column that will read it.
    for (j in seq_len(nrow(lags))) {
      sourcename <- as.character(lags[j, "var"])  # consistent with addLags
      k <- lags[j, "amount"]
      lagcolname <- sprintf("%s_%d", sourcename, k)
      # Guard: near the end of the test set, i + k points past the last
      # row; assigning there would silently grow the data.frame.
      if (Ntrain + i + k <= Ntotal) {
        testtraindata[Ntrain + i + k, lagcolname] <- result
      }
    }
    testtraindata[Ntrain + i, dependentvarname] <- result
  }
  testtraindata[(Ntrain + 1):Ntotal, dependentvarname]
}
library("RUnit")

# Size of the training data and the number of out-of-sample steps to forecast.
N <- 6
predictN <- 50

# Create training data the model can fit exactly: an AR(2)-style process
# driven by an exogenous regressor x.
set.seed(1)
x <- sample(100, N)
traindata <- numeric(N)  # preallocate instead of growing
traindata[1] <- 1 + 1.1 * x[1]
traindata[2] <- 2 + 1.1 * x[2]
for (i in 3:N) {
  traindata[i] <- 0.5 + 0.3 * traindata[i - 2] - 0.8 * traindata[i - 1] + 1.1 * x[i]
}
train <- data.frame(x = x, y = traindata, foo = 1)
#train$x <- NULL

# Test data: known x, unknown y (a bunch of NAs).
test <- data.frame(x = sample(100, predictN), y = rep(NA, predictN), foo = 1)

# Specify which lags to handle: one row per lag, giving the variable
# being lagged and the distance. The generated columns (y_1, y_2, ...)
# can then be referenced in the model formula.
# It's hacky but it kind of works...
lags <- data.frame(var = c("y", "y"), amount = c(1, 2))

# Fit the model; it recovers the generating coefficients exactly. Nice!
model <- lmLagged(y ~ x + y_1 + y_2, train, lags)
print(model)

print(system.time(test <- predictLagged(model, train, test, "y", lags)))
#checkEqualsNumeric(69.10228, test[56-6], tolerance = 0.0001)
#checkEquals(2972.159, test$y[106-6])
print(test)

# Nice plot of the forecast path.
plot(test, type = 'l')
輸出:
> source("test/test.regressionlagged.r",echo=F)
Call:
lm(formula = formula, data = train[(skip + 1):N, ])
Coefficients:
(Intercept) x y_1 y_2
0.5 1.1 -0.8 0.3
user system elapsed
0.204 0.000 0.204
[1] -19.108620 131.494916 -42.228519 80.331290 -54.433588 86.846257
[7] -13.807082 77.199543 12.698241 64.101270 56.428457 72.487616
[13] -3.161555 99.575529 8.991110 44.079771 28.433517 3.077118
[19] 30.768361 12.008447 2.323751 36.343533 67.822299 -13.154779
[25] 72.070513 -11.602844 115.003429 -79.583596 164.667906 -102.309403
[31] 193.347894 -176.071136 254.361277 -225.010363 349.216673 -299.076448
[37] 400.626160 -371.223862 453.966938 -420.140709 560.802649 -542.284332
[43] 701.568260 -679.439907 839.222404 -773.509895 897.474637 -935.232679
[49] 1022.328534 -991.232631
這91行代碼大約花了我12小時的工作。好吧,我承認中間玩了一會兒《植物大戰殭屍》,所以算10個小時,外加午餐和晚餐。無論如何,工作量仍然相當大。
如果我們將predictN更改爲1000,我會從system.time
調用中獲得大約4.1秒。
我認爲它更快的原因是:
- 我們沒有使用時間序列對象;我懷疑這加快了速度
- 我們沒有使用 dyn 或 dynlm 這類動態回歸庫,只用普通的 lm;我想這會稍快一些
- 每次預測時,我們只向 predict 傳入單獨一行數據;而據我所知,如果使用 dyn$lm 或 dynlm,假設有 30 個滯後項,每次預測就需要傳入 31 行數據,所以我認爲這一點帶來了顯著的提速
- 少了很多 data.frame/矩陣的複製,因爲我們在每次迭代中只需就地更新滯後值
編輯:修正了一個小 bug——predictLagged 原本返回的是多列的數據幀而不是數字向量。Edit2:修正了另一個小錯誤:之前無法添加多個變量。還調整了註釋和代碼中的滯後寫法,並將滯後結構的列名從「name」和「lag」改爲「var」和「amount」。此外,更新了測試代碼以添加第二個變量。
編輯:這個版本有很多bug,我知道,因爲我已經對它進行了單元測試並修復了它們,但是複製和粘貼非常耗時,所以我會更新這篇文章幾天後,一旦我的截止日期結束。
爲什麼不看'predict.dyn'並查看代碼的作用?我懷疑它比你想象的更復雜:-) –
它顯然只是包裝了 'predict':'NextMethod("predict")' –
但是在手動構造滯後列的數據上直接調用 predict、而不使用時間序列對象,速度快了 6 倍。 –