I think this is a dplyr question more than a plyr one. To improve speed I use data.table in some of the code I write. At an intermediate step I have a table of genomics data with ~32,000 rows. Can I make this dplyr + data.table task faster?
> bedbin.dt
Source: local data table [32,138 x 4]
Groups: chr

   bin   start           site chr
1    2 3500000         ssCTCF   1
2    3 4000000 ssCTCF+Cohesin   1
3    3 4000000         ssCTCF   1
4    4 4500000         ucCTCF   1
5    4 4500000 ssCTCF+Cohesin   1
6    4 4500000 ssCTCF+Cohesin   1
7    4 4500000 ssCTCF+Cohesin   1
8    4 4500000         ssCTCF   1
9    4 4500000         ssCTCF   1
10   5 5000000         ssCTCF   1
..  ..     ...            ... ...
EDIT
Or, the first 100 rows of the data look like this (thanks to Ricardo Saporta for the pointer):
bedbin.dt <- data.table(structure(list(bin = c("2", "3", "3", "4", "4", "4", "4", "4","4", "5", "5", "7", "7", "7", "7", "7", "7", "8", "8", "9", "9","11", "12", "14", "14", "14", "14", "14", "14", "14", "14", "15","15", "15", "15", "15", "15", "15", "15", "15", "15", "16", "16","17", "17", "17", "18", "20", "20", "20", "21", "21", "21", "21","21", "21", "21", "21", "21", "21", "22", "22", "5057", "5057","5057", "5057", "5059", "5059", "5059", "5059", "5059", "5060","5060", "5060", "5060", "5060", "5060", "5061", "5063", "5063","5064", "5064", "5064", "5064", "5064", "5064", "5064", "5064","5064", "5064", "5064", "5064", "5064", "5064", "5064", "5064","5064", "5064", "5064", "5064"), start = c(3500000L, 4000000L,4000000L, 4500000L, 4500000L, 4500000L, 4500000L, 4500000L, 4500000L,5000000L, 5000000L, 6000000L, 6000000L, 6000000L, 6000000L, 6000000L,6000000L, 6500000L, 6500000L, 7000000L, 7000000L, 8000000L, 8500000L,9500000L, 9500000L, 9500000L, 9500000L, 9500000L, 9500000L, 9500000L,9500000L, 10000000L, 10000000L, 10000000L, 10000000L, 10000000L,10000000L, 10000000L, 10000000L, 10000000L, 10000000L, 10500000L,10500000L, 11000000L, 11000000L, 11000000L, 11500000L, 12500000L,12500000L, 12500000L, 13000000L, 13000000L, 13000000L, 13000000L,13000000L, 13000000L, 13000000L, 13000000L, 13000000L, 13000000L,13500000L, 13500000L, 162500000L, 162500000L, 162500000L, 162500000L,163500000L, 163500000L, 163500000L, 163500000L, 163500000L, 164000000L,164000000L, 164000000L, 164000000L, 164000000L, 164000000L, 164500000L,165500000L, 165500000L, 166000000L, 166000000L, 166000000L, 166000000L,166000000L, 166000000L, 166000000L, 166000000L, 166000000L, 166000000L,166000000L, 166000000L, 166000000L, 166000000L, 166000000L, 166000000L,166000000L, 166000000L, 166000000L, 166000000L), site = c("ssCTCF","ssCTCF+Cohesin", "ssCTCF", "ucCTCF", "ssCTCF+Cohesin", "ssCTCF+Cohesin","ssCTCF+Cohesin", "ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF+Cohesin","ssCTCF", "ssCTCF+Cohesin", "ssCTCF+Cohesin", "ssCTCF", "ucCTCF","ucCTCF", "ucCTCF", "ssCTCF", "ssCTCF", "ssCTCF+Cohesin", "ssCTCF","ssCTCF+Cohesin", "ssCTCF", "ucCTCF", "ucCTCF", "ssCTCF", "ssCTCF+Cohesin","ssCTCF", "ssCTCF+Cohesin", "ssCTCF+Cohesin", "ssCTCF+Cohesin","ssCTCF+Cohesin", "ssCTCF", "ucCTCF", "ssCTCF+Cohesin", "ssCTCF","ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF","ssCTCF", "ssCTCF", "ucCTCF", "ucCTCF", "ucCTCF", "ssCTCF", "ssCTCF","ssCTCF", "ssCTCF", "ssCTCF+Cohesin", "ssCTCF", "ssCTCF", "ssCTCF","ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF+Cohesin", "ucCTCF", "ssCTCF","ssCTCF+Cohesin", "ssCTCF+Cohesin", "ssCTCF", "ucCTCF", "ssCTCF","ssCTCF+Cohesin", "ssCTCF", "ssCTCF", "ucCTCF", "ucCTCF", "ssCTCF","ucCTCF", "ssCTCF", "ucCTCF", "ucCTCF", "ssCTCF", "ssCTCF", "ucCTCF","ucCTCF", "ssCTCF", "ssCTCF", "ssCTCF", "ucCTCF", "ucCTCF", "ssCTCF","ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF", "ssCTCF", "ucCTCF","ucCTCF", "ssCTCF+Cohesin", "ucCTCF", "ucCTCF", "ucCTCF"), chr = structure(c(1L,1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 20L, 20L,20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L,20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L,20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L, 20L), .Label = c("1","10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "2","3", "4", "5", "6", "7", "8", "9", "X"), class = "factor")), .Names = 
c("bin","start", "site", "chr"), sorted = "chr", class = c("data.table","data.frame"), row.names = c(NA, -100L)), key='chr')
END EDIT
Next I want to create every possible combination of each row with every other row (grouped by chr). I thought it would be best to pre-compute this table, which will then form a lookup (join) against some other data (the simplest option):
# group by the chr column
bedbin.dt = group_by(bedbin.dt, chr)

# an outer-like function: pairs every row of dt with every other row
# by repeating the row indices (each vs. times) and binding the two copies side by side
outerFun = function(dt)
{
  unique(data.table(
    x = dt[rep(1:nrow(dt), each = nrow(dt)), ],
    y = dt[rep.int(1:nrow(dt), times = nrow(dt)), ]))
}
> system.time((outer.bedbin.dt = do(bedbin.dt, outerFun)))
   user  system elapsed
 90.607  13.993 105.536
This seems sloooowwww to me... although, to be fair, it is much faster than using a data.frame or base functions like by() or lapply(). Still, this is really only a small dataset that I am testing it on.
So... I am wondering whether anyone has ideas for a faster version of outerFun. Is there a quicker approach than rep() or rep.int()?
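For reference, here is a sketch of the same per-chr pairing written as a keyed data.table self-join (on the plain keyed table, before the group_by() step); I have not benchmarked it, and xy.bedbin.dt is just an illustrative name:

# assumes bedbin.dt is still a plain data.table keyed by chr (before group_by)
# joining the table to itself on the key pairs every row with every other row of the same chr
xy.bedbin.dt <- unique(bedbin.dt[bedbin.dt, allow.cartesian = TRUE])
# non-key columns from the second copy come back prefixed with "i." (i.bin, i.start, i.site)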
Hi, could you please post a reproducible example? You can use `reproduce()`. Instructions here: [How to make a great R reproducible example](http://bit.ly/SORepro) –
@RicardoSaporta, hi, I'm not sure how to post a fully reproducible example. Let me think about it. Maybe I can have the code create it first? Give me a little while... –
Take a look at the link I posted. You can simply use `reproduce(bedbin.dt, rows = 100, cols = c("bin", "start", ..etc))` –