Suppose we have a folder containing multiple data.csv files, each holding the same number of variables but recorded at different times. Is there a way in R to import all of them simultaneously rather than one by one?
My problem is that I have about 2000 data files to import, and importing them individually with:
read.delim(file="filename", header=TRUE, sep="\t")
is not efficient.
Current answer
Using purrr and including the file IDs as a column:
library(tidyverse)
p <- "my/directory"
files <- list.files(p, pattern = "csv", full.names = TRUE) %>%
    set_names()
merged <- files %>% map_dfr(read_csv, .id="filename")
Without set_names(), .id= will use integer indicators instead of the actual file names.
If you then want a short file name rather than the full path:
merged <- merged %>% mutate(filename=basename(filename))
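As an aside (not part of the original answer): readr 2.0 and later can take the whole vector of paths directly and record each row's source via the id argument, so the set_names()/map_dfr() pairing becomes optional. A minimal sketch, assuming the same p as above:
library(readr) # requires readr >= 2.0
files <- list.files(p, pattern = "csv", full.names = TRUE)
merged <- read_csv(files, id = "filename") # "filename" column holds each row's source path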
Other answers
Building on dnlbrk's comment, assign can be considerably faster than list2env for big files.
library(readr)
library(stringr)
List_of_filepaths <- list.files(path = "C:/Users/Anon/Documents/Folder_with_csv_files/", pattern = ".csv", all.files = TRUE, full.names = TRUE)
By setting the full.names argument to TRUE, you get the full path to each file as a separate character string in your list of files; for example, List_of_filepaths[1] will be something like "C:/Users/Anon/Documents/Folder_with_csv_files/file1.csv".
for (f in 1:length(List_of_filepaths)) {
    file_name <- str_sub(string = List_of_filepaths[f], start = 46, end = -5)
    file_df <- read_csv(List_of_filepaths[f])
    assign(x = file_name, value = file_df, envir = .GlobalEnv)
}
You could use the data.table package's fread, or base R's read.csv, instead of read_csv. The file_name step lets you tidy the name so that each data frame does not keep the full path to the file as its name. You can extend the loop to do further things to each data table before transferring it to the global environment, for example:
for (f in 1:length(List_of_filepaths)) {
    file_name <- str_sub(string = List_of_filepaths[f], start = 46, end = -5)
    file_df <- read_csv(List_of_filepaths[f])
    file_df <- file_df[, 1:3] # if you only need the first three columns
    assign(x = file_name, value = file_df, envir = .GlobalEnv)
}
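If the length of your folder path varies, hard-coding character positions into str_sub() is brittle. A more portable sketch (my own variant, assuming the same List_of_filepaths as above) derives the object name with basename() and tools::file_path_sans_ext():
for (f in 1:length(List_of_filepaths)) {
    # strip the directory and the ".csv" extension to get a clean object name
    file_name <- tools::file_path_sans_ext(basename(List_of_filepaths[f]))
    file_df <- read_csv(List_of_filepaths[f])
    assign(x = file_name, value = file_df, envir = .GlobalEnv)
}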
Here is my specific example of reading multiple files and combining them into one data frame:
path <- file.path("C:/folder/subfolder")
files <- list.files(path = path, pattern = "\\.csv$", full.names = TRUE)
library(data.table)
data = do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
A quick and terse solution (more than twice as fast as base R's read.csv):
library(tidyverse)
tbl <-
    list.files(pattern = "*.csv") %>%
    map_df(~read_csv(.))
And data.table's fread() can cut those load times in half again (down to roughly 1/4 of the base R times):
library(data.table)
tbl_fread <-
    list.files(pattern = "*.csv") %>%
    map_df(~fread(.))
The stringsAsFactors = FALSE argument keeps the data frame free of factors (and, as marbel points out, is the default setting for fread anyway).
If the type casting is being cheeky, you can force all the columns to be character with the col_types argument:
tbl <-
    list.files(pattern = "*.csv") %>%
    map_df(~read_csv(., col_types = cols(.default = "c")))
If you want to dip into subdirectories to construct your list of files to eventually bind, be sure to include the path name and register the files with their full names in your list. This will allow the binding work to go on outside of the current directory. (Think of the full pathnames as passports allowing movement back across directory "borders".)
tbl <-
    list.files(path = "./subdirectory/",
               pattern = "*.csv",
               full.names = T) %>%
    map_df(~read_csv(., col_types = cols(.default = "c")))
As Hadley describes here (about halfway down):
map_df(x, f) is effectively the same as do.call("rbind", lapply(x, f))....
Bonus feature - adding the original filename to each record, per Niks' feature request in the comments below.
Code explained: make a function to append the filename to each record during the initial reading of the tables, then use that function instead of the plain read_csv() function.
read_plus <- function(flnm) {
    read_csv(flnm) %>%
        mutate(filename = flnm)
}

tbl_with_sources <-
    list.files(pattern = "*.csv",
               full.names = T) %>%
    map_df(~read_plus(.))
(The type-casting and subdirectory handling approaches can also be handled inside the read_plus() function, in the same manner as illustrated in the second and third variants suggested above.)
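For instance, a combined sketch (my own illustration of that remark, not a separate variant from the answer) folding both the col_types coercion and the subdirectory handling into read_plus():
read_plus <- function(flnm) {
    # coerce every column to character and record the source file
    read_csv(flnm, col_types = cols(.default = "c")) %>%
        mutate(filename = flnm)
}

tbl_with_sources <-
    list.files(path = "./subdirectory/",
               pattern = "*.csv",
               full.names = TRUE) %>%
    map_df(~read_plus(.))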
### Benchmark Code & Results
library(tidyverse)
library(data.table)
library(microbenchmark)
### Base R Approaches
#### Instead of a dataframe, this approach creates a list of lists
#### removed from analysis as this alone doubled analysis time reqd
# lapply_read.delim <- function(path, pattern = "*.csv") {
# temp = list.files(path, pattern, full.names = TRUE)
# myfiles = lapply(temp, read.delim)
# }
#### `read.csv()`
do.call_rbind_read.csv <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
}

map_df_read.csv <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>%
        map_df(~read.csv(., stringsAsFactors = FALSE))
}

### *dplyr()*
#### `read_csv()`

lapply_read_csv_bind_rows <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    lapply(files, read_csv) %>% bind_rows()
}

map_df_read_csv <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>%
        map_df(~read_csv(., col_types = cols(.default = "c")))
}

### *data.table* / *purrr* hybrid

map_df_fread <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>%
        map_df(~fread(.))
}

### *data.table*

rbindlist_fread <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    rbindlist(lapply(files, function(x) fread(x)))
}

do.call_rbind_fread <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    do.call(rbind, lapply(files, function(x) fread(x, stringsAsFactors = FALSE)))
}

read_results <- function(dir_size){
    microbenchmark(
        # lapply_read.delim = lapply_read.delim(dir_size), # too slow to include in benchmarks
        do.call_rbind_read.csv = do.call_rbind_read.csv(dir_size),
        map_df_read.csv = map_df_read.csv(dir_size),
        lapply_read_csv_bind_rows = lapply_read_csv_bind_rows(dir_size),
        map_df_read_csv = map_df_read_csv(dir_size),
        rbindlist_fread = rbindlist_fread(dir_size),
        do.call_rbind_fread = do.call_rbind_fread(dir_size),
        map_df_fread = map_df_fread(dir_size),
        times = 10L)
}
read_results_lrg_mid_mid <- read_results('./testFolder/500MB_12.5MB_40files')
print(read_results_lrg_mid_mid, digits = 3)
read_results_sml_mic_mny <- read_results('./testFolder/5MB_5KB_1000files/')
read_results_sml_tny_mod <- read_results('./testFolder/5MB_50KB_100files/')
read_results_sml_sml_few <- read_results('./testFolder/5MB_500KB_10files/')
read_results_med_sml_mny <- read_results('./testFolder/50MB_50KB_1000files')
read_results_med_sml_mod <- read_results('./testFolder/50MB_500KB_100files')
read_results_med_med_few <- read_results('./testFolder/50MB_5MB_10files')
read_results_lrg_sml_mny <- read_results('./testFolder/500MB_500KB_1000files')
read_results_lrg_med_mod <- read_results('./testFolder/500MB_5MB_100files')
read_results_lrg_lrg_few <- read_results('./testFolder/500MB_50MB_10files')
read_results_xlg_lrg_mod <- read_results('./testFolder/5000MB_50MB_100files')
print(read_results_sml_mic_mny, digits = 3)
print(read_results_sml_tny_mod, digits = 3)
print(read_results_sml_sml_few, digits = 3)
print(read_results_med_sml_mny, digits = 3)
print(read_results_med_sml_mod, digits = 3)
print(read_results_med_med_few, digits = 3)
print(read_results_lrg_sml_mny, digits = 3)
print(read_results_lrg_med_mod, digits = 3)
print(read_results_lrg_lrg_few, digits = 3)
print(read_results_xlg_lrg_mod, digits = 3)
# display boxplot of my typical use case results & basic machine max load
par(oma = c(0,0,0,0)) # remove overall margins if present
par(mfcol = c(1,1)) # remove grid if present
par(mar = c(12,5,1,1) + 0.1) # to display just a single boxplot with its complete labels
boxplot(read_results_lrg_mid_mid, las = 2, xlab = "", ylab = "Duration (seconds)", main = "40 files @ 12.5MB (500MB)")
boxplot(read_results_xlg_lrg_mod, las = 2, xlab = "", ylab = "Duration (seconds)", main = "100 files @ 50MB (5GB)")
# generate 3x3 grid boxplots
par(oma = c(12,1,1,1)) # margins for the whole 3 x 3 grid plot
par(mfcol = c(3,3)) # create grid (filling down each column)
par(mar = c(1,4,2,1)) # margins for the individual plots in 3 x 3 grid
boxplot(read_results_sml_mic_mny, las = 2, xlab = "", ylab = "Duration (seconds)", main = "1000 files @ 5KB (5MB)", xaxt = 'n')
boxplot(read_results_sml_tny_mod, las = 2, xlab = "", ylab = "Duration (milliseconds)", main = "100 files @ 50KB (5MB)", xaxt = 'n')
boxplot(read_results_sml_sml_few, las = 2, xlab = "", ylab = "Duration (milliseconds)", main = "10 files @ 500KB (5MB)",)
boxplot(read_results_med_sml_mny, las = 2, xlab = "", ylab = "Duration (microseconds) ", main = "1000 files @ 50KB (50MB)", xaxt = 'n')
boxplot(read_results_med_sml_mod, las = 2, xlab = "", ylab = "Duration (microseconds)", main = "100 files @ 500KB (50MB)", xaxt = 'n')
boxplot(read_results_med_med_few, las = 2, xlab = "", ylab = "Duration (seconds)", main = "10 files @ 5MB (50MB)")
boxplot(read_results_lrg_sml_mny, las = 2, xlab = "", ylab = "Duration (seconds)", main = "1000 files @ 500KB (500MB)", xaxt = 'n')
boxplot(read_results_lrg_med_mod, las = 2, xlab = "", ylab = "Duration (seconds)", main = "100 files @ 5MB (500MB)", xaxt = 'n')
boxplot(read_results_lrg_lrg_few, las = 2, xlab = "", ylab = "Duration (seconds)", main = "10 files @ 50MB (500MB)")
[Boxplot: middling use case]
[Boxplot: larger use case]
[3x3 grid of boxplots: variety of use cases]
Rows: file counts (1000, 100, 10); columns: final dataframe size (5MB, 50MB, 500MB)
For the smallest use cases, the overhead of the C libraries behind purrr and dplyr outweighs the performance gains observed with larger-scale processing tasks, so base R comes out ahead there.
If you'd like to run your own tests, you may find this bash script helpful.
for ((i=1; i<=$2; i++)); do
    cp "$1" "${1:0:8}_${i}.csv";
done
Running bash what_you_name_this_script.sh "filename_you_want_copies" 100 will create 100 copies of your file, sequentially numbered (after the initial 8 characters of the filename plus an underscore).
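If you'd rather stay in R, a roughly equivalent sketch (a hypothetical helper of my own, not from the original answer) using file.copy():
# make_copies("seed.csv", 100) creates seed_1.csv ... seed_100.csv in the working directory
make_copies <- function(seed_file, n) {
    stem <- tools::file_path_sans_ext(seed_file)
    for (i in seq_len(n)) {
        file.copy(seed_file, paste0(stem, "_", i, ".csv"))
    }
}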
Attributions and appreciations
With special thanks to:
- Tyler Rinker and Akrun for demonstrating microbenchmark.
- Jake Kaupp for introducing me to map_df().
- David McLaughlin for helpful feedback on improving the visualizations and for discussing/confirming the performance inversions observed in the small-file, small-dataframe analysis results.
- marbel for pointing out the default behavior for fread(). (I need to study up on data.table.)
As long as your computer has multiple cores, the following code should give you the fastest speed for big data:
if (!require("pacman")) install.packages("pacman")
pacman::p_load(doParallel, data.table, stringr)
# get the file name
dir() %>% str_subset("\\.csv$") -> fn
# use parallel setting
(cl <- detectCores() %>%
makeCluster()) %>%
registerDoParallel()
# read and bind all files together
system.time({
big_df <- foreach(
i = fn,
.packages = "data.table"
) %dopar%
{
fread(i, colClasses = "character")
} %>%
rbindlist(fill = TRUE)
})
# end of parallel work
stopImplicitCluster(cl)
Update on 20/04/16: As I found a new package available for parallel computation, an alternative solution is provided with the following code.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(future.apply, data.table, stringr)
# get the file name
dir() %>% str_subset("\\.csv$") -> fn
plan(multiprocess)
future_lapply(fn,fread,colClasses = "character") %>%
rbindlist(fill = TRUE) -> res
# res is the merged data.table
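One caveat: newer releases of the future package deprecate multiprocess in favor of explicit backends; assuming you are on a current version, the drop-in replacement is:
plan(multisession) # session-based parallelism; replaces the deprecated plan(multiprocess)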