Suppose we have a folder containing multiple data.csv files, each containing the same number of variables, but each from a different time. Is there a way in R to import them all at once rather than one by one?
My problem is that I have around 2000 data files to import, and importing them individually with:
read.delim(file="filename", header=TRUE, sep="\t")
is just not efficient.
Current answer
Here are some options to convert the .csv files into one data.frame, using base R and some of the packages available for reading files in R.
This one is slower than the options below:
# Get the files names
files = list.files(pattern="*.csv")
# First apply read.csv, then rbind
myfiles = do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
EDIT: A few more options, using data.table and readr.
A fread() version, a function from the data.table package. This is by far the fastest option in R:
library(data.table)
DT = do.call(rbind, lapply(files, fread))
# The same using `rbindlist`
DT = rbindlist(lapply(files, fread))
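As an aside (an addition to this answer; all three arguments are part of data.table's documented rbindlist() API): use.names and fill help when the files do not all share identical columns, and idcol records which file each row came from.

# Name each list element by its file so idcol can record the source
DT = rbindlist(setNames(lapply(files, fread), files),
               use.names = TRUE,  # match columns by name, not position
               fill = TRUE,       # pad columns missing from some files with NA
               idcol = "file")    # add a column holding the source file name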
Using readr, another package for reading csv files. It's slower than fread and faster than base R, but it has different functionality:
library(readr)
library(dplyr)
tbl = lapply(files, read_csv) %>% bind_rows()
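A note worth adding (an assumption: readr >= 2.0, where the readers were vectorized): read_csv() can take the whole vector of paths at once and bind the results itself, optionally recording each row's source in an id column.

library(readr)
tbl = read_csv(files, id = "source_file")  # requires readr >= 2.0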
Other answers
A speedy and succinct solution (more than twice as fast as base R's read.csv):
library(tidyverse)  # provides read_csv (readr), map_df (purrr) and %>%

tbl <-
    list.files(pattern = "*.csv") %>%
    map_df(~read_csv(.))
And data.table's fread() can even cut those load times in half again (down to about a quarter of the base R times):
library(data.table)
tbl_fread <-
    list.files(pattern = "*.csv") %>%
    map_df(~fread(.))
The stringsAsFactors = FALSE argument keeps the data frame free of factors (and, as marbel points out, is the default setting for fread).
If the type guessing is being cheeky, you can force all the columns to be character with the col_types argument:
tbl <-
    list.files(pattern = "*.csv") %>%
    map_df(~read_csv(., col_types = cols(.default = "c")))
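If you take that all-character route, one possible follow-up (a sketch using readr's type_convert()) is to re-guess the column types once everything is bound, when the combined data is consistent:

tbl_typed <- type_convert(tbl)  # re-guess column types on the bound result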
If you want to dip into subdirectories to construct your list of files to eventually bind, then be sure to include the path name and register the files with their full names in your list. This will allow the binding work to go on outside of the current directory. (Think of the full pathnames as passports that allow movement back across directory "borders".)
tbl <-
    list.files(path = "./subdirectory/",
               pattern = "*.csv",
               full.names = TRUE) %>%
    map_df(~read_csv(., col_types = cols(.default = "c")))
As Hadley describes here (about halfway down the page):
map_df(x, f) is effectively the same as do.call("rbind", lapply(x, f))....
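A minimal sketch to check that equivalence yourself (toy data frames, not from the original answer):

library(tidyverse)  # map_df() comes from purrr, with a dplyr backend
dfs <- list(data.frame(a = 1:2), data.frame(a = 3:4))
a <- map_df(dfs, identity)                    # purrr route
b <- do.call("rbind", lapply(dfs, identity))  # base R route
all.equal(as.data.frame(a), b)                # TRUE: same rows, same columns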
Bonus feature, per Niks' feature request in the comments below: add the original filename to each record.
Code explained: make a function that appends the filename to each record during the initial reading of the tables, then use that function instead of the plain read_csv().
read_plus <- function(flnm) {
    read_csv(flnm) %>%
        mutate(filename = flnm)
}
tbl_with_sources <-
    list.files(pattern = "*.csv",
               full.names = TRUE) %>%
    map_df(~read_plus(.))
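An alternative sketch that skips the helper function entirely: when the input vector is named, purrr's map_dfr() records each row's source via its .id argument (set_names() with a single argument names a vector by its own values):

tbl_with_sources <-
    list.files(pattern = "*.csv",
               full.names = TRUE) %>%
    set_names() %>%                      # name each path by itself
    map_dfr(read_csv, .id = "filename")  # .id writes those names to a column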
(The typecasting and subdirectory-handling approaches can also be handled inside the read_plus() function, in the same manner as illustrated in the second and third variants above.)
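For concreteness, a minimal sketch of read_plus() with both of those pieces folded in (the subdirectory path is illustrative):

read_plus <- function(flnm) {
    read_csv(flnm, col_types = cols(.default = "c")) %>%  # force character columns
        mutate(filename = flnm)                           # keep the source path
}

tbl_with_sources <-
    list.files(path = "./subdirectory/",
               pattern = "*.csv",
               full.names = TRUE) %>%
    map_df(~read_plus(.))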
### Benchmark Code & Results
library(tidyverse)
library(data.table)
library(microbenchmark)
### Base R Approaches
#### Instead of a dataframe, this approach creates a list of lists
#### removed from analysis as this alone doubled analysis time reqd
# lapply_read.delim <- function(path, pattern = "*.csv") {
#     temp = list.files(path, pattern, full.names = TRUE)
#     myfiles = lapply(temp, read.delim)
# }

#### `read.csv()`
do.call_rbind_read.csv <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
}

map_df_read.csv <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>%
    map_df(~read.csv(., stringsAsFactors = FALSE))
}

### *dplyr()*
#### `read_csv()`
lapply_read_csv_bind_rows <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    lapply(files, read_csv) %>% bind_rows()
}

map_df_read_csv <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>%
    map_df(~read_csv(., col_types = cols(.default = "c")))
}

### *data.table* / *purrr* hybrid
map_df_fread <- function(path, pattern = "*.csv") {
    list.files(path, pattern, full.names = TRUE) %>%
    map_df(~fread(.))
}

### *data.table*
rbindlist_fread <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    rbindlist(lapply(files, function(x) fread(x)))
}

do.call_rbind_fread <- function(path, pattern = "*.csv") {
    files = list.files(path, pattern, full.names = TRUE)
    do.call(rbind, lapply(files, function(x) fread(x, stringsAsFactors = FALSE)))
}
read_results <- function(dir_size){
    microbenchmark(
        # lapply_read.delim = lapply_read.delim(dir_size), # too slow to include in benchmarks
        do.call_rbind_read.csv = do.call_rbind_read.csv(dir_size),
        map_df_read.csv = map_df_read.csv(dir_size),
        lapply_read_csv_bind_rows = lapply_read_csv_bind_rows(dir_size),
        map_df_read_csv = map_df_read_csv(dir_size),
        rbindlist_fread = rbindlist_fread(dir_size),
        do.call_rbind_fread = do.call_rbind_fread(dir_size),
        map_df_fread = map_df_fread(dir_size),
        times = 10L)
}
read_results_lrg_mid_mid <- read_results('./testFolder/500MB_12.5MB_40files')
print(read_results_lrg_mid_mid, digits = 3)
read_results_sml_mic_mny <- read_results('./testFolder/5MB_5KB_1000files/')
read_results_sml_tny_mod <- read_results('./testFolder/5MB_50KB_100files/')
read_results_sml_sml_few <- read_results('./testFolder/5MB_500KB_10files/')
read_results_med_sml_mny <- read_results('./testFolder/50MB_50KB_1000files')
read_results_med_sml_mod <- read_results('./testFolder/50MB_500KB_100files')
read_results_med_med_few <- read_results('./testFolder/50MB_5MB_10files')
read_results_lrg_sml_mny <- read_results('./testFolder/500MB_500KB_1000files')
read_results_lrg_med_mod <- read_results('./testFolder/500MB_5MB_100files')
read_results_lrg_lrg_few <- read_results('./testFolder/500MB_50MB_10files')
read_results_xlg_lrg_mod <- read_results('./testFolder/5000MB_50MB_100files')
print(read_results_sml_mic_mny, digits = 3)
print(read_results_sml_tny_mod, digits = 3)
print(read_results_sml_sml_few, digits = 3)
print(read_results_med_sml_mny, digits = 3)
print(read_results_med_sml_mod, digits = 3)
print(read_results_med_med_few, digits = 3)
print(read_results_lrg_sml_mny, digits = 3)
print(read_results_lrg_med_mod, digits = 3)
print(read_results_lrg_lrg_few, digits = 3)
print(read_results_xlg_lrg_mod, digits = 3)
# display boxplot of my typical use case results & basic machine max load
par(oma = c(0,0,0,0)) # remove overall margins if present
par(mfcol = c(1,1)) # remove grid if present
par(mar = c(12,5,1,1) + 0.1) # to display just a single boxplot with its complete labels
boxplot(read_results_lrg_mid_mid, las = 2, xlab = "", ylab = "Duration (seconds)", main = "40 files @ 12.5MB (500MB)")
boxplot(read_results_xlg_lrg_mod, las = 2, xlab = "", ylab = "Duration (seconds)", main = "100 files @ 50MB (5GB)")
# generate 3x3 grid boxplots
par(oma = c(12,1,1,1)) # margins for the whole 3 x 3 grid plot
par(mfcol = c(3,3)) # create grid (filling down each column)
par(mar = c(1,4,2,1)) # margins for the individual plots in 3 x 3 grid
boxplot(read_results_sml_mic_mny, las = 2, xlab = "", ylab = "Duration (seconds)", main = "1000 files @ 5KB (5MB)", xaxt = 'n')
boxplot(read_results_sml_tny_mod, las = 2, xlab = "", ylab = "Duration (milliseconds)", main = "100 files @ 50KB (5MB)", xaxt = 'n')
boxplot(read_results_sml_sml_few, las = 2, xlab = "", ylab = "Duration (milliseconds)", main = "10 files @ 500KB (5MB)")
boxplot(read_results_med_sml_mny, las = 2, xlab = "", ylab = "Duration (microseconds) ", main = "1000 files @ 50KB (50MB)", xaxt = 'n')
boxplot(read_results_med_sml_mod, las = 2, xlab = "", ylab = "Duration (microseconds)", main = "100 files @ 500KB (50MB)", xaxt = 'n')
boxplot(read_results_med_med_few, las = 2, xlab = "", ylab = "Duration (seconds)", main = "10 files @ 5MB (50MB)")
boxplot(read_results_lrg_sml_mny, las = 2, xlab = "", ylab = "Duration (seconds)", main = "1000 files @ 500KB (500MB)", xaxt = 'n')
boxplot(read_results_lrg_med_mod, las = 2, xlab = "", ylab = "Duration (seconds)", main = "100 files @ 5MB (500MB)", xaxt = 'n')
boxplot(read_results_lrg_lrg_few, las = 2, xlab = "", ylab = "Duration (seconds)", main = "10 files @ 50MB (500MB)")
Middling Use Case

Larger Use Case

Variety of Use Cases

Rows: file counts (1000, 100, 10). Columns: final dataframe size (5MB, 50MB, 500MB).
The base R results are better for the smallest use cases, where the overhead of bringing the C libraries of purrr and dplyr to bear outweighs the performance gains observed on larger-scale processing tasks.
If you would like to run your own tests, you may find this bash script helpful:
# Usage: bash this_script.sh <file_to_copy> <number_of_copies>
for ((i=1; i<=$2; i++)); do
    cp "$1" "${1:0:8}_${i}.csv";   # copies numbered after the first 8 characters
done
bash what_you_name_this_script.sh "filename_you_want_copied" 100 will create 100 copies of your file, sequentially numbered (after the initial 8 characters of the filename and an underscore).
Attributions and Appreciations

With special thanks to:

Tyler Rinker and Akrun for demonstrating microbenchmark.
Jake Kaupp for introducing me to map_df().
David McLaughlin for helpful feedback on improving the visualizations and for discussing/confirming the performance inversions observed in the small-file, small-dataframe results.
marbel for pointing out the default behavior of fread(). (I need to study up on data.table.)
Here is my specific example of reading multiple files and combining them into one data frame:
path <- file.path("C:/folder/subfolder")
files <- list.files(path = path, pattern = "*.csv", full.names = TRUE)
library(data.table)
data = do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
I was asked to add this functionality to the stackoverflow R package. Given that it is a tinyverse package (and can't depend on third-party packages), here is what I came up with:
#' Bulk import data files
#'
#' Read in each file at a path and then unnest them. Defaults to csv format.
#'
#' @param path a character vector of full path names
#' @param pattern an optional \link[=regex]{regular expression}. Only file names which match the regular expression will be returned.
#' @param reader a function that can read data from a file name.
#' @param ... optional arguments to pass to the reader function (eg \code{stringsAsFactors}).
#' @param reducer a function to unnest the individual data files. Use I to retain the nested structure.
#' @param recursive logical. Should the listing recurse into directories?
#'
#' @author Neal Fultz
#' @references \url{https://stackoverflow.com/questions/11433432/how-to-import-multiple-csv-files-at-once}
#'
#' @importFrom utils read.csv
#' @export
read.directory <- function(path = '.', pattern = NULL, reader = read.csv, ...,
                           reducer = function(dfs) do.call(rbind.data.frame, dfs),
                           recursive = FALSE) {
    files <- list.files(path, pattern, full.names = TRUE, recursive = recursive)
    reducer(lapply(files, reader, ...))
}
By parameterizing the reader and reducer functions, people can use data.table or dplyr if they so choose, or just use the base R functions that are fine for smaller data sets.
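For example, a hedged usage sketch (assuming data.table is installed; these calls just exercise the parameters defined above):

library(data.table)
# data.table pair: fread to read each file, rbindlist to unnest the list
DT <- read.directory(pattern = "\\.csv$", reader = fread, reducer = rbindlist)

# Or keep the nested list-of-data-frames structure, as documented above
dfs <- read.directory(pattern = "\\.csv$", reducer = I)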
I like the approach using list.files(), lapply() and list2env() (or fs::dir_ls(), purrr::map() and list2env()). That seems simple and flexible.
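A minimal sketch of that base R pattern (the file-derived object names are illustrative): each CSV becomes its own data frame in the global environment, named after its file.

files <- list.files(pattern = "*.csv", full.names = TRUE)
dfs <- lapply(files, read.csv)
names(dfs) <- tools::file_path_sans_ext(basename(files))  # e.g. "csv1", "csv2"
list2env(dfs, envir = .GlobalEnv)  # one data frame per file in the workspace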
Alternatively, you may try the small package {tor} (to-R): by default it imports files from the working directory into a list (the list_*() variants) or into the global environment (the load_*() variants).
For example, here I read all the .csv files from my working directory into a list using tor::list_csv():
library(tor)
dir()
#> [1] "_pkgdown.yml" "cran-comments.md" "csv1.csv"
#> [4] "csv2.csv" "datasets" "DESCRIPTION"
#> [7] "docs" "inst" "LICENSE.md"
#> [10] "man" "NAMESPACE" "NEWS.md"
#> [13] "R" "README.md" "README.Rmd"
#> [16] "tests" "tmp.R" "tor.Rproj"
list_csv()
#> $csv1
#> x
#> 1 1
#> 2 2
#>
#> $csv2
#> y
#> 1 a
#> 2 b
And now I load those files into my global environment with tor::load_csv():
# The working directory contains .csv files
dir()
#> [1] "_pkgdown.yml" "cran-comments.md" "CRAN-RELEASE"
#> [4] "csv1.csv" "csv2.csv" "datasets"
#> [7] "DESCRIPTION" "docs" "inst"
#> [10] "LICENSE.md" "man" "NAMESPACE"
#> [13] "NEWS.md" "R" "README.md"
#> [16] "README.Rmd" "tests" "tmp.R"
#> [19] "tor.Rproj"
load_csv()
# Each file is now available as a dataframe in the global environment
csv1
#> x
#> 1 1
#> 2 2
csv2
#> y
#> 1 a
#> 2 b
Should you need to read specific files, you can match their file paths with the arguments regexp, ignore.case and invert.
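For example (a sketch based on those argument names; the file patterns are illustrative):

library(tor)
list_csv(regexp = "csv1")                      # only files matching the pattern
list_csv(regexp = "CSV1", ignore.case = TRUE)  # match case-insensitively
list_csv(regexp = "csv1", invert = TRUE)       # everything except those files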
For even more flexibility, use list_any(). It allows you to supply the reader function via the argument .f.
(path_csv <- tor_example("csv"))
#> [1] "C:/Users/LeporeM/Documents/R/R-3.5.2/library/tor/extdata/csv"
dir(path_csv)
#> [1] "file1.csv" "file2.csv"
list_any(path_csv, read.csv)
#> $file1
#> x
#> 1 1
#> 2 2
#>
#> $file2
#> y
#> 1 a
#> 2 b
Pass additional arguments via ... or inside the lambda function:
path_csv %>%
list_any(readr::read_csv, skip = 1)
#> Parsed with column specification:
#> cols(
#> `1` = col_double()
#> )
#> Parsed with column specification:
#> cols(
#> a = col_character()
#> )
#> $file1
#> # A tibble: 1 x 1
#> `1`
#> <dbl>
#> 1 2
#>
#> $file2
#> # A tibble: 1 x 1
#> a
#> <chr>
#> 1 b
path_csv %>%
list_any(~read.csv(., stringsAsFactors = FALSE)) %>%
map(as_tibble)
#> $file1
#> # A tibble: 2 x 1
#> x
#> <int>
#> 1 1
#> 2 2
#>
#> $file2
#> # A tibble: 2 x 1
#> y
#> <chr>
#> 1 a
#> 2 b