Any ideas on doing an async directory search using fs.readdir? I realise we could bring in recursion and call the read-directory function on the next directory to read, but I'm a little worried about it not being async…
Any ideas? I've looked at node-walk, which is great, but it doesn't give me just the files in an array the way readdir does.
I'm looking for output like…
['file1.txt', 'file2.txt', 'dir/file3.txt']
Current answer
Just for fun, here is a stream-based version that works with the highland.js streams library. One of the authors is Victor Vu.
###
  directory >---m------> dirFilesStream >---------o----> out
                |                                 |
                |                                 |
                +--------< returnPipe <-----------+

  legend: (m)erge  (o)bserve

 + directory       has the initial file
 + dirFilesStream  does a directory listing
 + out             prints out the full path of the file
 + returnPipe      runs stat and filters on directories
###
_ = require('highland')
fs = require('fs')
fsPath = require('path')
directory = _(['someDirectory'])
mergePoint = _()
dirFilesStream = mergePoint.merge().flatMap((parentPath) ->
  _.wrapCallback(fs.readdir)(parentPath).sequence().map (path) ->
    fsPath.join parentPath, path
)
out = dirFilesStream
# Create the return pipe
returnPipe = dirFilesStream.observe().flatFilter((path) ->
  _.wrapCallback(fs.stat)(path).map (v) ->
    v.isDirectory()
)
# Connect up the merge point now that we have all of our streams.
mergePoint.write directory
mergePoint.write returnPipe
mergePoint.end()
# Release backpressure. This will print files as they are discovered
out.each _.log
# Another way would be to queue them all up and then print them all out at once.
# out.toArray((files)-> console.log(files))
Other answers
Here is one more way to do it. I'll leave it here; maybe it will be useful to someone in the future.
const fs = require("fs");
const { promisify } = require("util");
const p = require("path");
const readdir = promisify(fs.readdir);
async function getFiles(path) {
  try {
    // Read the directory entries along with their type information.
    const entries = await readdir(path, { withFileTypes: true });

    // Collect the files at this level.
    const files = entries
      .filter((file) => !file.isDirectory())
      .map((file) => ({
        path: `${path}/${file.name}`,
        ext: p.extname(`${path}/${file.name}`),
        pathDir: path,
      }));

    // Recurse into the sub-directories and append their files.
    const folders = entries.filter((folder) => folder.isDirectory());
    for (const folder of folders) {
      files.push(...(await getFiles(`${path}/${folder.name}`)));
    }

    return files;
  } catch (error) {
    // Note: the error object is returned rather than re-thrown.
    return error;
  }
}
Usage:
getFiles(rootFolderPath)
  .then((files) => console.log(files))
  .catch((error) => console.error(error));
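If you are on a Node.js version that ships the promise-based fs API (fs.promises, roughly Node 10.10+ for withFileTypes support), the same idea can be written without util.promisify. This is only a minimal sketch of that variation, not part of the original answer; getAllFiles is a made-up name and it returns plain path strings instead of the objects above:

const { readdir } = require("fs").promises;
const p = require("path");

// Recursively collect path strings for every file under dir.
async function getAllFiles(dir) {
  const entries = await readdir(dir, { withFileTypes: true });
  const files = await Promise.all(
    entries.map((entry) => {
      const full = p.join(dir, entry.name);
      return entry.isDirectory() ? getAllFiles(full) : full;
    })
  );
  return files.flat(); // Array.prototype.flat needs Node 11+
}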
There are basically two ways of accomplishing this. In an async environment you'll notice that there are two kinds of loops: serial and parallel. A serial loop waits for one iteration to complete before it moves on to the next iteration - this guarantees that every iteration of the loop completes in order. In a parallel loop, all the iterations are started at the same time, and one may complete before another; however, it is much faster than a serial loop. So in this case, it's probably better to use a parallel loop because it doesn't matter what order the walk completes in, just as long as it completes and returns the results (unless you want them in order).
A parallel loop looks like this:
var fs = require('fs');
var path = require('path');
var walk = function(dir, done) {
  var results = [];
  fs.readdir(dir, function(err, list) {
    if (err) return done(err);
    var pending = list.length;
    if (!pending) return done(null, results);
    list.forEach(function(file) {
      file = path.resolve(dir, file);
      fs.stat(file, function(err, stat) {
        if (stat && stat.isDirectory()) {
          walk(file, function(err, res) {
            results = results.concat(res);
            if (!--pending) done(null, results);
          });
        } else {
          results.push(file);
          if (!--pending) done(null, results);
        }
      });
    });
  });
};
A serial loop looks like this:
var fs = require('fs');
var path = require('path');
var walk = function(dir, done) {
  var results = [];
  fs.readdir(dir, function(err, list) {
    if (err) return done(err);
    var i = 0;
    (function next() {
      var file = list[i++];
      if (!file) return done(null, results);
      file = path.resolve(dir, file);
      fs.stat(file, function(err, stat) {
        if (stat && stat.isDirectory()) {
          walk(file, function(err, res) {
            results = results.concat(res);
            next();
          });
        } else {
          results.push(file);
          next();
        }
      });
    })();
  });
};
And to test it out on your home directory (warning: the results list will be huge if you have a lot of stuff in your home directory):
walk(process.env.HOME, function(err, results) {
  if (err) throw err;
  console.log(results);
});
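As a side note: because this walk follows the standard Node (err, results) callback convention, it can also be consumed with promises by wrapping it, for example with util.promisify. This is just a sketch under that assumption (Node 8+), not part of the original answer:

const { promisify } = require('util');
const walkAsync = promisify(walk);

// Same traversal as above, but awaitable / then-able.
walkAsync(process.env.HOME)
  .then(function(results) { console.log(results); })
  .catch(function(err) { console.error(err); });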
Edit: improved examples.
Because everyone should write their own, here is mine. The signature is walk(dir, cb, endCb): cb(file) is called for each file found, and endCb(err | null) is called when the walk finishes or fails. It's dirty.
module.exports = walk;
function walk(dir, cb, endCb) {
  var fs = require('fs');
  var path = require('path');

  fs.readdir(dir, function(err, files) {
    if (err) {
      return endCb(err);
    }

    var pending = files.length;
    if (pending === 0) {
      endCb(null);
    }

    files.forEach(function(file) {
      fs.stat(path.join(dir, file), function(err, stats) {
        if (err) {
          return endCb(err);
        }

        if (stats.isDirectory()) {
          walk(path.join(dir, file), cb, function() {
            pending--;
            if (pending === 0) {
              endCb(null);
            }
          });
        } else {
          cb(path.join(dir, file));
          pending--;
          if (pending === 0) {
            endCb(null);
          }
        }
      });
    });
  });
}
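For clarity, here is a small usage sketch for this callback-per-file API; 'someDirectory' is just a placeholder path and not from the original answer:

walk('someDirectory', function(file) {
  // Called once for every file as it is discovered.
  console.log(file);
}, function(err) {
  // Called when the walk finishes, or with an error if one occurred.
  if (err) return console.error(err);
  console.log('done');
});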
I have to add the Promise-based sander library to the list.
var sander = require('sander');
sander.lsr(directory).then( filenames => { console.log(filenames) } );
Just in case anyone finds it useful, I also put together a synchronous version.
var fs = require('fs');

var walk = function(dir) {
  var results = [];
  var list = fs.readdirSync(dir);
  list.forEach(function(file) {
    file = dir + '/' + file;
    var stat = fs.statSync(file);
    if (stat && stat.isDirectory()) {
      /* Recurse into a subdirectory */
      results = results.concat(walk(file));
    } else {
      /* Is a file */
      results.push(file);
    }
  });
  return results;
}
Tip: use fewer resources when filtering by filtering within this function itself. For example, replace results.push(file); with the code below, adjusting as needed:
var file_type = file.split(".").pop();
var file_name = file.split(/(\\|\/)/g).pop();
if (file_type == "json") results.push(file);
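Put together, a filtered variant of the synchronous walk might look like the sketch below. walkJson is a made-up name and the .json filter is just the example from the tip above; it is not part of the original answer:

var fs = require('fs');

var walkJson = function(dir) {
  var results = [];
  fs.readdirSync(dir).forEach(function(file) {
    file = dir + '/' + file;
    var stat = fs.statSync(file);
    if (stat && stat.isDirectory()) {
      // Recurse into the subdirectory.
      results = results.concat(walkJson(file));
    } else if (file.split(".").pop() == "json") {
      // Filter here instead of building the full list first.
      results.push(file);
    }
  });
  return results;
};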