Any ideas on an asynchronous directory search using fs.readdir? I realize we could bring in recursion and call the read-directory function with the next directory to read, but I'm a little worried about it not being async…
Any ideas? I've looked at node-walk, which is great, but it doesn't give me just the files in an array the way readdir does. I'm looking for output like…
['file1.txt', 'file2.txt', 'dir/file3.txt']
Current answer
It uses the largest amount of new features available in Node 8, including Promises, util/promisify, destructuring, async-await, map+reduce and more, making your co-workers scratch their heads as they try to figure out what's going on.
Node 8+
No external dependencies.
const { promisify } = require('util');
const { resolve } = require('path');
const fs = require('fs');
const readdir = promisify(fs.readdir);
const stat = promisify(fs.stat);
async function getFiles(dir) {
const subdirs = await readdir(dir);
const files = await Promise.all(subdirs.map(async (subdir) => {
const res = resolve(dir, subdir);
return (await stat(res)).isDirectory() ? getFiles(res) : res;
}));
return files.reduce((a, f) => a.concat(f), []);
}
Usage:
getFiles(__dirname)
.then(files => console.log(files))
.catch(e => console.error(e));
Node 10.10+
Updated for Node 10+ with even more whizbang:
const { resolve } = require('path');
const { readdir } = require('fs').promises;
async function getFiles(dir) {
const dirents = await readdir(dir, { withFileTypes: true });
const files = await Promise.all(dirents.map((dirent) => {
const res = resolve(dir, dirent.name);
return dirent.isDirectory() ? getFiles(res) : res;
}));
return Array.prototype.concat(...files);
}
Note that, starting with Node 11.15.0, you can use files.flat() instead of Array.prototype.concat(...files) to flatten the files array.
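As a minimal sketch, the Node 10.10+ function above with that change would look like this (only the final line differs):

const { resolve } = require('path');
const { readdir } = require('fs').promises;

async function getFiles(dir) {
  const dirents = await readdir(dir, { withFileTypes: true });
  const files = await Promise.all(dirents.map((dirent) => {
    const res = resolve(dir, dirent.name);
    return dirent.isDirectory() ? getFiles(res) : res;
  }));
  // flat() defaults to a depth of 1, which matches the single level of
  // nesting produced by the recursive getFiles calls above
  return files.flat();
}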
Node 11+
If you want to blow everybody's mind, you can use the following version based on async iterators. Besides being really cool, it also lets consumers pull out results one at a time, which makes it better suited for really large directories.
const { resolve } = require('path');
const { readdir } = require('fs').promises;
async function* getFiles(dir) {
const dirents = await readdir(dir, { withFileTypes: true });
for (const dirent of dirents) {
const res = resolve(dir, dirent.name);
if (dirent.isDirectory()) {
yield* getFiles(res);
} else {
yield res;
}
}
}
Usage has changed because the return type is now an async iterator instead of a promise:
;(async () => {
for await (const f of getFiles('.')) {
console.log(f);
}
})()
In case anyone is interested, I've written more about async iterators here: https://qwtel.com/posts/software/async-generators-in-the-wild/
Other answers
For anyone who wants a synchronous alternative to the accepted answer (I know I did):
var fs = require('fs');
var path = require('path');
var walk = function(dir) {
let results = [], err = null, list;
try {
list = fs.readdirSync(dir)
} catch(e) {
err = e.toString();
}
if (err) return err;
var i = 0;
return (function next() {
var file = list[i++];
if(!file) return results;
file = path.resolve(dir, file);
let stat = fs.statSync(file);
if (stat && stat.isDirectory()) {
let res = walk(file);
results = results.concat(res);
return next();
} else {
results.push(file);
return next();
}
})();
};
console.log(
walk("./")
)
There are basically two ways of accomplishing this. In an async environment you'll notice that there are two kinds of loops: serial and parallel. A serial loop waits for one iteration to complete before it moves on to the next, which guarantees that every iteration of the loop completes in order. In a parallel loop, all the iterations are started at the same time and one may complete before another, but it is much faster than a serial loop. In this case it's probably better to use a parallel loop, because it doesn't matter what order the walk completes in, as long as it completes and returns the results (unless you want them in order).
A parallel loop looks like this:
var fs = require('fs');
var path = require('path');
var walk = function(dir, done) {
var results = [];
fs.readdir(dir, function(err, list) {
if (err) return done(err);
var pending = list.length;
if (!pending) return done(null, results);
list.forEach(function(file) {
file = path.resolve(dir, file);
fs.stat(file, function(err, stat) {
if (stat && stat.isDirectory()) {
walk(file, function(err, res) {
results = results.concat(res);
if (!--pending) done(null, results);
});
} else {
results.push(file);
if (!--pending) done(null, results);
}
});
});
});
};
A serial loop looks like this:
var fs = require('fs');
var path = require('path');
var walk = function(dir, done) {
var results = [];
fs.readdir(dir, function(err, list) {
if (err) return done(err);
var i = 0;
(function next() {
var file = list[i++];
if (!file) return done(null, results);
file = path.resolve(dir, file);
fs.stat(file, function(err, stat) {
if (stat && stat.isDirectory()) {
walk(file, function(err, res) {
results = results.concat(res);
next();
});
} else {
results.push(file);
next();
}
});
})();
});
};
And to test it out on your home directory (WARNING: the results list will be huge if you have a lot of stuff in your home directory):
walk(process.env.HOME, function(err, results) {
if (err) throw err;
console.log(results);
});
Edit: improved the examples.
Check out loaddir: https://npmjs.org/package/loaddir
npm install loaddir
loaddir = require('loaddir')
allJavascripts = []
loaddir({
path: __dirname + '/public/javascripts',
callback: function(){ allJavascripts.push(this.relativePath + this.baseName); }
})
You can use fileName instead of baseName if you need the extension as well.
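For instance, a sketch of the same snippet keeping the extension (same hypothetical paths as above):

loaddir = require('loaddir')
allJavascripts = []
loaddir({
  path: __dirname + '/public/javascripts',
  // fileName keeps the extension, unlike baseName
  callback: function(){ allJavascripts.push(this.relativePath + this.fileName); }
})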
An added bonus is that it also watches the files and calls the callback again when they change. There are plenty of configuration options to make it extremely flexible.
I rebuilt the guard gem from Ruby using loaddir in a short amount of time.
The Filehound library is another option. It will recursively search a given directory (the working directory by default). It supports various filters, callbacks, promises and sync searches.
For example, to search the current working directory for all files (using callbacks):
const Filehound = require('filehound');
Filehound.create()
.find((err, files) => {
if (err) {
return console.error(`error: ${err}`);
}
console.log(files); // array of files
});
Or with promises, specifying a particular directory:
const Filehound = require('filehound');
Filehound.create()
.paths("/tmp")
.find()
.each(console.log);
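Since Filehound also supports synchronous searches, a rough sketch along these lines should work (the exact findSync() call is an assumption here; verify it against the docs linked below):

const Filehound = require('filehound');

// findSync() is assumed to return the matching paths directly,
// mirroring the promise-based find() used above.
const files = Filehound.create()
  .paths("/tmp")
  .findSync();

console.log(files);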
For more use cases and usage examples, see the docs: https://github.com/nspragg/filehound
Disclaimer: I'm the author.
Using bluebird's Promise.coroutine:
let promise = require('bluebird'),
    path = require('path'),
    PC = promise.coroutine,
    fs = promise.promisifyAll(require('fs'));
let getFiles = PC(function*(dir){
let files = [];
let contents = yield fs.readdirAsync(dir);
// remove dot (hidden) files, e.g. on macOS; filtering avoids the
// skipped-index bug of splicing inside a forward loop
contents = contents.filter((name) => !/^\./.test(name));
for (let i = 0, l = contents.length; i < l; i ++) {
let content = path.resolve(dir, contents[i]);
let contentStat = yield fs.statAsync(content);
if (contentStat && contentStat.isDirectory()) {
let subFiles = yield getFiles(content);
files = files.concat(subFiles);
} else {
files.push(content);
}
}
return files;
});
//how to use
//easy error handling in one place
getFiles(your_dir).then(console.log).catch(err => console.log(err));