How do I count only the files in a directory? This counts the directory itself as a file:
len(glob.glob('*'))
Current answer
os.listdir() will be more efficient than using glob.glob. To test whether a name refers to an ordinary file (rather than a directory or some other entity), use os.path.isfile():
import os, os.path

# simple version for working with CWD
print(len([name for name in os.listdir('.') if os.path.isfile(name)]))

# path joining version for other paths
DIR = '/tmp'
print(len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))]))
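On Python 3.5+, os.scandir() can be even faster for this kind of counting, since it usually avoids a separate stat call per name. A minimal sketch, reusing the same '/tmp' example path:

import os

def count_regular_files(directory: str) -> int:
    # Count entries that are regular files, skipping directories and other entities.
    with os.scandir(directory) as entries:
        return sum(1 for entry in entries if entry.is_file())

print(count_regular_files('/tmp'))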
Other answers
I find that sometimes I don't know whether I will receive a file name or a path to the file, so I printed the output of the os.walk solution:
import os
from pathlib import Path
from typing import Union

def count_number_of_raw_data_point_files(path: Union[str, Path], with_file_prefix: str) -> int:
    # force_expanduser is a user-defined helper that expands '~' in the given path
    path: Path = force_expanduser(path)
    _, _, files = next(os.walk(path))
    # file_count = len(files)
    filename: str
    count: int = 0
    for filename in files:
        print(f'-->{filename=}')  # e.g. prints -->filename='data_point_99.json'
        if with_file_prefix in filename:
            count += 1
    return count
out:
-->filename='data_point_780.json'
-->filename='data_point_781.json'
-->filename='data_point_782.json'
-->filename='data_point_783.json'
-->filename='data_point_784.json'
-->filename='data_point_785.json'
-->filename='data_point_786.json'
-->filename='data_point_787.json'
-->filename='data_point_788.json'
-->filename='data_point_789.json'
-->filename='data_point_79.json'
-->filename='data_point_790.json'
-->filename='data_point_791.json'
-->filename='data_point_792.json'
-->filename='data_point_793.json'
-->filename='data_point_794.json'
-->filename='data_point_795.json'
-->filename='data_point_796.json'
-->filename='data_point_797.json'
-->filename='data_point_798.json'
-->filename='data_point_799.json'
-->filename='data_point_8.json'
-->filename='data_point_80.json'
-->filename='data_point_800.json'
-->filename='data_point_801.json'
-->filename='data_point_802.json'
-->filename='data_point_803.json'
-->filename='data_point_804.json'
-->filename='data_point_805.json'
-->filename='data_point_806.json'
-->filename='data_point_807.json'
-->filename='data_point_808.json'
-->filename='data_point_809.json'
-->filename='data_point_81.json'
-->filename='data_point_810.json'
-->filename='data_point_811.json'
-->filename='data_point_812.json'
-->filename='data_point_813.json'
-->filename='data_point_814.json'
-->filename='data_point_815.json'
-->filename='data_point_816.json'
-->filename='data_point_817.json'
-->filename='data_point_818.json'
-->filename='data_point_819.json'
-->filename='data_point_82.json'
-->filename='data_point_820.json'
-->filename='data_point_821.json'
-->filename='data_point_822.json'
-->filename='data_point_823.json'
-->filename='data_point_824.json'
-->filename='data_point_825.json'
-->filename='data_point_826.json'
-->filename='data_point_827.json'
-->filename='data_point_828.json'
-->filename='data_point_829.json'
-->filename='data_point_83.json'
-->filename='data_point_830.json'
-->filename='data_point_831.json'
-->filename='data_point_832.json'
-->filename='data_point_833.json'
-->filename='data_point_834.json'
-->filename='data_point_835.json'
-->filename='data_point_836.json'
-->filename='data_point_837.json'
-->filename='data_point_838.json'
-->filename='data_point_839.json'
-->filename='data_point_84.json'
-->filename='data_point_840.json'
-->filename='data_point_841.json'
-->filename='data_point_842.json'
-->filename='data_point_843.json'
-->filename='data_point_844.json'
-->filename='data_point_845.json'
-->filename='data_point_846.json'
-->filename='data_point_847.json'
-->filename='data_point_848.json'
-->filename='data_point_849.json'
-->filename='data_point_85.json'
-->filename='data_point_850.json'
-->filename='data_point_851.json'
-->filename='data_point_852.json'
-->filename='data_point_853.json'
-->filename='data_point_86.json'
-->filename='data_point_87.json'
-->filename='data_point_88.json'
-->filename='data_point_89.json'
-->filename='data_point_9.json'
-->filename='data_point_90.json'
-->filename='data_point_91.json'
-->filename='data_point_92.json'
-->filename='data_point_93.json'
-->filename='data_point_94.json'
-->filename='data_point_95.json'
-->filename='data_point_96.json'
-->filename='data_point_97.json'
-->filename='data_point_98.json'
-->filename='data_point_99.json'
854
Note that you may need to sort.
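The listing above is in lexicographic order ('data_point_79.json' comes right after 'data_point_789.json'). If the numeric suffix matters, a minimal sketch of a numeric sort, assuming the 'data_point_<n>.json' naming pattern:

import re

def numeric_key(filename: str) -> int:
    # Extract the first run of digits from the name; put unmatched names first.
    match = re.search(r'\d+', filename)
    return int(match.group()) if match else -1

files = ['data_point_789.json', 'data_point_79.json', 'data_point_8.json']
print(sorted(files, key=numeric_key))
# ['data_point_8.json', 'data_point_79.json', 'data_point_789.json']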
This is where fnmatch comes in very handy:
import fnmatch, os

print(len(fnmatch.filter(os.listdir(dirpath), '*.txt')))
Details: http://docs.python.org/2/library/fnmatch.html
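If you prefer glob or pathlib over fnmatch, roughly equivalent counts (dirpath and the '*.txt' pattern are the same placeholders as above) might look like:

import glob, os
from pathlib import Path

# glob version; note this would also match directories whose names end in .txt
print(len(glob.glob(os.path.join(dirpath, '*.txt'))))

# pathlib version; Path.glob returns a generator, so count it lazily
print(sum(1 for p in Path(dirpath).glob('*.txt') if p.is_file()))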
Here is a simple solution that counts the number of files in a directory containing sub-folders. It may come in handy:
import os
from pathlib import Path

def count_files(rootdir):
    '''counts the number of files in each subfolder in a directory'''
    for path in Path(rootdir).iterdir():
        if path.is_dir():
            file_count = len([name for name in os.listdir(path)
                              if os.path.isfile(os.path.join(path, name))])
            print("There are " + str(file_count) + " files in " + str(path.name))

count_files(data_dir)  # data_dir is the directory you want files counted.
You should get output similar to this (with the placeholders replaced, of course):
There are {number of files} files in {name of sub-folder1}
There are {number of files} files in {name of sub-folder2}
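If you want a single grand total across all sub-folders instead of a per-folder breakdown, a minimal os.walk sketch (data_dir is again the same placeholder) could be:

import os

def count_files_recursive(rootdir) -> int:
    # Sum the file counts reported by os.walk for every directory under rootdir.
    return sum(len(files) for _, _, files in os.walk(rootdir))

print(count_files_recursive(data_dir))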
import os

print(len(os.listdir(os.getcwd())))
Very simple:
import os

print(len([iq for iq in os.scandir('PATH')]))
It simply counts the number of files in the directory. I used the list comprehension technique to iterate over a specific directory and return all files; len() of the returned list gives the file count.
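Note that os.scandir() also yields sub-directories, so the line above counts every entry, not just regular files. If only files should be counted, a small variation (with the same 'PATH' placeholder) would be:

import os

# Keep only entries that are regular files.
print(len([entry for entry in os.scandir('PATH') if entry.is_file()]))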