How do I find the duplicates in a list of integers and create another list of the duplicates?
Current answer
One more set of tests. Of course, doing...
set([x for x in l if l.count(x) > 1])
...is far too expensive. The final method below is about 500 times faster (the longer the array, the better the results):
def dups_count_dict(l):
    d = {}
    for item in l:
        if item not in d:
            d[item] = 0
        d[item] += 1
    result_d = {key: val for key, val in d.items() if val > 1}
    return list(result_d.keys())
Only 2 loops, and no very costly l.count() operations.
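As a quick sanity check, calling the function above on an example list would give the following (assuming Python 3.7+, where dicts preserve insertion order, so duplicates come out in order of first appearance):

>>> dups_count_dict([1, 2, 3, 2, 1, 5, 6, 5, 5, 5])
[1, 2, 5]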
Below is the code to compare the methods, along with its output:
dups_count: 13.368s # this is a function which uses l.count()
dups_count_dict: 0.014s # this is a final best function (of the 3 functions)
dups_count_counter: 0.024s # collections.Counter
Test code:
import numpy as np
from time import time
from collections import Counter

class TimerCounter(object):
    def __init__(self):
        self._time_sum = 0

    def start(self):
        self.time = time()

    def stop(self):
        self._time_sum += time() - self.time

    def get_time_sum(self):
        return self._time_sum


def dups_count(l):
    return set([x for x in l if l.count(x) > 1])


def dups_count_dict(l):
    d = {}
    for item in l:
        if item not in d:
            d[item] = 0
        d[item] += 1
    result_d = {key: val for key, val in d.items() if val > 1}
    return list(result_d.keys())


def dups_counter(l):
    counter = Counter(l)
    result_d = {key: val for key, val in counter.items() if val > 1}
    return list(result_d.keys())


def gen_array():
    np.random.seed(17)
    return list(np.random.randint(0, 5000, 10000))


def assert_equal_results(*results):
    primary_result = results[0]
    other_results = results[1:]
    for other_result in other_results:
        assert set(primary_result) == set(other_result) and len(primary_result) == len(other_result)


if __name__ == '__main__':
    dups_count_time = TimerCounter()
    dups_count_dict_time = TimerCounter()
    dups_count_counter = TimerCounter()

    l = gen_array()

    for i in range(3):
        dups_count_time.start()
        result1 = dups_count(l)
        dups_count_time.stop()

        dups_count_dict_time.start()
        result2 = dups_count_dict(l)
        dups_count_dict_time.stop()

        dups_count_counter.start()
        result3 = dups_counter(l)
        dups_count_counter.stop()

        assert_equal_results(result1, result2, result3)

    print('dups_count: %.3f' % dups_count_time.get_time_sum())
    print('dups_count_dict: %.3f' % dups_count_dict_time.get_time_sum())
    print('dups_count_counter: %.3f' % dups_count_counter.get_time_sum())
Other answers
I came across this question while looking for something related, and wondered why no one had offered a generator-based solution? Solving this problem would be:
>>> list(getDupes_9([1,2,3,2,1,5,6,5,5,5]))
[1, 2, 5]
I was concerned with scalability, so I tested several approaches, including naive items that work well on small lists but scale horribly as lists get larger (note: it would have been better to use timeit, but this is illustrative).
I included @moooeeeep for comparison (it is impressively fast: fastest if the input list is completely random) and an itertools approach that is even faster again for mostly sorted lists... Now also includes the pandas approach from @firelynx: slow, but not horribly so, and simple. Note: on my machine, the sort/tee/zip approach is consistently fastest for large mostly-ordered lists, and @moooeeeep is fastest for shuffled lists, but your mileage may vary.
Advantages
Very quick and simple to test for 'any' duplicates using the same code
Assumptions
- Duplicates should be reported once only
- The order of the duplicates does not need to be preserved
- A duplicate may be anywhere in the list
Fastest solution, 1m entries:
import itertools

def getDupes(c):
    '''sort/tee/izip'''
    a, b = itertools.tee(sorted(c))
    next(b, None)
    r = None
    for k, g in zip(a, b):
        if k != g: continue
        if k != r:
            yield k
            r = k
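To make the tee/offset pairing easier to see, here is a minimal sketch of the idea the generator above relies on (the helper name pairwise is mine): after sorting, every duplicate shows up as a pair of equal neighbours:

import itertools

def pairwise(iterable):
    # yields consecutive pairs: (x0, x1), (x1, x2), ...
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)

# equal neighbours in the sorted list mark the duplicates
print([k for k, g in pairwise(sorted([1, 2, 3, 2, 1, 5, 6, 5, 5, 5])) if k == g])
# -> [1, 2, 5, 5, 5]; the r-guard in getDupes() then reports each value once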
Methods tested
import itertools
import time
import random
def getDupes_1(c):
    '''naive'''
    for i in range(0, len(c)):
        if c[i] in c[:i]:
            yield c[i]
def getDupes_2(c):
'''set len change'''
s = set()
for i in c:
l = len(s)
s.add(i)
if len(s) == l:
yield i
def getDupes_3(c):
'''in dict'''
d = {}
for i in c:
if i in d:
if d[i]:
yield i
d[i] = False
else:
d[i] = True
def getDupes_4(c):
'''in set'''
s,r = set(),set()
for i in c:
if i not in s:
s.add(i)
elif i not in r:
r.add(i)
yield i
def getDupes_5(c):
    '''sort/adjacent'''
    c = sorted(c)
    r = None
    for i in range(1, len(c)):
        if c[i] == c[i - 1]:
            if c[i] != r:
                yield c[i]
                r = c[i]
def getDupes_6(c):
    '''sort/groupby'''
    def multiple(x):
        try:
            next(x)
            next(x)
            return True
        except StopIteration:
            return False
    for k, g in filter(lambda x: multiple(x[1]), itertools.groupby(sorted(c))):
        yield k
def getDupes_7(c):
'''sort/zip'''
c = sorted(c)
r = None
for k, g in zip(c[:-1],c[1:]):
if k == g:
if k != r:
yield k
r = k
def getDupes_8(c):
'''sort/izip'''
c = sorted(c)
r = None
for k, g in zip(c[:-1], c[1:]):
if k == g:
if k != r:
yield k
r = k
def getDupes_9(c):
'''sort/tee/izip'''
a, b = itertools.tee(sorted(c))
next(b, None)
r = None
for k, g in zip(a, b):
if k != g: continue
if k != r:
yield k
r = k
def getDupes_a(l):
'''moooeeeep'''
seen = set()
seen_add = seen.add
# adds all elements it doesn't know yet to seen and all other to seen_twice
for x in l:
if x in seen or seen_add(x):
yield x
def getDupes_b(x):
'''iter*/sorted'''
x = sorted(x)
def _matches():
for k, g in zip(x[:-1], x[1:]):
if k == g:
yield k
for k, n in itertools.groupby(_matches()):
yield k
def getDupes_c(a):
'''pandas'''
import pandas as pd
vc = pd.Series(a).value_counts()
i = vc[vc > 1].index
for _ in i:
yield _
def hasDupes(fn,c):
try:
if next(fn(c)): return True # Found a dupe
except StopIteration:
pass
return False
def getDupes(fn,c):
return list(fn(c))
STABLE = True
if STABLE:
    print('Finding FIRST then ALL duplicates, single dupe of "nth" placed element in 1m element array')
else:
    print('Finding FIRST then ALL duplicates, single dupe of "n" included in randomised 1m element array')

for location in (50, 250000, 500000, 750000, 999999):
    for test in (getDupes_2, getDupes_3, getDupes_4, getDupes_5, getDupes_6,
                 getDupes_8, getDupes_9, getDupes_a, getDupes_b, getDupes_c):
        print('Test %-15s:%10d - ' % (test.__doc__ or test.__name__, location), end='')
        deltas = []
        for FIRST in (True, False):
            for i in range(0, 5):
                c = list(range(0, 1000000))  # Python 3: range() is lazy, so build a list
                if STABLE:
                    c[0] = location
                else:
                    c.append(location)
                    random.shuffle(c)
                start = time.time()
                if FIRST:
                    print('.' if location == next(test(c)) else '!', end=' ')
                else:
                    print('.' if [location] == list(test(c)) else '!', end=' ')
                deltas.append(time.time() - start)
            print(' -- %0.3f ' % (sum(deltas) / len(deltas)), end='')
        print()
    print()
The results for the 'all dupes' tests were consistent, finding the 'first' duplicate and then 'all' duplicates in this array:
Finding FIRST then ALL duplicates, single dupe of "nth" placed element in 1m element array
Test set len change : 500000 - . . . . . -- 0.264 . . . . . -- 0.402
Test in dict : 500000 - . . . . . -- 0.163 . . . . . -- 0.250
Test in set : 500000 - . . . . . -- 0.163 . . . . . -- 0.249
Test sort/adjacent : 500000 - . . . . . -- 0.159 . . . . . -- 0.229
Test sort/groupby : 500000 - . . . . . -- 0.860 . . . . . -- 1.286
Test sort/izip : 500000 - . . . . . -- 0.165 . . . . . -- 0.229
Test sort/tee/izip : 500000 - . . . . . -- 0.145 . . . . . -- 0.206 *
Test moooeeeep : 500000 - . . . . . -- 0.149 . . . . . -- 0.232
Test iter*/sorted : 500000 - . . . . . -- 0.160 . . . . . -- 0.221
Test pandas : 500000 - . . . . . -- 0.493 . . . . . -- 0.499
When the lists are shuffled first, the price of the sort becomes apparent: efficiency drops noticeably and the @moooeeeep approach dominates, with the set and dict approaches being similar but lesser performers:
Finding FIRST then ALL duplicates, single dupe of "n" included in randomised 1m element array
Test set len change : 500000 - . . . . . -- 0.321 . . . . . -- 0.473
Test in dict : 500000 - . . . . . -- 0.285 . . . . . -- 0.360
Test in set : 500000 - . . . . . -- 0.309 . . . . . -- 0.365
Test sort/adjacent : 500000 - . . . . . -- 0.756 . . . . . -- 0.823
Test sort/groupby : 500000 - . . . . . -- 1.459 . . . . . -- 1.896
Test sort/izip : 500000 - . . . . . -- 0.786 . . . . . -- 0.845
Test sort/tee/izip : 500000 - . . . . . -- 0.743 . . . . . -- 0.804
Test moooeeeep : 500000 - . . . . . -- 0.234 . . . . . -- 0.311 *
Test iter*/sorted : 500000 - . . . . . -- 0.776 . . . . . -- 0.840
Test pandas : 500000 - . . . . . -- 0.539 . . . . . -- 0.540
Try this to check for duplicates:
>>> def checkDuplicate(List):
duplicate={}
for i in List:
## checking whether the item is already present in dictionary or not
## increasing count if present
## initializing count to 1 if not present
duplicate[i]=duplicate.get(i,0)+1
return [k for k,v in duplicate.items() if v>1]
>>> checkDuplicate([1,2,3,"s",1,2,3])
[1, 2, 3]
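For reference, the same counting idea can be expressed with collections.Counter from the standard library (a sketch, not part of the original answer):

from collections import Counter

def check_duplicate(seq):
    # keep only the values whose count exceeds 1
    return [k for k, v in Counter(seq).items() if v > 1]

print(check_duplicate([1, 2, 3, "s", 1, 2, 3]))  # [1, 2, 3]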
You can use iteration_utilities.duplicates:
>>> from iteration_utilities import duplicates
>>> list(duplicates([1,1,2,1,2,3,4,2]))
[1, 1, 2, 2]
Or if you only want one of each duplicate, this can be combined with iteration_utilities.unique_everseen:
>>> from iteration_utilities import unique_everseen
>>> list(unique_everseen(duplicates([1,1,2,1,2,3,4,2])))
[1, 2]
It can also handle unhashable elements (however at the cost of performance):
>>> list(duplicates([[1], [2], [1], [3], [1]]))
[[1], [1]]
>>> list(unique_everseen(duplicates([[1], [2], [1], [3], [1]])))
[[1]]
This is something that only a few of the other approaches here can handle.
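For comparison, a plain-Python sketch that also tolerates unhashable items by falling back to list membership (quadratic, so only for small inputs; the function name is mine):

def duplicates_unhashable(seq):
    seen, dupes = [], []
    for x in seq:
        if x in seen:            # list membership works for unhashable items
            if x not in dupes:   # report each duplicate once
                dupes.append(x)
        else:
            seen.append(x)
    return dupes

print(duplicates_unhashable([[1], [2], [1], [3], [1]]))  # [[1]]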
Benchmarks
I did a quick benchmark containing most (but not all) of the approaches mentioned here.
The first benchmark covered only a small range of list lengths, because some approaches have O(n**2) behavior.
In the graphs, the y-axis represents the time, so a lower value means better. They are also plotted log-log, so the wide range of values can be visualized better:
Removing the O(n**2) approaches, I did another benchmark of up to half a million elements in a list:
As you can see, the iteration_utilities.duplicates approach is faster than any of the other approaches, and even chaining unique_everseen(duplicates(...)) was faster than or equally fast as the other approaches.
One additional interesting thing to note here is that the pandas approaches are very slow for small lists but can easily compete for longer lists.
However, as these benchmarks show, most of the approaches perform roughly equally, so it does not matter much which one is used (except for the 3 that have O(n**2) runtime).
from iteration_utilities import duplicates, unique_everseen
from collections import Counter
import pandas as pd
import itertools
def georg_counter(it):
return [item for item, count in Counter(it).items() if count > 1]
def georg_set(it):
seen = set()
uniq = []
for x in it:
if x not in seen:
uniq.append(x)
seen.add(x)
def georg_set2(it):
seen = set()
return [x for x in it if x not in seen and not seen.add(x)]
def georg_set3(it):
seen = {}
dupes = []
for x in it:
if x not in seen:
seen[x] = 1
else:
if seen[x] == 1:
dupes.append(x)
seen[x] += 1
def RiteshKumar_count(l):
return set([x for x in l if l.count(x) > 1])
def moooeeeep(seq):
seen = set()
seen_add = seen.add
# adds all elements it doesn't know yet to seen and all other to seen_twice
seen_twice = set( x for x in seq if x in seen or seen_add(x) )
# turn the set into a list (as requested)
return list( seen_twice )
def F1Rumors_implementation(c):
a, b = itertools.tee(sorted(c))
next(b, None)
r = None
for k, g in zip(a, b):
if k != g: continue
if k != r:
yield k
r = k
def F1Rumors(c):
return list(F1Rumors_implementation(c))
def Edward(a):
d = {}
for elem in a:
if elem in d:
d[elem] += 1
else:
d[elem] = 1
return [x for x, y in d.items() if y > 1]
def wordsmith(a):
return pd.Series(a)[pd.Series(a).duplicated()].values
def NikhilPrabhu(li):
li = li.copy()
for x in set(li):
li.remove(x)
return list(set(li))
def firelynx(a):
vc = pd.Series(a).value_counts()
return vc[vc > 1].index.tolist()
def HenryDev(myList):
newList = set()
for i in myList:
if myList.count(i) >= 2:
newList.add(i)
return list(newList)
def yota(number_lst):
seen_set = set()
duplicate_set = set(x for x in number_lst if x in seen_set or seen_set.add(x))
return seen_set - duplicate_set
def IgorVishnevskiy(l):
s=set(l)
d=[]
for x in l:
if x in s:
s.remove(x)
else:
d.append(x)
return d
def it_duplicates(l):
return list(duplicates(l))
def it_unique_duplicates(l):
return list(unique_everseen(duplicates(l)))
Benchmark 1
from simple_benchmark import benchmark
import random
funcs = [
georg_counter, georg_set, georg_set2, georg_set3, RiteshKumar_count, moooeeeep,
F1Rumors, Edward, wordsmith, NikhilPrabhu, firelynx,
HenryDev, yota, IgorVishnevskiy, it_duplicates, it_unique_duplicates
]
args = {2**i: [random.randint(0, 2**(i-1)) for _ in range(2**i)] for i in range(2, 12)}
b = benchmark(funcs, args, 'list size')
b.plot()
Benchmark 2
funcs = [
georg_counter, georg_set, georg_set2, georg_set3, moooeeeep,
F1Rumors, Edward, wordsmith, firelynx,
yota, IgorVishnevskiy, it_duplicates, it_unique_duplicates
]
args = {2**i: [random.randint(0, 2**(i-1)) for _ in range(2**i)] for i in range(2, 20)}
b = benchmark(funcs, args, 'list size')
b.plot()
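Note: simple_benchmark is itself a third-party package. If it is unavailable, a rough stand-in using the standard library's timeit would look something like this (a sketch, reusing the funcs and args defined above; it only prints timings rather than plotting):

import timeit

# time each function on each list size, 3 runs each
for size, lst in args.items():
    for f in funcs:
        t = timeit.timeit(lambda: f(lst), number=3)
        print('%-25s n=%-7d %.4fs' % (f.__name__, size, t))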
Disclaimer
1. This is from a third-party library I have written: iteration_utilities.
Suppose we have this list of elements:
a = [1, 2, 3, 2, 1, 5, 6, 5, 5, 5]
We can find the unique elements using just sets:
unique = set()
for num in a:
if num not in unique:
unique.add(num)
else:
unique = unique - set([num])
And finally:
>>> unique
{3, 6}
If you want to get the duplicates, you can simply do:
>>> duplicates = set(a) - unique
>>> duplicates
{1, 2, 5}
Notes:
- Element lookup in a set is O(1)
- Element removal from a set is O(1)
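One caveat not stated above: the add/remove trick only works when every duplicated value occurs an even number of times. A value occurring, say, three times is added, removed, then added again, so it ends up back in unique and its duplicate is missed. A two-set variant avoids this (a sketch; the name find_duplicates is mine):

def find_duplicates(seq):
    seen, dupes = set(), set()
    for x in seq:
        if x in seen:
            dupes.add(x)   # second and later occurrences land here
        else:
            seen.add(x)
    return dupes

print(find_duplicates([1, 2, 3, 2, 1, 5, 6, 5, 5, 5]))  # {1, 2, 5}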
Without converting to a list, probably the simplest way is as shown below. This may be useful in an interview when they ask you not to use sets.
a=[1,2,3,3,3]
dup=[]
for each in a:
if each not in dup:
dup.append(each)
print(dup)
======= else to get 2 separate lists of unique values and duplicate values
a=[1,2,3,3,3]
uniques=[]
dups=[]
for each in a:
if each not in uniques:
uniques.append(each)
else:
dups.append(each)
print("Unique values are below:")
print(uniques)
print("Duplicate values are below:")
print(dups)
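For the example above, running the second snippet would print (assuming Python 3):

Unique values are below:
[1, 2, 3]
Duplicate values are below:
[3, 3]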