This is the best algorithm I could come up with.
def get_primes(n):
    numbers = set(range(n, 1, -1))
    primes = []
    while numbers:
        p = numbers.pop()
        primes.append(p)
        numbers.difference_update(set(range(p*2, n+1, p)))
    return primes
>>> timeit.Timer(stmt='get_primes.get_primes(1000000)', setup='import get_primes').timeit(1)
1.1499958793645562
Can it be made even faster?
This code has a flaw: since numbers is an unordered set, there is no guarantee that numbers.pop() removes the lowest number from the set. Nevertheless, it works (at least for me) for some input numbers:
>>> sum(get_primes(2000000))
142913828922L
# That's the correct sum of all primes below 2 million
>>> 529 in get_primes(1000)
False
>>> 529 in get_primes(530)
True
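If you want to remove that caveat, here is a minimal variant (my own sketch, not part of the original answer) that always takes the smallest remaining number, so every value appended is guaranteed to be prime regardless of set ordering:

def get_primes_ordered(n):
    # Same idea, but always remove the smallest remaining number, so the
    # value taken is guaranteed to be prime no matter how the set iterates.
    # The min() scan makes this much slower; it only illustrates the point.
    numbers = set(range(2, n + 1))
    primes = []
    while numbers:
        p = min(numbers)
        numbers.discard(p)
        primes.append(p)
        numbers.difference_update(range(p * 2, n + 1, p))
    return primes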
For Python 3:
def rwh_primes2(n):
    # Sieve over numbers of the form 6k+1 and 6k+5 only; 2 and 3 are added separately.
    correction = (n % 6 > 1)
    n = {0: n, 1: n - 1, 2: n + 4, 3: n + 3, 4: n + 2, 5: n + 1}[n % 6]
    sieve = [True] * (n // 3)
    sieve[0] = False
    for i in range(int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1  # the candidate represented by index i
            sieve[((k * k) // 3)::2 * k] = [False] * ((n // 6 - (k * k) // 6 - 1) // k + 1)
            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = [False] * ((n // 6 - (k * k + 4 * k - 2 * k * (i & 1)) // 6 - 1) // k + 1)
    return [2, 3] + [3 * i + 1 | 1 for i in range(1, n // 3 - correction) if sieve[i]]
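A quick sanity check with a small bound (output worked out from the code above):

>>> rwh_primes2(30)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]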
Using Sundaram's Sieve, I think I've broken the pure-Python record:
def sundaram3(max_n):
    numbers = range(3, max_n+1, 2)
    half = (max_n)//2
    initial = 4

    for step in xrange(3, max_n+1, 2):
        for i in xrange(initial, half, step):
            numbers[i-1] = 0
        initial += 2*(step+1)

        if initial > half:
            return [2] + filter(None, numbers)
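The code above is written for Python 2 (xrange, a list-returning range, and filter returning a list). A minimal Python 3 port, my own sketch rather than the original author's code, could look like this:

def sundaram3_py3(max_n):
    # Sundaram's sieve over the odd candidates 3, 5, 7, ...
    numbers = list(range(3, max_n + 1, 2))
    half = max_n // 2
    initial = 4

    for step in range(3, max_n + 1, 2):
        for i in range(initial, half, step):
            numbers[i - 1] = 0
        initial += 2 * (step + 1)

        if initial > half:
            return [2] + [n for n in numbers if n]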
Comparison:
C:\USERS>python -m timeit -n10 -s "import get_primes" "get_primes.get_primes_erat(1000000)"
10 loops, best of 3: 710 msec per loop
C:\USERS>python -m timeit -n10 -s "import get_primes" "get_primes.daniel_sieve_2(1000000)"
10 loops, best of 3: 435 msec per loop
C:\USERS>python -m timeit -n10 -s "import get_primes" "get_primes.sundaram3(1000000)"
10 loops, best of 3: 327 msec per loop
This is a variation of the solution in the question that should be faster than the original. It uses a plain (static) Sieve of Eratosthenes with no other optimizations.
from typing import List

def list_primes(limit: int) -> List[int]:
    primes = set(range(2, limit + 1))
    for i in range(2, limit + 1):
        if i in primes:
            primes.difference_update(set(list(range(i, limit + 1, i))[1:]))
    return sorted(primes)
>>> list_primes(100)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
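As a sanity check, the sum quoted in the first answer above can be reproduced with this function (assuming list_primes as defined here):

>>> sum(list_primes(2000000))
142913828922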
Fastest prime sieving in pure Python:
from itertools import compress

def half_sieve(n):
    """
    Returns a list of prime numbers less than `n`.
    """
    if n <= 2:
        return []
    sieve = bytearray([True]) * (n // 2)
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = bytearray((n - i * i - 1) // (2 * i) + 1)
    primes = list(compress(range(1, n, 2), sieve))
    primes[0] = 2
    return primes
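A quick correctness check (78498 is the well-known count of primes below one million):

>>> len(half_sieve(1000000))
78498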
I optimized the Sieve of Eratosthenes for both speed and memory.

Benchmark
from time import clock  # note: time.clock() was removed in Python 3.8; use time.perf_counter() on newer versions
import platform

def benchmark(iterations, limit):
    start = clock()
    for x in range(iterations):
        half_sieve(limit)
    end = clock() - start
    print(f'{end/iterations:.4f} seconds for primes < {limit}')

if __name__ == '__main__':
    print(platform.python_version())
    print(platform.platform())
    print(platform.processor())
    it = 10
    for pw in range(4, 9):
        benchmark(it, 10**pw)
Output
>>> 3.6.7
>>> Windows-10-10.0.17763-SP0
>>> Intel64 Family 6 Model 78 Stepping 3, GenuineIntel
>>> 0.0003 seconds for primes < 10000
>>> 0.0021 seconds for primes < 100000
>>> 0.0204 seconds for primes < 1000000
>>> 0.2389 seconds for primes < 10000000
>>> 2.6702 seconds for primes < 100000000
Sorry to bump this thread, but erat2() has a serious flaw in its algorithm.
When searching for the next composite, we only need to test odd numbers.
q and p are both odd; then q+p is even and doesn't need to be tested, but q+2*p is always odd. This eliminates the "if even" test in the while-loop condition and saves about 30% of the runtime.
While we're at it: instead of the elegant 'D.pop(q, None)' get-and-delete method, use 'if q in D: p = D[q]; del D[q]', which is twice as fast! At least on my machine (P3-1GHz).
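For context, the D.pop-based incremental sieve being criticized looks roughly like this; it is reconstructed from the description above, so treat it as a sketch rather than the exact erat2 from the referenced answer:

from itertools import count, islice

def erat2():
    D = {}
    yield 2
    for q in islice(count(3), 0, None, 2):
        p = D.pop(q, None)                 # elegant get-and-delete, but slower
        if p is None:
            D[q * q] = q                   # q is prime; mark its square
            yield q
        else:
            x = p + q
            while x in D or not (x & 1):   # the "if even" test that erat3 drops
                x += p
            D[x] = p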
So I suggest this implementation of that ingenious algorithm:
def erat3():
    from itertools import islice, count

    # q is the running integer that's checked for primeness.
    # yield 2 and no other even number thereafter
    yield 2
    D = {}
    # no need to mark D[4] as we will test odd numbers only
    for q in islice(count(3), 0, None, 2):
        if q in D:                 # is composite
            p = D[q]
            del D[q]
            # q is composite. p = D[q] is the first prime that
            # divides it. Since we've reached q, we no longer
            # need it in the map, but we'll mark the next
            # multiple of its witnesses to prepare for larger
            # numbers.
            x = q + p + p          # next odd(!) multiple
            while x in D:          # skip composites
                x += p + p
            D[x] = p
        else:                      # is prime
            # q is a new prime.
            # Yield it and mark its first multiple that isn't
            # already marked in previous iterations.
            D[q*q] = q
            yield q
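Since erat3() is an unbounded generator, you take primes from it with itertools.islice, for example:

>>> from itertools import islice
>>> list(islice(erat3(), 10))
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]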
At the time of writing, this is the fastest working solution posted here (at least on my machine). It uses both numpy and bitarray and was inspired by primesfrom2to from this answer.
import numpy as np
from bitarray import bitarray

def bit_primes(n):
    bit_sieve = bitarray(n // 3 + (n % 6 == 2))
    bit_sieve.setall(1)
    bit_sieve[0] = False
    for i in range(int(n ** 0.5) // 3 + 1):
        if bit_sieve[i]:
            k = 3 * i + 1 | 1
            bit_sieve[k * k // 3::2 * k] = False
            bit_sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False

    np_sieve = np.unpackbits(np.frombuffer(bit_sieve.tobytes(), dtype=np.uint8)).view(bool)
    return np.concatenate(((2, 3), ((3 * np.flatnonzero(np_sieve) + 1) | 1)))
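Note that bitarray is a third-party package (pip install bitarray). A quick check on a small input, with the values traced by hand from the code above:

>>> bit_primes(20)
array([ 2,  3,  5,  7, 11, 13, 17, 19])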
Here is a comparison with primesfrom2to, which was previously found to be the fastest solution in unutbu's comparison:
python3 -m timeit -s "import fast_primes" "fast_primes.bit_primes(1000000)"
200 loops, best of 5: 1.19 msec per loop
python3 -m timeit -s "import fast_primes" "fast_primes.primesfrom2to(1000000)"
200 loops, best of 5: 1.23 msec per loop
For finding the primes below 1 million, bit_primes was slightly faster.
The gap grows for larger values of n. In some cases, bit_primes was more than twice as fast:
python3 -m timeit -s "import fast_primes" "fast_primes.bit_primes(500_000_000)"
1 loop, best of 5: 540 msec per loop
python3 -m timeit -s "import fast_primes" "fast_primes.primesfrom2to(500_000_000)"
1 loop, best of 5: 1.15 sec per loop
For reference, here is the minimally modified version of primesfrom2to that I used (adapted for Python 3):
def primesfrom2to(n):
    # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
    """ Input n>=6, Returns an array of primes, 2 <= p < n """
    sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)  # np.bool was removed in newer NumPy; plain bool works everywhere
    sieve[0] = False
    for i in range(int(n ** 0.5) // 3 + 1):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[((k * k) // 3)::2 * k] = False
            sieve[(k * k + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0] + 1) | 1)]