This is the best algorithm I could come up with.
def get_primes(n):
    numbers = set(range(n, 1, -1))
    primes = []
    while numbers:
        p = numbers.pop()
        primes.append(p)
        numbers.difference_update(set(range(p*2, n+1, p)))
    return primes
>>> timeit.Timer(stmt='get_primes.get_primes(1000000)', setup='import get_primes').timeit(1)
1.1499958793645562
Can it be made even faster?
This code has a flaw: since numbers is an unordered set, there is no guarantee that numbers.pop() will remove the lowest number from the set. Nevertheless, it works (at least for me) for some input numbers:
>>> sum(get_primes(2000000))
142913828922L
#That's the correct sum of all primes below 2 million
>>> 529 in get_primes(1000)
False
>>> 529 in get_primes(530)
True
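A minimal sketch of a variant (my own, not from the original answer) that removes the dependence on set ordering by always popping the smallest remaining number; it is slower, but the "p is prime" invariant then actually holds:
def get_primes_ordered(n):
    numbers = set(range(2, n + 1))
    primes = []
    while numbers:
        p = min(numbers)          # always take the smallest remaining number, which must be prime
        numbers.remove(p)
        primes.append(p)
        numbers.difference_update(range(p * p, n + 1, p))
    return primes
>>> 529 in get_primes_ordered(530)
False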
Sorry to intrude, but erat2() has a serious flaw in the algorithm.
While searching for the next composite, we only need to test odd numbers.
Both q and p are odd; then q+p is even and doesn't need to be tested, but q+2*p is always odd. This eliminates the "if even" test in the while-loop condition and saves about 30% of the runtime.
While we're at it: instead of the elegant 'D.pop(q, None)' get-and-delete method, use 'if q in D: p = D[q]; del D[q]', which is twice as fast! At least on my machine (P3-1GHz).
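A rough micro-benchmark sketch (my own addition; the setup values are arbitrary, and on a modern CPython the gap may be much smaller or even reversed compared to the P3-era figure quoted above):
import timeit
setup = "D = {i: i for i in range(1000)}; q = 500"
stmt_pop = "p = D.pop(q, None); D[q] = q"                        # get-and-delete via dict.pop
stmt_in  = "p = D[q] if q in D else None; del D[q]; D[q] = q"    # membership test, then delete
print("dict.pop         :", timeit.timeit(stmt_pop, setup=setup))
print("membership + del :", timeit.timeit(stmt_in, setup=setup))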
So I suggest this implementation of that clever algorithm:
def erat3():
    from itertools import islice, count

    # q is the running integer that's checked for primeness.
    # yield 2 and no other even number thereafter
    yield 2
    D = {}
    # no need to mark D[4] as we will test odd numbers only
    for q in islice(count(3), 0, None, 2):
        if q in D:              # is composite
            p = D[q]
            del D[q]
            # q is composite. p=D[q] is the first prime that
            # divides it. Since we've reached q, we no longer
            # need it in the map, but we'll mark the next
            # multiple of its witnesses to prepare for larger
            # numbers.
            x = q + p + p       # next odd(!) multiple
            while x in D:       # skip composites
                x += p + p
            D[x] = p
        else:                   # is prime
            # q is a new prime.
            # Yield it and mark its first multiple that isn't
            # already marked in previous iterations.
            D[q*q] = q
            yield q
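A short usage sketch (my own addition, not from the original post): since erat3() is an unbounded generator, primes below a limit can be collected with itertools.takewhile, which also reproduces the Project Euler check from earlier in this thread:
from itertools import takewhile
def primes_below(limit):
    # consume the unbounded generator until it reaches the limit
    return list(takewhile(lambda p: p < limit, erat3()))
>>> primes_below(30)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
>>> sum(primes_below(2000000))
142913828922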
If you accept itertools but not numpy, here is an adaptation of rwh_primes2 for Python 3 that runs about twice as fast on my machine. The only substantial changes are using a bytearray instead of a list for the booleans, and using compress instead of a list comprehension to build the final list. (I'd add this as a comment to moarningsun's answer if I were able to.)
import itertools
izip = itertools.zip_longest
chain = itertools.chain.from_iterable
compress = itertools.compress

def rwh_primes2_python3(n):
    """ Input n>=6, Returns a list of primes, 2 <= p < n """
    zero = bytearray([False])
    size = n//3 + (n % 6 == 2)
    sieve = bytearray([True]) * size
    sieve[0] = False
    for i in range(int(n**0.5)//3+1):
        if sieve[i]:
            k = 3*i+1 | 1
            start = (k*k + 4*k - 2*k*(i & 1)) // 3
            sieve[(k*k)//3::2*k] = zero * ((size - (k*k)//3 - 1) // (2*k) + 1)
            sieve[start::2*k] = zero * ((size - start - 1) // (2*k) + 1)
    ans = [2, 3]
    poss = chain(izip(*[range(i, n, 6) for i in (1, 5)]))
    ans.extend(compress(poss, sieve))
    return ans
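A small correctness sketch (my own addition, not part of the original answer): cross-check the fast sieve against naive trial division for a modest bound.
def naive_primes(n):
    # reference implementation: trial division up to sqrt(p)
    return [p for p in range(2, n) if all(p % d for d in range(2, int(p**0.5) + 1))]
>>> rwh_primes2_python3(10000) == naive_primes(10000)
True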
Comparison:
>>> timeit.timeit('primes.rwh_primes2(10**6)', setup='import primes', number=1)
0.0652179726976101
>>> timeit.timeit('primes.rwh_primes2_python3(10**6)', setup='import primes', number=1)
0.03267321276325674
and
>>> timeit.timeit('primes.rwh_primes2(10**8)', setup='import primes', number=1)
6.394284538007014
>>> timeit.timeit('primes.rwh_primes2_python3(10**8)', setup='import primes', number=1)
3.833829450302801
These are all already written and tested, so there is no need to reinvent the wheel.
python -m timeit -r10 -s"from sympy import sieve" "primes = list(sieve.primerange(1, 10**6))"
That sets a new record: 12.2 ms per loop!
10 loops, best of 10: 12.2 msec per loop
If that is not fast enough, you can try PyPy:
pypy -m timeit -r10 -s"from sympy import sieve" "primes = list(sieve.primerange(1, 10**6))"
which gives:
10 loops, best of 10: 2.03 msec per loop
The answer with 247 upvotes lists 15.9 ms for its best solution.
Compare that!
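For reference, here is the same sympy call as a plain interactive snippet instead of a timeit one-liner (sieve.primerange is part of sympy's public API; 78498 is the prime count below one million):
>>> from sympy import sieve
>>> primes = list(sieve.primerange(1, 10**6))
>>> len(primes)
78498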
I'm surprised no one has mentioned numba.
This version reaches the 1M mark in 2.47 ms ± 36.5 µs.
A few years ago, pseudocode for the Sieve of Atkin appeared on the Wikipedia article on prime numbers. It is no longer there, and the reference at Sieve of Atkin seems to describe a different algorithm. The 2007-03-01 revision of the Wikipedia page ("Prime number" as of 2007-03-01) shows the pseudocode I used as a reference.
import numpy as np
from numba import njit

@njit
def nb_primes(n):
    # Generates prime numbers 2 <= p <= n
    # Atkin's sieve -- see https://en.wikipedia.org/w/index.php?title=Prime_number&oldid=111775466
    sqrt_n = int(np.sqrt(n)) + 1

    # initialize the sieve
    s = np.full(n + 1, -1, dtype=np.int8)
    s[2] = 1
    s[3] = 1

    # put in candidate primes:
    # integers which have an odd number of
    # representations by certain quadratic forms
    for x in range(1, sqrt_n):
        x2 = x * x
        for y in range(1, sqrt_n):
            y2 = y * y
            k = 4 * x2 + y2
            if k <= n and (k % 12 == 1 or k % 12 == 5): s[k] *= -1
            k = 3 * x2 + y2
            if k <= n and (k % 12 == 7): s[k] *= -1
            k = 3 * x2 - y2
            if k <= n and x > y and k % 12 == 11: s[k] *= -1

    # eliminate composites by sieving
    for k in range(5, sqrt_n):
        if s[k]:
            k2 = k * k
            # k is prime, omit multiples of its square; this is sufficient because
            # composites which managed to get on the list cannot be square-free
            for i in range(1, n // k2 + 1):
                j = i * k2  # j ∈ {k², 2k², 3k², ..., n}
                s[j] = -1
    return np.nonzero(s > 0)[0]

# initial run for "compilation"
nb_primes(10)
Timings
In[10]:
%timeit nb_primes(1_000_000)
Out[10]:
2.47 ms ± 36.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
In[11]:
%timeit nb_primes(10_000_000)
Out[11]:
33.4 ms ± 373 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
In[12]:
%timeit nb_primes(100_000_000)
Out[12]:
828 ms ± 5.64 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
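A quick sanity check (my own addition, assuming nb_primes as defined above; 78498 is the prime count below one million):
>>> nb_primes(30).tolist()
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
>>> len(nb_primes(1000000))
78498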
Here is a slightly different half-sieve implementation using Numpy:
http://rebrained.com/?p=458
import math
import numpy

def prime6(upto):
    primes = numpy.arange(3, upto + 1, 2)
    isprime = numpy.ones((upto - 1) // 2, dtype=bool)   # integer division (//) keeps this valid on Python 3
    for factor in primes[:int(math.sqrt(upto))]:
        if isprime[(factor - 2) // 2]:
            isprime[(factor * 3 - 2) // 2:(upto - 1) // 2:factor] = 0
    return numpy.insert(primes[isprime], 0, 2)
Can someone compare this to the other timings? On my machine it seems on par with the other Numpy half-sieve.
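One way to time it, following the same timeit convention used earlier in this thread (the module name primes is an assumption, as in the earlier measurements; output omitted since results vary by machine):
>>> timeit.timeit('primes.prime6(10**6)', setup='import primes', number=1)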
A deterministic implementation of the Miller-Rabin primality test, assuming N < 9,080,191:
import sys

def miller_rabin_pass(a, n):
    d = n - 1
    s = 0
    while d % 2 == 0:
        d >>= 1
        s += 1

    a_to_power = pow(a, d, n)
    if a_to_power == 1:
        return True
    for i in range(s - 1):
        if a_to_power == n - 1:
            return True
        a_to_power = (a_to_power * a_to_power) % n
    return a_to_power == n - 1

def miller_rabin(n):
    if n <= 2:
        return n == 2
    if n < 2_047:
        return miller_rabin_pass(2, n)
    return all(miller_rabin_pass(a, n) for a in (31, 73))

n = int(sys.argv[1])
primes = [2]
for p in range(3, n, 2):
    if miller_rabin(p):
        primes.append(p)
print(len(primes))
According to the article on Wikipedia (http://en.wikipedia.org/wiki/Miller-Rabin_primality_test), testing with a = 31 and 73 is enough to tell whether N is composite or not for N < 9,080,191.
I adapted the source code from the probabilistic implementation of the original Miller-Rabin test found at https://www.literateprograms.org/miller-rabin_primality_test__python_.html
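A cross-check sketch (my own addition, assuming the functions above are imported rather than run as a script): counting primes below one million with this test should reproduce the 78498 figure from the sieve-based answers.
>>> sum(1 for k in range(2, 10**6) if miller_rabin(k))
78498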