Rust, score ≈ 22
This uses a dynamic programming approach (I’ve added an explanation here) whose running time seems to scale as approximately \$\tilde O(2^{1.5n})\$, rather than the \$\tilde O(2^{2n})\$ of a brute-force search. On my Ryzen 7 1800X (8 cores/16 threads), it gets through \$1 \le n \le 21\$ in 1.7 minutes, \$1 \le n \le 22\$ in 5.1 minutes.
Now using SIMD for the inner loop.
src/main.rs
use fxhash::FxBuildHasher;
use itertools::izip;
use rayon::prelude::*;
use std::arch::x86_64::*;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::mem;
use typed_arena::Arena;
// Use mimalloc as the global allocator: the hash maps and arena slices below
// allocate heavily from many rayon worker threads at once.
#[global_allocator]
static ALLOC: mimallocator::Mimalloc = mimallocator::Mimalloc;
// One edit-distance value. n is capped (n <= 31, plus the overflow checks in
// main), so every distance fits in a signed byte.
type Distance = i8;
// Multiplicity of one merged DP state (how many b-prefixes produce it).
type Count = u32;
// Grand total of distances summed over all pairs for one n.
type Total = u64;
// One 16-byte chunk of a DP row: 16 packed i8 distance values in a single SSE
// register. Wrapped in a newtype so slices of it can key a HashMap (PartialEq,
// Eq, and Hash are implemented below).
#[derive(Debug)]
struct Distances(__m128i);
impl PartialEq for Distances {
    /// Two chunks are equal iff all 16 byte lanes are equal.
    fn eq(&self, other: &Distances) -> bool {
        // Lane-wise compare produces 0xff per matching byte; movemask gathers
        // one bit per lane, so all-equal means all 16 bits are set.
        unsafe { _mm_movemask_epi8(_mm_cmpeq_epi8(self.0, other.0)) == 0xffff }
    }
}
impl Eq for Distances {}
impl Hash for Distances {
    /// Hashes the vector as its two 64-bit halves, low half first (the same
    /// bytes, in the same order, as extracting lanes 0 and 1).
    fn hash<H: Hasher>(&self, state: &mut H) {
        // x86_64 is little-endian, so lanes[0] is the low 64 bits, matching
        // _mm_extract_epi64(self.0, 0).
        let lanes: [i64; 2] = unsafe { mem::transmute(self.0) };
        lanes[0].hash(state);
        lanes[1].hash(state);
    }
}
// For each n, sums the Levenshtein distance over all pairs of binary strings
// of length n and prints it as a fraction of the 2^(2n-1) pair count (common
// powers of two cancelled). Per the write-up above: fix the first string `a`,
// then run a DP whose state is one row of the Levenshtein table for *all*
// second strings `b` simultaneously, clamping and merging rows that can no
// longer lead to different final distances, and carrying multiplicities.
fn main() {
// Lane-broadcast constants for the SIMD inner loop.
let splat0 = unsafe { _mm_set1_epi8(0) };
let splat1 = unsafe { _mm_set1_epi8(1) };
let splatff = unsafe { _mm_set1_epi8(!0) };
let splat7f = unsafe { _mm_set1_epi8(0x7f) };
// seq = lanes [0, 1, ..., 15] (_mm_set_epi8 lists high lane first).
let seq = unsafe { _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) };
// grow0/grow1: one distinct bit per byte lane (even/odd lanes respectively);
// used below to spread the 16 bits of a word across 16 byte lanes.
let grow0 = unsafe {
_mm_set_epi8(
-0x80, 0, 0x20, 0, 0x8, 0, 0x2, 0, 0, 0x40, 0, 0x10, 0, 0x4, 0, 0x1,
)
};
let grow1 = unsafe {
_mm_set_epi8(
0, 0x40, 0, 0x10, 0, 0x4, 0, 0x1, -0x80, 0, 0x20, 0, 0x8, 0, 0x2, 0,
)
};
for n in 1 as Distance.. {
// Stop before the counters could overflow: state multiplicities must fit
// in Count and the total over all 2^(2n) pairs (each distance <= n) must
// fit in Total.
if n > 31
|| (n as Count).leading_zeros() < n as u32
|| (n as Total).leading_zeros() < 2 * n as u32
{
break;
}
// Enumerate half of all `a` (complement symmetry fixes the top bit) in
// parallel; each task computes the distance total for its `a`.
let total: Total = (0u32..1 << (n - 1))
.into_par_iter()
.map(|a| {
// Canonicalize `a` under the reverse(-and-complement) symmetry:
// bit-reverse, complement iff the (pre-shift) top bit is set, then
// truncate to n bits.
let mut a_sym = a.reverse_bits();
a_sym ^= (a_sym >> 31).wrapping_neg();
a_sym >>= 32 - n as usize;
// Skip non-canonical representatives; the `* 2` at the bottom
// re-counts the skipped partner when a_sym != a.
if a_sym < a {
return 0;
}
// Arena owning every 16-lane row slice allocated for this `a`
// (rows double as HashMap keys, so they must outlive the maps).
let arena = Arena::<Distances>::new();
// Row length in lanes: n + 1 entries rounded up to whole vectors.
let stride = (n as usize + 16) / 16 * 16;
let idx = |i: Distance, j: Distance| i as usize + stride * j as usize;
// Collapse a [good-bit, bad-bit] pair to its worse (larger) value.
let both = |[x, y]: [Distance; 2]| x.max(y);
// worst[idx(i, j)]: bound on the distance contribution of the
// remaining suffixes from (i, j), split by whether b's next bit
// matches a's. Filled backwards from the (n, n) corner; used below
// to clamp DP rows so equivalent states merge.
let mut worst = vec![[stride as Distance; 2]; idx(0, n + 1)];
for j in 0..=n {
worst[idx(n, j)] = [n - j; 2];
}
for i in (0..n).rev() {
worst[idx(i, n)] = [n - i; 2];
let good = (a >> i & 1) as usize;
let bad = good ^ 1;
for j in (0..n).rev() {
// Matching bit: the diagonal move is free.
worst[idx(i, j)][good] = both(worst[idx(i + 1, j + 1)]);
// Mismatch: 1 + best of delete / insert / substitute.
worst[idx(i, j)][bad] = 1 + worst[idx(i + 1, j)][bad]
.min(both(worst[idx(i, j + 1)]))
.min(both(worst[idx(i + 1, j + 1)]));
}
}
// Collapse each pair with `both` and pack the table into 16-lane
// vectors for the SIMD loop.
let worst: &[Distances] = arena.alloc_extend(
worst
.into_iter()
.map(both)
.collect::<Box<[Distance]>>()
.chunks(16)
.map(|chunk| {
Distances(unsafe {
_mm_loadu_si128(chunk as *const [i8] as *const __m128i)
})
}),
);
// states: clamped DP row -> number of b-prefixes producing it.
let mut states: HashMap<&[Distances], Count, FxBuildHasher> = HashMap::default();
let mut new_states = HashMap::default();
// Initial row (empty b): distance at position i is simply i.
states.insert(
arena.alloc_extend(
(0..n + 1)
.step_by(16)
.map(|i| Distances(unsafe { _mm_add_epi8(_mm_set1_epi8(i), seq) })),
),
1,
);
// bvs[bit]: per-lane masks for extending b with a 0 or a 1 bit;
// a lane is 0xff (= -1) where that bit mismatches a at that
// position, so subtracting the mask adds the substitution cost of
// 1. grow0/grow1 spread the bits of the 16-bit word across lanes.
let bvs: Vec<_> = [a, !a]
.iter()
.map(|b| {
arena.alloc_extend((0..n + 1).step_by(16).map(|i| unsafe {
let x = _mm_set1_epi16(((b << 1) >> i) as i16);
Distances(_mm_xor_si128(
_mm_cmpeq_epi8(
_mm_or_si128(
_mm_and_si128(x, grow0),
_mm_and_si128(_mm_alignr_epi8(x, x, 1), grow1),
),
splat0,
),
splatff,
))
}))
})
.collect();
// Advance the DP one bit of b at a time; each surviving row spawns
// two successor rows (append 0 or append 1).
for j in 1..=n {
new_states.reserve(2 * states.len());
let worst_slice = &worst[idx(0, j) / 16..idx(0, j + 1) / 16];
for (state, count) in states.drain() {
for bv in &bvs {
// x carries the running minimum scan across vector
// boundaries (starts at the left-boundary value j);
// y carries the previous vector's last lane (the
// diagonal dependency).
let mut x = j;
let mut y = n.into();
let mut bound = n;
let new_state: &mut [Distances] =
arena.alloc_extend(izip!(&**bv, state, worst_slice).map(
|(&Distances(bc), &Distances(yc), &Distances(wc))| unsafe {
// Candidate: min(up + 1, diag + mismatch); bc is
// -1 on mismatch, so subtracting it adds 1. The
// shifted yc with y inserted at lane 0 is the
// diagonal (previous row, previous column).
let o = _mm_min_epi8(
_mm_add_epi8(yc, splat1),
_mm_sub_epi8(
_mm_insert_epi8(_mm_slli_si128(yc, 1), y, 0),
bc,
),
);
y = _mm_extract_epi8(yc, 15);
// Resolve the horizontal (insertion) dependency
// min over m <= k of o[m] + (k - m): subtract seq,
// then a prefix-min — done as a prefix-max on the
// 0x7f-complemented values in log2(16) shift+max
// steps — then add seq back.
let o = _mm_sub_epi8(o, seq);
let o = _mm_min_epi8(o, _mm_set1_epi8(x));
let o = _mm_sub_epi8(splat7f, o);
let o = _mm_max_epu8(o, _mm_slli_si128(o, 1));
let o = _mm_max_epu8(o, _mm_slli_si128(o, 2));
let o = _mm_max_epu8(o, _mm_slli_si128(o, 4));
let o = _mm_max_epu8(o, _mm_slli_si128(o, 8));
let o = _mm_sub_epi8(splat7f, o);
// Carry the scan into the next vector (+16
// re-biases for the next vector's seq offsets).
x = _mm_extract_epi8(o, 15) as i8 + 16;
let o = _mm_add_epi8(o, seq);
// bound = min over lanes of (row value + worst
// remaining): an upper bound on the final
// distance reachable from this row.
let z = _mm_add_epi8(o, wc);
let z = _mm_min_epi8(z, _mm_srli_si128(z, 1));
let z = _mm_min_epi8(z, _mm_srli_si128(z, 2));
let z = _mm_min_epi8(z, _mm_srli_si128(z, 4));
let z = _mm_min_epi8(z, _mm_srli_si128(z, 8));
bound = bound.min(_mm_extract_epi8(z, 0) as i8);
Distances(o)
},
));
// Clamp each lane to bound - |pos - j|: larger values
// cannot affect the final answer, so clamping makes
// rows that are equivalent from here on compare equal
// and merge in the map.
let bound = unsafe { _mm_set1_epi8(bound) };
for (i, Distances(x)) in (0..).step_by(16).zip(&mut *new_state) {
*x = unsafe {
_mm_min_epi8(
*x,
_mm_sub_epi8(
bound,
_mm_abs_epi8(_mm_add_epi8(_mm_set1_epi8(i - j), seq)),
),
)
};
}
// Merge identical rows, accumulating multiplicities.
*new_states.entry(&*new_state).or_insert(0) += count;
}
}
mem::swap(&mut states, &mut new_states);
}
// Shuffle control selecting lane n % 16 of a vector: position n of
// the row, i.e. the full-length distance for a completed b.
let control = unsafe { _mm_insert_epi8(splatff, (n % 16).into(), 0) };
// Sum distance * multiplicity over all final states; double when
// `a` stands for a skipped symmetric partner.
Total::from(
states
.into_iter()
.map(|(state, count)| unsafe {
count
* _mm_extract_epi8(
_mm_shuffle_epi8(state[n as usize / 16].0, control),
0,
) as Count
})
.sum::<Count>(),
) * if a_sym == a { 1 } else { 2 }
})
.sum();
// Print "n total/2^(2n-1)" with the common power of two cancelled.
let shift = total.trailing_zeros();
println!(
"{} {}/{}",
n,
total >> shift,
(1 as Total) << (2 * n as u32 - 1 - shift),
);
}
}
Cargo.toml
# Manifest for the Levenshtein average-distance solver (src/main.rs).
[package]
name = "levenshtein"
version = "0.1.0"
authors = ["Anders Kaseorg <andersk@mit.edu>"]
edition = "2018"
# Maximize optimization: whole-program LTO, single codegen unit.
[profile.release]
lto = true
codegen-units = 1
[dependencies]
# Fast non-cryptographic hasher for the state-counting HashMaps.
fxhash = "0.2.1"
# izip! macro used in the SIMD inner loop.
itertools = "0.8.2"
# Fast global allocator (see #[global_allocator] in main.rs).
mimallocator = "0.1.3"
# Data parallelism over the outer loop on bit patterns `a`.
rayon = "1.3.0"
# Arena allocation for the per-task DP row slices.
typed-arena = "2.0.0"
Running
RUSTFLAGS='-C target-cpu=native' cargo build --release
target/release/levenshtein
Output
(With cumulative timing data prepended by `ts -s %.s`.)
0.000008 1 1/2
0.000150 2 1/1
0.000219 3 47/32
0.000282 4 243/128
0.000344 5 1179/512
0.000413 6 2755/1024
0.000476 7 12561/4096
0.000538 8 56261/16384
0.000598 9 124329/32768
0.000660 10 2175407/524288
0.000721 11 589839/131072
0.000782 12 40664257/8388608
0.000843 13 174219279/33554432
0.006964 14 742795299/134217728
0.068070 15 1576845897/268435456
0.310136 16 13340661075/2147483648
1.062122 17 14062798725/2147483648
3.586745 18 59125997473/8589934592
11.265840 19 123976260203/17179869184
33.691822 20 259354089603/34359738368
101.514674 21 8662782598909/1099511627776
307.427106 22 72199426617073/8796093022208
956.299101 23 150173613383989/17592186044416
3077.477731 24 1247439983177201/140737488355328
10276.205241 25 5173410986415247/562949953421312
34550.754308 26 5356540527479769/562949953421312
Static core2 build for Anush
@LuisMendo I managed to update it from my phone. – Anush – 2019-12-29T22:15:54.850
1This formulation of the question allows you to pre-create a table of output values. – Кирилл Малышев – 2019-12-30T02:53:41.533
2If you prohibit tables, it will be possible to derive a mathematical function from the table that will give the correct result for the selected n. – Кирилл Малышев – 2019-12-30T02:57:39.087
2
Interesting, even the limiting form of the average distance is not known precisely, so I doubt a closed form is known.
– xnor – 2019-12-30T06:15:16.233

4 @КириллМалышев The standard loophole rules prohibit precomputing the answers; if you manage to work out a closed formula, then you deserve both the win and the bounty that I will give you. – Anush – 2019-12-30T08:30:58.600
1"As always this should be a competition per language" why on earth would a [fastest-code] competition be per-language? To prevent people from creating magic languages that excel at calculating the average Levenshtein distance exactly and nothing else and easily winning? We're supposedly comparing the programs' performance and ignoring their code in [fastest-code], similarly to how we ignore the programs' performance and compare their code in [code-golf] challenges. The equivalent of per-language things in [code-golf] is probably a per-CPU competition in [fastest-code] or something. – my pronoun is monicareinstate – 2019-12-31T05:58:01.720
@mypronounismonicareinstate My reasoning is that we are testing the ability of the coder not the different programming languages. If you can make a Python program that is faster than anyone else’s then that is great. It’s fairly obvious that hand optimised assembly is always the fastest programming language. To compare with code-golf, we are comparing the performance and ignoring the length of the code. – Anush – 2019-12-31T06:18:01.137
1@Anush “It’s fairly obvious that hand optimised assembly is always the fastest programming language.” This is actually false, because most assembly nowadays is actually more like C bytecode than the actual primitives the processor will execute. You've got microcode, vectorisation… – wizzwizz4 – 2019-12-31T18:42:37.970
@wizzwizz4 Also optimal register allocation by hand is not very easy. – Anush – 2019-12-31T19:37:54.683
I noticed that my solution does not scale very well on your test computer:
(n my yours factor)-> {(v4 17 148 452 3.1), (v3 16 271 483 1.7), (v1 15 139 224 1.6)}. Can you confirm this? – Bob Genom – 2020-01-11T14:59:11.487