Skip to content

Commit a6e8a31

Browse files
authored
Rollup merge of #151611 - bonega:improve-is-slice-is-ascii-performance, r=folkertdev
Improve is_ascii performance on x86_64 with explicit SSE2 intrinsics # Summary Improves `slice::is_ascii` performance for SSE2 target roughly 1.5-2x on larger inputs. AVX-512 keeps similar performance characteristics. This is building on the work already merged in #151259. In particular this PR improves the default SSE2 performance; I don't consider this a temporary fix anymore. Thanks to @folkertdev for pointing me to consider `as_chunks` again. # The implementation: - Uses 64-byte chunks with 4x 16-byte SSE2 loads OR'd together - Extracts the MSB mask with a single `pmovmskb` instruction - Falls back to usize-at-a-time SWAR for inputs < 64 bytes # Performance impact (vs before #151259): - AVX-512: 34-48x faster - SSE2: 1.5-2x faster <details> <summary>Benchmark Results (click to expand)</summary> Benchmarked on AMD Ryzen 9 9950X (AVX-512 capable). Values show relative performance (1.00 = fastest). Tops out at 139GB/s for large inputs. ### early_non_ascii | Input Size | new_avx512 | new_sse2 | old_avx512 | old_sse2 | |------------|------------|----------|------------|----------| | 64 | 1.01 | **1.00** | 13.45 | 1.13 | | 1024 | 1.01 | **1.00** | 13.53 | 1.14 | | 65536 | 1.01 | **1.00** | 13.99 | 1.12 | | 1048576 | 1.02 | **1.00** | 13.29 | 1.12 | ### late_non_ascii | Input Size | new_avx512 | new_sse2 | old_avx512 | old_sse2 | |------------|------------|----------|------------|----------| | 64 | **1.00** | 1.01 | 13.37 | 1.13 | | 1024 | 1.10 | **1.00** | 42.42 | 1.95 | | 65536 | **1.00** | 1.06 | 42.22 | 1.73 | | 1048576 | **1.00** | 1.03 | 34.73 | 1.46 | ### pure_ascii | Input Size | new_avx512 | new_sse2 | old_avx512 | old_sse2 | |------------|------------|----------|------------|----------| | 4 | 1.03 | **1.00** | 1.75 | 1.32 | | 8 | **1.00** | 1.14 | 3.89 | 2.06 | | 16 | **1.00** | 1.04 | 1.13 | 1.62 | | 32 | 1.07 | 1.19 | 5.11 | **1.00** | | 64 | **1.00** | 1.13 | 13.32 | 1.57 | | 128 | **1.00** | 1.01 | 19.97 | 1.55 | | 256 | **1.00** | 1.02 | 27.77 | 1.61 | 
| 1024 | **1.00** | 1.02 | 41.34 | 1.84 | | 4096 | 1.02 | **1.00** | 45.61 | 1.98 | | 16384 | 1.01 | **1.00** | 48.67 | 2.04 | | 65536 | **1.00** | 1.03 | 43.86 | 1.77 | | 262144 | **1.00** | 1.06 | 41.44 | 1.79 | | 1048576 | 1.02 | **1.00** | 35.36 | 1.44 | </details> ## Reproduction / Test Projects Standalone validation tools: https://github.com/bonega/is-ascii-fix-validation - `bench/` - Criterion benchmarks for SSE2 vs AVX-512 comparison - `fuzz/` - Compares old/new implementations with libfuzzer Relates to: llvm/llvm-project#176906
2 parents 873d468 + dbc870a commit a6e8a31

2 files changed

Lines changed: 25 additions & 43 deletions

File tree

library/core/src/slice/ascii.rs

Lines changed: 22 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -460,56 +460,38 @@ const fn is_ascii(s: &[u8]) -> bool {
460460
)
461461
}
462462

463-
/// Chunk size for vectorized ASCII checking (two 16-byte SSE registers).
463+
/// Chunk size for SSE2 vectorized ASCII checking (4x 16-byte loads).
464464
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
465-
const CHUNK_SIZE: usize = 32;
465+
const SSE2_CHUNK_SIZE: usize = 64;
466466

467-
/// SSE2 implementation using `_mm_movemask_epi8` (compiles to `pmovmskb`) to
468-
/// avoid LLVM's broken AVX-512 auto-vectorization of counting loops.
469-
///
470-
/// FIXME(llvm#176906): Remove this workaround once LLVM generates efficient code.
471467
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
468+
#[inline]
472469
fn is_ascii_sse2(bytes: &[u8]) -> bool {
473470
use crate::arch::x86_64::{__m128i, _mm_loadu_si128, _mm_movemask_epi8, _mm_or_si128};
474471

475-
let mut i = 0;
476-
477-
while i + CHUNK_SIZE <= bytes.len() {
478-
// SAFETY: We have verified that `i + CHUNK_SIZE <= bytes.len()`.
479-
let ptr = unsafe { bytes.as_ptr().add(i) };
480-
481-
// Load two 16-byte chunks and combine them.
482-
// SAFETY: We verified `i + 32 <= len`, so ptr is valid for 32 bytes.
483-
// `_mm_loadu_si128` allows unaligned loads.
484-
let chunk1 = unsafe { _mm_loadu_si128(ptr as *const __m128i) };
485-
// SAFETY: Same as above - ptr.add(16) is within the valid 32-byte range.
486-
let chunk2 = unsafe { _mm_loadu_si128(ptr.add(16) as *const __m128i) };
487-
488-
// OR them together - if any byte has the high bit set, the result will too.
489-
// SAFETY: SSE2 is guaranteed by the cfg predicate.
490-
let combined = unsafe { _mm_or_si128(chunk1, chunk2) };
491-
492-
// Create a mask from the MSBs of each byte.
493-
// If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
494-
// SAFETY: SSE2 is guaranteed by the cfg predicate.
495-
let mask = unsafe { _mm_movemask_epi8(combined) };
496-
472+
let (chunks, rest) = bytes.as_chunks::<SSE2_CHUNK_SIZE>();
473+
474+
for chunk in chunks {
475+
let ptr = chunk.as_ptr();
476+
// SAFETY: chunk is 64 bytes. SSE2 is baseline on x86_64.
477+
let mask = unsafe {
478+
let a1 = _mm_loadu_si128(ptr as *const __m128i);
479+
let a2 = _mm_loadu_si128(ptr.add(16) as *const __m128i);
480+
let b1 = _mm_loadu_si128(ptr.add(32) as *const __m128i);
481+
let b2 = _mm_loadu_si128(ptr.add(48) as *const __m128i);
482+
// OR all chunks - if any byte has high bit set, combined will too.
483+
let combined = _mm_or_si128(_mm_or_si128(a1, a2), _mm_or_si128(b1, b2));
484+
// Create a mask from the MSBs of each byte.
485+
// If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
486+
_mm_movemask_epi8(combined)
487+
};
497488
if mask != 0 {
498489
return false;
499490
}
500-
501-
i += CHUNK_SIZE;
502-
}
503-
504-
// Handle remaining bytes with simple loop
505-
while i < bytes.len() {
506-
if !bytes[i].is_ascii() {
507-
return false;
508-
}
509-
i += 1;
510491
}
511492

512-
true
493+
// Handle remaining bytes
494+
rest.iter().all(|b| b.is_ascii())
513495
}
514496

515497
/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64`.
@@ -529,7 +511,7 @@ const fn is_ascii(bytes: &[u8]) -> bool {
529511
is_ascii_simple(bytes)
530512
} else {
531513
// For small inputs, use usize-at-a-time processing to avoid SSE2 call overhead.
532-
if bytes.len() < CHUNK_SIZE {
514+
if bytes.len() < SSE2_CHUNK_SIZE {
533515
let chunks = bytes.chunks_exact(USIZE_SIZE);
534516
let remainder = chunks.remainder();
535517
for chunk in chunks {

tests/assembly-llvm/slice-is-ascii.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,15 +13,15 @@
1313
/// Verify `is_ascii` generates efficient code on different architectures:
1414
///
1515
/// - x86_64: Must NOT use `kshiftrd`/`kshiftrq` (broken AVX-512 auto-vectorization).
16-
/// The fix uses explicit SSE2 intrinsics (`pmovmskb`/`vpmovmskb`).
17-
/// See: https://github.com/llvm/llvm-project/issues/176906
16+
/// Good version uses explicit SSE2 intrinsics (`pmovmskb`/`vpmovmskb`).
1817
///
1918
/// - loongarch64: Should use `vmskltz.b` instruction for the fast-path.
20-
/// This architecture still relies on LLVM auto-vectorization.
2119
2220
// X86_64-LABEL: test_is_ascii
2321
// X86_64-NOT: kshiftrd
2422
// X86_64-NOT: kshiftrq
23+
// X86_64: {{vpor|por}}
24+
// X86_64: {{vpmovmskb|pmovmskb}}
2525

2626
// LA64-LABEL: test_is_ascii
2727
// LA64: vmskltz.b

0 commit comments

Comments
 (0)