From 4b62a77e4d76f2f29cd46812e6d7400636790723 Mon Sep 17 00:00:00 2001
From: AngelicosPhosphoros
Date: Mon, 20 Dec 2021 15:57:47 +0300
Subject: [PATCH] Little improves in CString `new` when creating from slice

The old code already contained an optimization for the `&str` and `&[u8]`
argument cases. This commit adds a specialization for `&mut [u8]` too.

I also changed the search for zero bytes to scan the original slice instead of
the newly allocated buffer, because that produces better code for Windows on
LTO builds. For other platforms this makes no difference, since the search
calls into `libc` anyway.

Finally, the `_new` method is inlined into the spec trait to reduce the amount
of code generated for `CString::new` callers.
---
 library/std/src/ffi/c_str.rs | 67 ++++++++++++++++++++++++------------
 1 file changed, 45 insertions(+), 22 deletions(-)

diff --git a/library/std/src/ffi/c_str.rs b/library/std/src/ffi/c_str.rs
index 9c1b79d696697..d859bff1a45fa 100644
--- a/library/std/src/ffi/c_str.rs
+++ b/library/std/src/ffi/c_str.rs
@@ -373,38 +373,61 @@ impl CString {
     /// the position of the nul byte.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn new<T: Into<Vec<u8>>>(t: T) -> Result<CString, NulError> {
-        trait SpecIntoVec {
-            fn into_vec(self) -> Vec<u8>;
+        trait SpecNewImpl {
+            fn spec_new_impl(self) -> Result<CString, NulError>;
         }
-        impl<T: Into<Vec<u8>>> SpecIntoVec for T {
-            default fn into_vec(self) -> Vec<u8> {
-                self.into()
+
+        impl<T: Into<Vec<u8>>> SpecNewImpl for T {
+            default fn spec_new_impl(self) -> Result<CString, NulError> {
+                let bytes: Vec<u8> = self.into();
+                match memchr::memchr(0, &bytes) {
+                    Some(i) => Err(NulError(i, bytes)),
+                    None => Ok(unsafe { CString::from_vec_unchecked(bytes) }),
+                }
             }
         }
-        // Specialization for avoiding reallocation.
-        impl SpecIntoVec for &'_ [u8] {
-            fn into_vec(self) -> Vec<u8> {
-                let mut v = Vec::with_capacity(self.len() + 1);
-                v.extend(self);
-                v
+
+        // Specialization for avoiding reallocation
+        #[inline(always)] // Without that it is not inlined into specializations
+        fn spec_new_impl_bytes(bytes: &[u8]) -> Result<CString, NulError> {
+            // We cannot have such large slice that we would overflow here
+            // but using `checked_add` allows LLVM to assume that capacity never overflows
+            // and generate twice shorter code.
+            // `saturating_add` doesn't help for some reason.
+            let capacity = bytes.len().checked_add(1).unwrap();
+
+            // Allocate before validation to avoid duplication of allocation code.
+            // We still need to allocate and copy memory even if we get an error.
+            let mut buffer = Vec::with_capacity(capacity);
+            buffer.extend(bytes);
+
+            // Check memory of self instead of new buffer.
+            // This allows better optimizations if lto enabled.
+            match memchr::memchr(0, bytes) {
+                Some(i) => Err(NulError(i, buffer)),
+                None => Ok(unsafe { CString::from_vec_unchecked(buffer) }),
             }
         }
-        impl SpecIntoVec for &'_ str {
-            fn into_vec(self) -> Vec<u8> {
-                let mut v = Vec::with_capacity(self.len() + 1);
-                v.extend(self.as_bytes());
-                v
+
+        impl SpecNewImpl for &'_ [u8] {
+            fn spec_new_impl(self) -> Result<CString, NulError> {
+                spec_new_impl_bytes(self)
             }
         }
 
-        Self::_new(SpecIntoVec::into_vec(t))
-    }
+        impl SpecNewImpl for &'_ str {
+            fn spec_new_impl(self) -> Result<CString, NulError> {
+                spec_new_impl_bytes(self.as_bytes())
+            }
+        }
 
-    fn _new(bytes: Vec<u8>) -> Result<CString, NulError> {
-        match memchr::memchr(0, &bytes) {
-            Some(i) => Err(NulError(i, bytes)),
-            None => Ok(unsafe { CString::from_vec_unchecked(bytes) }),
+        impl SpecNewImpl for &'_ mut [u8] {
+            fn spec_new_impl(self) -> Result<CString, NulError> {
+                spec_new_impl_bytes(self)
+            }
         }
+
+        t.spec_new_impl()
     }
 
     /// Creates a C-compatible string by consuming a byte vector,
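
For context, here is a minimal usage sketch (not part of the patch) of the call shapes these specializations cover; the variable names and example byte strings are illustrative only:

use std::ffi::CString;

fn main() {
    // `&str` and `&[u8]` arguments take the specialized `spec_new_impl_bytes` path,
    // which reserves `len + 1` bytes up front and scans the original slice for a nul byte.
    let from_str = CString::new("hello").unwrap();
    let from_slice = CString::new(&b"world"[..]).unwrap();

    // `&mut [u8]` is the case newly covered by this patch; previously it fell back to the
    // default `Into<Vec<u8>>` implementation and reallocated when the nul terminator was appended.
    let mut scratch = *b"bytes";
    let from_mut_slice = CString::new(&mut scratch[..]).unwrap();

    // Owned vectors still go through the default (non-specialized) implementation.
    let from_vec = CString::new(vec![b'o', b'k']).unwrap();

    // An interior nul byte is still reported together with its position.
    let err = CString::new(&b"a\0b"[..]).unwrap_err();
    assert_eq!(err.nul_position(), 1);

    println!("{:?} {:?} {:?} {:?}", from_str, from_slice, from_mut_slice, from_vec);
}

Behaviorally nothing changes for callers; the patch only affects which code path performs the allocation and the nul-byte scan.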