Zlib: Add some protections to the allocator used by zlib #84604

Merged · 6 commits · Apr 21, 2023
4 changes: 4 additions & 0 deletions src/native/external/zlib-intel.cmake
@@ -20,6 +20,10 @@ set(ZLIB_SOURCES_BASE
trees.c
x86.c
zutil.c
../../libs/System.IO.Compression.Native/zlib_allocator_win.c
)

# enable custom zlib allocator
add_definitions(-DMY_ZCALLOC)

addprefix(ZLIB_SOURCES "${CMAKE_CURRENT_LIST_DIR}/zlib-intel" "${ZLIB_SOURCES_BASE}")
8 changes: 8 additions & 0 deletions src/native/external/zlib.cmake
@@ -29,4 +29,12 @@ set(ZLIB_SOURCES_BASE
zutil.h
)

# enable custom zlib allocator
add_definitions(-DMY_ZCALLOC)
if(CLR_CMAKE_HOST_WIN32)
set(ZLIB_SOURCES_BASE ${ZLIB_SOURCES_BASE} ../../libs/System.IO.Compression.Native/zlib_allocator_win.c)
else()
set(ZLIB_SOURCES_BASE ${ZLIB_SOURCES_BASE} ../../libs/System.IO.Compression.Native/zlib_allocator_unix.c)
endif()

addprefix(ZLIB_SOURCES "${CMAKE_CURRENT_LIST_DIR}/zlib" "${ZLIB_SOURCES_BASE}")
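
For context on the -DMY_ZCALLOC definition above: zlib only compiles its built-in malloc/free-based allocator when that macro is not defined, and its init routines substitute zcalloc/zcfree for any zalloc/zfree hooks left as Z_NULL, so defining the macro makes the linker resolve those symbols to the hardened implementations added in this PR. A rough sketch of the mechanism, paraphrased from zlib's zutil.c and deflate.c (not part of this diff):

/* zutil.c (paraphrased): zlib's own allocator exists only when MY_ZCALLOC is
 * NOT defined; with -DMY_ZCALLOC the zcalloc/zcfree symbols are instead
 * provided by zlib_allocator_win.c or zlib_allocator_unix.c. */
#ifndef MY_ZCALLOC
voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, unsigned items, unsigned size)
{
    (void)opaque;
    return sizeof(uInt) > 2 ? (voidpf)malloc(items * size)
                            : (voidpf)calloc(items, size);
}

void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr)
{
    (void)opaque;
    free(ptr);
}
#endif /* !MY_ZCALLOC */

/* deflate.c / inflate.c (paraphrased): when a caller leaves the allocator
 * hooks as Z_NULL -- as pal_zlib.c does -- zlib wires in zcalloc/zcfree:
 *
 *     if (strm->zalloc == (alloc_func)0) strm->zalloc = zcalloc;
 *     if (strm->zfree  == (free_func)0)  strm->zfree  = zcfree;
 */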
@@ -104,6 +104,7 @@ else ()

if (CLR_CMAKE_HOST_ARCH_I386 OR CLR_CMAKE_HOST_ARCH_AMD64)
include(${CLR_SRC_NATIVE_DIR}/external/zlib-intel.cmake)
add_definitions(-DINTERNAL_ZLIB_INTEL)
else ()
include(${CLR_SRC_NATIVE_DIR}/external/zlib.cmake)
endif ()
11 changes: 6 additions & 5 deletions src/native/libs/System.IO.Compression.Native/pal_zlib.c
@@ -9,7 +9,11 @@
 #ifdef _WIN32
 #define c_static_assert(e) static_assert((e),"")
 #endif
-#include <external/zlib/zlib.h>
+#ifdef INTERNAL_ZLIB_INTEL
+#include <external/zlib-intel/zlib.h>
+#else
+#include <external/zlib/zlib.h>
+#endif
 #else
 #include "pal_utilities.h"
 #include <zlib.h>
@@ -39,14 +43,11 @@ Initializes the PAL_ZStream by creating and setting its underlying z_stream.
 */
 static int32_t Init(PAL_ZStream* stream)
 {
-    z_stream* zStream = (z_stream*)malloc(sizeof(z_stream));
+    z_stream* zStream = (z_stream*)calloc(1, sizeof(z_stream));
     stream->internalState = zStream;

     if (zStream != NULL)
     {
-        zStream->zalloc = Z_NULL;
-        zStream->zfree = Z_NULL;
-        zStream->opaque = Z_NULL;
         return PAL_Z_OK;
     }
     else
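
A note on the Init change above: Z_NULL is defined as 0 in zlib.h, so zero-filling the freshly allocated z_stream with calloc already leaves zalloc, zfree and opaque equal to Z_NULL (and every other field cleared), which is why the explicit assignments could be dropped. A minimal compile-time check of that assumption (illustrative only, not part of the PR):

#include <zlib.h>

/* Fails to compile if Z_NULL were ever nonzero; a zero-filled z_stream would
 * then no longer be equivalent to setting zalloc/zfree/opaque to Z_NULL. */
typedef char assert_z_null_is_zero[(Z_NULL == 0) ? 1 : -1];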
153 changes: 153 additions & 0 deletions src/native/libs/System.IO.Compression.Native/zlib_allocator_unix.c
@@ -0,0 +1,153 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include <stdbool.h>
#include <stdint.h>
#include <external/zlib/zutil.h>

/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
* (non-Windows version)
*
* 1. When zlib allocates fixed-length data structures for containing stream metadata, we zero
* the memory before using it, preventing use of uninitialized memory within these structures.
* Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
* perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
* these data structures contain most of the metadata used for managing the variable-length
* dynamically allocated buffers.
*
* 2. We put a cookie both before and after any allocated memory, which allows us to detect local
* buffer overruns on the call to free(). The cookie values are tied to the addresses where
* the data is located in memory.
*
* 3. We trash the aforementioned cookie on free(), which allows us to detect double-free.
*
* If any of these checks fails, the application raises SIGABRT.
*/

#ifndef MEMORY_ALLOCATION_ALIGNMENT
// malloc() returns an address suitably aligned for any built-in data type.
// Historically, this has been twice the arch's natural word size.
#ifdef HOST_64BIT
#define MEMORY_ALLOCATION_ALIGNMENT 16
#else
#define MEMORY_ALLOCATION_ALIGNMENT 8
#endif
#endif

typedef struct _DOTNET_ALLOC_COOKIE
{
void* Address;
size_t Size;
} DOTNET_ALLOC_COOKIE;

static bool SafeAdd(size_t a, size_t b, size_t* sum)
{
if (SIZE_MAX - a >= b) { *sum = a + b; return true; }
else { *sum = 0; return false; }
}

static bool SafeMult(size_t a, size_t b, size_t* product)
{
if (SIZE_MAX / a >= b) { *product = a * b; return true; }
else { *product = 0; return false; }
}

static DOTNET_ALLOC_COOKIE ReadAllocCookieUnaligned(const void* pSrc)
{
DOTNET_ALLOC_COOKIE vCookie;
memcpy(&vCookie, pSrc, sizeof(DOTNET_ALLOC_COOKIE));
return vCookie;
}

static void WriteAllocCookieUnaligned(void* pDest, DOTNET_ALLOC_COOKIE vCookie)
{
memcpy(pDest, &vCookie, sizeof(DOTNET_ALLOC_COOKIE));
}

// Historically, the memory allocator always returns addresses aligned to some
// particular boundary. We'll make that same guarantee here just in case somebody
// depends on it.
const size_t DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((size_t)MEMORY_ALLOCATION_ALIGNMENT - 1);
const size_t DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);

voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
voidpf opaque;
unsigned items;
unsigned size;
{
(void)opaque; // unreferenced formal parameter

// If initializing a fixed-size structure, zero the memory.
bool fZeroMemory = (items == 1);

size_t cbRequested;
if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
{
// multiplication can't overflow; no need for safeint
cbRequested = (size_t)items * (size_t)size;
}
else
{
// multiplication can overflow; go through safeint
if (!SafeMult((size_t)items, (size_t)size, &cbRequested)) { return NULL; }
}

// Make sure the actual allocation has enough room for our frontside & backside cookies.
size_t cbActualAllocationSize;
if (!SafeAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize)) { return NULL; }

void* pAlloced = (fZeroMemory) ? calloc(1, cbActualAllocationSize) : malloc(cbActualAllocationSize);
if (pAlloced == NULL) { return NULL; } // OOM

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
uint8_t* pReturnToCaller = (uint8_t*)pAlloced + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;
uint8_t* pTrailerCookie = pReturnToCaller + cbRequested;

// Write out the same cookie for the header & the trailer, then we're done.

DOTNET_ALLOC_COOKIE vCookie = { 0 };
vCookie.Address = pReturnToCaller;
vCookie.Size = cbRequested;
*pHeaderCookie = vCookie; // aligned
WriteAllocCookieUnaligned(pTrailerCookie, vCookie);

return pReturnToCaller;
}

static void zcfree_trash_cookie(void* pCookie)
{
memset(pCookie, 0, sizeof(DOTNET_ALLOC_COOKIE));
}

void ZLIB_INTERNAL zcfree(opaque, ptr)
voidpf opaque;
voidpf ptr;
{
(void)opaque; // unreferenced formal parameter

if (ptr == NULL) { return; } // ok to free nullptr

// Check cookie at beginning

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((uint8_t*)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
if (pHeaderCookie->Address != ptr) { goto Fail; }
size_t cbRequested = pHeaderCookie->Size;

// Check cookie at end

uint8_t* pTrailerCookie = (uint8_t*)ptr + cbRequested;
DOTNET_ALLOC_COOKIE vTrailerCookie = ReadAllocCookieUnaligned(pTrailerCookie);
if (vTrailerCookie.Address != ptr) { goto Fail; }
if (vTrailerCookie.Size != cbRequested) { goto Fail; }

// Checks passed - now trash the cookies and free memory

zcfree_trash_cookie(pHeaderCookie);
zcfree_trash_cookie(pTrailerCookie);

free(pHeaderCookie);
return;

Fail:
abort(); // cookie check failed
}
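
The layout this allocator produces is: an aligned header cookie (padded out to MEMORY_ALLOCATION_ALIGNMENT), then the caller's cbRequested bytes, then an unaligned trailer cookie. Below is a self-contained sketch of the same header/trailer-cookie idea, with simplified, hypothetical names and a fixed 16-byte pad rather than the real zcalloc/zcfree above:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-alone model of the cookie scheme (hypothetical names;
 * the real implementation is zlib_allocator_unix.c above). */
typedef struct { void* address; size_t size; } cookie_t;

#define HEADER_SIZE ((sizeof(cookie_t) + 15) & ~(size_t)15)

static void* guarded_alloc(size_t n)
{
    uint8_t* raw = malloc(HEADER_SIZE + n + sizeof(cookie_t));
    if (raw == NULL) return NULL;
    uint8_t* user = raw + HEADER_SIZE;
    cookie_t c = { user, n };
    memcpy(raw, &c, sizeof(c));        /* header cookie (aligned)    */
    memcpy(user + n, &c, sizeof(c));   /* trailer cookie (unaligned) */
    return user;
}

static void guarded_free(void* p)
{
    if (p == NULL) return;
    uint8_t* raw = (uint8_t*)p - HEADER_SIZE;
    cookie_t head, tail;
    memcpy(&head, raw, sizeof(head));
    if (head.address != p) abort();    /* underrun or double free    */
    memcpy(&tail, (uint8_t*)p + head.size, sizeof(tail));
    if (tail.address != p || tail.size != head.size) abort(); /* overrun */
    memset(raw, 0, sizeof(cookie_t));                     /* trash cookies */
    memset((uint8_t*)p + head.size, 0, sizeof(cookie_t)); /* -> catches double free */
    free(raw);
}

int main(void)
{
    uint8_t* buf = guarded_alloc(16);
    buf[16] = 0xFF;     /* one-byte overrun corrupts the trailer cookie */
    guarded_free(buf);  /* cookie check fails -> abort()                */
    return 0;
}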
181 changes: 181 additions & 0 deletions src/native/libs/System.IO.Compression.Native/zlib_allocator_win.c
@@ -0,0 +1,181 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include <Windows.h>
#include <heapapi.h>
#include <intsafe.h>
#include <winnt.h>
#include <crtdbg.h> /* _ASSERTE */

#ifdef INTERNAL_ZLIB_INTEL
#include <external/zlib-intel/zutil.h>
#else
#include <external/zlib/zutil.h>
#endif

/* A custom allocator for zlib that provides some defense-in-depth over standard malloc / free.
* (Windows-specific version)
*
* 1. In 64-bit processes, we use a custom heap rather than relying on the standard process heap.
* This should cause zlib's buffers to go into a separate address range from the rest of app
* data, making it more difficult for buffer overruns to affect non-zlib-related data structures.
*
* 2. When zlib allocates fixed-length data structures for containing stream metadata, we zero
* the memory before using it, preventing use of uninitialized memory within these structures.
* Ideally we would do this for dynamically-sized buffers as well, but there is a measurable
* perf impact to doing this. Zeroing fixed structures seems like a good trade-off here, since
* these data structures contain most of the metadata used for managing the variable-length
* dynamically allocated buffers.
*
* 3. We put a cookie both before and after any allocated memory, which allows us to detect local
* buffer overruns on the call to free(). The cookie values are enciphered to make it more
* difficult for somebody to guess a correct value.
*
* 4. We trash the aforementioned cookie on free(), which allows us to detect double-free.
*
* If any of these checks fails, the application terminates immediately, optionally triggering a
* crash dump. We use a special code that's easy to search for in Watson.
*/

// Gets the special heap we'll allocate from.
HANDLE GetZlibHeap()
{
#ifdef _WIN64
static HANDLE s_hPublishedHeap = NULL;

// If already initialized, return immediately.
// We don't need a volatile read here since the publish is performed with release semantics.
if (s_hPublishedHeap != NULL) { return s_hPublishedHeap; }

// Attempt to create a new heap. The heap will be dynamically sized.
HANDLE hNewHeap = HeapCreate(0, 0, 0);

if (hNewHeap != NULL)
{
// We created a new heap. Attempt to publish it.
if (InterlockedCompareExchangePointer(&s_hPublishedHeap, hNewHeap, NULL) != NULL)
{
HeapDestroy(hNewHeap); // Somebody published before us. Destroy our heap.
hNewHeap = NULL; // Guard against accidental use later in the method.
}
}
else
{
// If we can't create a new heap, fall back to the process default heap.
InterlockedCompareExchangePointer(&s_hPublishedHeap, GetProcessHeap(), NULL);
}

// Some thread - perhaps us, perhaps somebody else - published the heap. Return it.
// We don't need a volatile read here since the publish is performed with release semantics.
_ASSERTE(s_hPublishedHeap != NULL);
return s_hPublishedHeap;
#else
// We don't want to create a new heap in a 32-bit process because it could end up
// reserving too much of the address space. Instead, fall back to the normal process heap.
return GetProcessHeap();
#endif
}

typedef struct _DOTNET_ALLOC_COOKIE
{
PVOID CookieValue;
union _Size
{
SIZE_T RawValue;
LPVOID EncodedValue;
} Size;
} DOTNET_ALLOC_COOKIE;

// Historically, the Windows memory allocator always returns addresses aligned to some
// particular boundary. We'll make that same guarantee here just in case somebody
// depends on it.
const SIZE_T DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING = (sizeof(DOTNET_ALLOC_COOKIE) + MEMORY_ALLOCATION_ALIGNMENT - 1) & ~((SIZE_T)MEMORY_ALLOCATION_ALIGNMENT - 1);
const SIZE_T DOTNET_ALLOC_TRAILER_COOKIE_SIZE = sizeof(DOTNET_ALLOC_COOKIE);

voidpf ZLIB_INTERNAL zcalloc(opaque, items, size)
voidpf opaque;
unsigned items;
unsigned size;
{
(void)opaque; // suppress C4100 - unreferenced formal parameter

// If initializing a fixed-size structure, zero the memory.
DWORD dwFlags = (items == 1) ? HEAP_ZERO_MEMORY : 0;

SIZE_T cbRequested;
if (sizeof(items) + sizeof(size) <= sizeof(cbRequested))
{
// multiplication can't overflow; no need for safeint
cbRequested = (SIZE_T)items * (SIZE_T)size;
}
else
{
// multiplication can overflow; go through safeint
if (FAILED(SIZETMult(items, size, &cbRequested))) { return NULL; }
}

// Make sure the actual allocation has enough room for our frontside & backside cookies.
SIZE_T cbActualAllocationSize;
if (FAILED(SIZETAdd(cbRequested, DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING + DOTNET_ALLOC_TRAILER_COOKIE_SIZE, &cbActualAllocationSize))) { return NULL; }

LPVOID pAlloced = HeapAlloc(GetZlibHeap(), dwFlags, cbActualAllocationSize);
if (pAlloced == NULL) { return NULL; } // OOM

// Now set the header & trailer cookies
DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)pAlloced;
pHeaderCookie->CookieValue = EncodePointer(&pHeaderCookie->CookieValue);
pHeaderCookie->Size.RawValue = cbRequested;

LPBYTE pReturnToCaller = (LPBYTE)pHeaderCookie + DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING;

UNALIGNED DOTNET_ALLOC_COOKIE* pTrailerCookie = (UNALIGNED DOTNET_ALLOC_COOKIE*)(pReturnToCaller + cbRequested);
pTrailerCookie->CookieValue = EncodePointer(&pTrailerCookie->CookieValue);
pTrailerCookie->Size.EncodedValue = EncodePointer((PVOID)cbRequested);

return pReturnToCaller;
}

FORCEINLINE
void zcfree_trash_cookie(UNALIGNED DOTNET_ALLOC_COOKIE* pCookie)
{
memset(pCookie, 0, sizeof(*pCookie));
pCookie->CookieValue = (PVOID)(SIZE_T)0xDEADBEEF;
}

// Marked noinline to keep it on the call stack during crash reports.
DECLSPEC_NOINLINE
DECLSPEC_NORETURN
void zcfree_cookie_check_failed()
{
__fastfail(FAST_FAIL_HEAP_METADATA_CORRUPTION);
}

void ZLIB_INTERNAL zcfree(opaque, ptr)
voidpf opaque;
voidpf ptr;
{
(void)opaque; // suppress C4100 - unreferenced formal parameter

if (ptr == NULL) { return; } // ok to free nullptr

// Check cookie at beginning and end

DOTNET_ALLOC_COOKIE* pHeaderCookie = (DOTNET_ALLOC_COOKIE*)((LPBYTE)ptr - DOTNET_ALLOC_HEADER_COOKIE_SIZE_WITH_PADDING);
if (DecodePointer(pHeaderCookie->CookieValue) != &pHeaderCookie->CookieValue) { goto Fail; }
SIZE_T cbRequested = pHeaderCookie->Size.RawValue;

UNALIGNED DOTNET_ALLOC_COOKIE* pTrailerCookie = (UNALIGNED DOTNET_ALLOC_COOKIE*)((LPBYTE)ptr + cbRequested);
if (DecodePointer(pTrailerCookie->CookieValue) != &pTrailerCookie->CookieValue) { goto Fail; }
if (DecodePointer(pTrailerCookie->Size.EncodedValue) != (LPVOID)cbRequested) { goto Fail; }

// Checks passed - now trash the cookies and free memory

zcfree_trash_cookie(pHeaderCookie);
zcfree_trash_cookie(pTrailerCookie);

if (!HeapFree(GetZlibHeap(), 0, pHeaderCookie)) { goto Fail; }
return;

Fail:
zcfree_cookie_check_failed();
}
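
GetZlibHeap above follows a create-then-publish pattern: any thread may create a heap, but only the first InterlockedCompareExchangePointer publishes one, losers destroy their candidate, and creation failure falls back to the process heap. Below is a portable C11 sketch of the same publication pattern, using hypothetical create_resource/destroy_resource/default_resource stand-ins rather than the Win32 heap APIs:

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-ins for HeapCreate / HeapDestroy / GetProcessHeap. */
static void* create_resource(void)     { return malloc(1); }
static void  destroy_resource(void* r) { free(r); }
static void* default_resource(void)    { static int fallback; return &fallback; }

static void* get_shared_resource(void)
{
    static _Atomic(void*) s_published = NULL;

    /* Fast path: some thread already published a resource. */
    void* existing = atomic_load_explicit(&s_published, memory_order_acquire);
    if (existing != NULL) return existing;

    /* Slow path: create a candidate and race to publish it. */
    void* candidate = create_resource();
    void* expected = NULL;
    if (candidate != NULL)
    {
        if (!atomic_compare_exchange_strong_explicit(
                &s_published, &expected, candidate,
                memory_order_release, memory_order_acquire))
        {
            destroy_resource(candidate); /* somebody else won the race */
        }
    }
    else
    {
        /* Could not create one; publish a shared default, as the Windows
         * code falls back to GetProcessHeap(). */
        atomic_compare_exchange_strong_explicit(
            &s_published, &expected, default_resource(),
            memory_order_release, memory_order_acquire);
    }

    return atomic_load_explicit(&s_published, memory_order_acquire);
}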