malloc: Implement heap protector
2016-11-08 Florian Weimer <fweimer@redhat.com>
* malloc/malloc-internal.h (__malloc_header_guard)
(__malloc_footer_guard): Declare.
* malloc/malloc-guard.c: New file.
* malloc/Makefile (routines): Add it.
* malloc/malloc.c: Update malloc_chunk comment.
(HEAP_MANGLE_SIZE, HEAP_DEMANGLE_SIZE)
(HEAP_MANGLE_PREVSIZE, HEAP_DEMANGLE_PREVSIZE): Define.
(chunksize_nomask, prev_size, set_prev_size, set_head_size)
(set_head, set_foot): Add encryption.
* malloc/arena.c (ptmalloc_init): For shared builds, initialize
the heap guard variables. Initialize the top chunk.
* malloc/hooks.c (malloc_set_state): Apply the heap guard to the
dumped heap.
* malloc/tst-mallocstate.c (malloc_usable_size_valid): New
variable.
(check_allocation): Check malloc_usable_size result if
malloc_usable_size_valid.
(init_heap): Set malloc_usable_size_valid.
* csu/libc-start.c (LIBC_START_MAIN): Initialize heap guard
variables.
* sysdeps/generic/ldsodefs.h (struct rtld_global_ro): Add members
_dl_malloc_header_guard, _dl_malloc_footer_guard.
* elf/rtld.c (security_init): Initialize temporary copy of the
heap guard variables.
@@ -71,6 +71,10 @@ Version 2.25
for the Linux quota interface which predates kernel version 2.4.22 has
been removed.
+* The malloc implementation attempts to defeat certain exploitation
+  techniques targeted at malloc heap metadata.  This is not a security
+  boundary, merely a post-exploitation hardening measure.
+
* The malloc_get_state and malloc_set_state functions have been removed.
Already-existing binaries that dynamically link to these functions will
get a hidden implementation in which malloc_get_state is a stub. As far
@@ -22,6 +22,7 @@
#include <ldsodefs.h>
#include <exit-thread.h>
#include <elf/dl-keysetup.h>
+#include <malloc/malloc-internal.h>
extern void __libc_init_first (int argc, char **argv, char **envp);
@@ -210,6 +211,11 @@ LIBC_START_MAIN (int (*main) (int, char **, char ** MAIN_AUXVEC_DECL),
__pointer_chk_guard_local = keys.pointer;
# endif
+  /* In the non-shared case, we initialize the heap guard variables
+     directly.  */
+ __malloc_header_guard = keys.heap_header;
+ __malloc_footer_guard = keys.heap_footer;
+
#endif
/* Register the destructor of the dynamic linker if there is any. */
@@ -42,6 +42,7 @@
#include <stap-probe.h>
#include <stackinfo.h>
#include <dl-keysetup.h>
+#include <malloc/malloc-internal.h>
#include <assert.h>
@@ -716,6 +717,11 @@ security_init (void)
#endif
__pointer_chk_guard_local = keys.pointer;
+ /* Keep a copy of the computed keys, so that they can be obtained
+ during malloc initialization in libc.so. */
+ GLRO (dl_malloc_header_guard) = keys.heap_header;
+ GLRO (dl_malloc_footer_guard) = keys.heap_footer;
+
/* We do not need the _dl_random value anymore. The less
information we leave behind, the better, so clear the
variable. */
@@ -41,7 +41,7 @@ tests-static := \
tests += $(tests-static)
test-srcs = tst-mtrace
-routines = malloc morecore mcheck mtrace obstack \
+routines = malloc morecore mcheck mtrace obstack malloc-guard \
scratch_buffer_grow scratch_buffer_grow_preserve \
scratch_buffer_set_array_size
@@ -340,6 +340,19 @@ ptmalloc_init (void)
if (check_action != 0)
__malloc_check_init ();
}
+
+#ifdef SHARED
+  /* In shared builds, elf/rtld.c performed key setup in security_init,
+     and we copy the keys here.  In static builds, the guard cookies
+     have already been initialized in csu/libc-start.c.  */
+ __malloc_header_guard = GLRO (dl_malloc_header_guard);
+ __malloc_footer_guard = GLRO (dl_malloc_footer_guard);
+#endif
+
+  /* Initialize the top chunk based on the heap protector guards.  */
+ malloc_init_state (&main_arena);
+ set_head (main_arena.top, 0);
+
#if HAVE_MALLOC_INIT_HOOK
void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
if (hook != NULL)
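
Note: the set_head (main_arena.top, 0) call above is needed because
set_head now stores an XOR-mangled size: a freshly zeroed top chunk
would otherwise decode to the guard value rather than to a size of 0.
A minimal sketch of that invariant, using stand-in names and
simplified macros rather than the patch's actual definitions:

    /* Sketch only: header_guard, MANGLE_SIZE and DEMANGLE_SIZE stand in
       for __malloc_header_guard and the HEAP_*_SIZE macros.  */
    #include <assert.h>
    #include <stdint.h>

    static uintptr_t header_guard;

    #define MANGLE_SIZE(val)   (header_guard ^ (uintptr_t) (val))
    #define DEMANGLE_SIZE(val) (header_guard ^ (uintptr_t) (val))

    int
    main (void)
    {
      uintptr_t top_size = 0;     /* raw, zero-initialized mchunk_size */
      header_guard = 0xabcdef48;  /* arbitrary example; low three bits clear */

      /* Before re-initialization, the raw zero decodes to the guard
         value, not to the expected size of 0.  */
      assert (DEMANGLE_SIZE (top_size) != 0);

      /* set_head (top, 0) stores the mangled zero, so chunksize-style
         reads see the intended value again.  */
      top_size = MANGLE_SIZE (0);
      assert (DEMANGLE_SIZE (top_size) == 0);
      return 0;
    }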
@@ -537,35 +537,38 @@ malloc_set_state (void *msptr)
dumped_main_arena_end, realloc and free will recognize these
chunks as dumped fake mmapped chunks and never free them. */
- /* Find the chunk with the lowest address with the heap. */
- mchunkptr chunk = NULL;
+  /* Find the chunk with the lowest address within the heap.  If
+     successful, size_header will point to the mchunk_size member (not
+     the chunk start, i.e. the mchunk_prev_size member).  */
+ size_t *size_header = NULL;
{
size_t *candidate = (size_t *) ms->sbrk_base;
size_t *end = (size_t *) (ms->sbrk_base + ms->sbrked_mem_bytes);
while (candidate < end)
if (*candidate != 0)
{
- chunk = mem2chunk ((void *) (candidate + 1));
+ size_header = candidate;
break;
}
else
++candidate;
}
- if (chunk == NULL)
+ if (size_header == NULL)
return 0;
/* Iterate over the dumped heap and patch the chunks so that they
- are treated as fake mmapped chunks. */
+ are treated as fake mmapped chunks. We cannot use the regular
+ accessors because the chunks we read are not yet encrypted. */
mchunkptr top = ms->av[2];
- while (chunk < top)
+ size_t *top_size_header = ((size_t *) top) + 1;
+ while (size_header < top_size_header)
{
- if (inuse (chunk))
- {
- /* Mark chunk as mmapped, to trigger the fallback path. */
- size_t size = chunksize (chunk);
- set_head (chunk, size | IS_MMAPPED);
- }
- chunk = next_chunk (chunk);
+ size_t size = *size_header & ~SIZE_BITS;
+ /* We treat all chunks as allocated. The heap consistency
+ checks do not trigger because they are not active for the
+ dumped heap. */
+ *size_header = HEAP_MANGLE_SIZE (size) | IS_MMAPPED;
+ size_header += size / sizeof (*size_header);
}
/* The dumped fake mmapped chunks all lie in this address range. */
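
Note: the manual HEAP_MANGLE_SIZE (size) | IS_MMAPPED store above works
only because the header guard keeps its SIZE_BITS clear, so OR-ing a
flag bit into the mangled word is the same as mangling the flagged
size.  A standalone check of that identity, with stand-in constants
(the guard value is an arbitrary example):

    #include <assert.h>
    #include <stdint.h>

    #define SIZE_BITS  0x7
    #define IS_MMAPPED 0x2

    int
    main (void)
    {
      uintptr_t guard = 0xabcdef48;  /* stand-in guard; low three bits clear */
      uintptr_t size = 0x120;        /* chunk sizes have SIZE_BITS clear too */

      assert ((guard & SIZE_BITS) == 0);
      /* OR-ing the flag after mangling equals mangling size | flag.  */
      assert (((guard ^ size) | IS_MMAPPED) == (guard ^ (size | IS_MMAPPED)));
      return 0;
    }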
new file mode 100644
@@ -0,0 +1,31 @@
+/* Heap protector variables.
+ Copyright (C) 2016 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+/* These variables are defined in a separate file because the static
+ startup code initializes them, but this should not pull the rest of
+ the libc malloc implementation into the link. */
+
+#include <malloc-internal.h>
+
+/* The heap cookies.  The lowest three bits (corresponding to
+   SIZE_BITS) in __malloc_header_guard must be clear.  Computed by
+   elf/dl-keysetup.c; copied from rtld_global_ro in ptmalloc_init for
+   shared builds, and set directly in csu/libc-start.c for static
+   builds.  See "malloc_chunk details" in malloc.c for information on
+   how these values are used.  */
+INTERNAL_SIZE_T __malloc_header_guard; /* For size. */
+INTERNAL_SIZE_T __malloc_footer_guard; /* For prev_size. */
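
Note: the SIZE_BITS-clear requirement stated in the comment above is
what keeps the A|M|P flag bits readable through the XOR.  A minimal
sketch of the masking step, assuming a hypothetical
compute_header_guard helper; the real computation lives in
elf/dl-keysetup.c, which this patch does not show:

    #include <assert.h>
    #include <stdint.h>

    #define SIZE_BITS 0x7

    /* Hypothetical helper: derive the header guard from a random value,
       clearing the low three bits and thus trading three bits of
       entropy for XOR-transparent flag bits.  */
    static uintptr_t
    compute_header_guard (uintptr_t random_value)
    {
      return random_value & ~(uintptr_t) SIZE_BITS;
    }

    int
    main (void)
    {
      uintptr_t guard = compute_header_guard (0xdeadbeef);
      assert ((guard & SIZE_BITS) == 0);
      return 0;
    }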
@@ -81,5 +81,8 @@ void __malloc_fork_unlock_parent (void) internal_function attribute_hidden;
/* Called in the child process after a fork. */
void __malloc_fork_unlock_child (void) internal_function attribute_hidden;
+/* Random values for the heap protector. */
+extern INTERNAL_SIZE_T __malloc_header_guard attribute_hidden;
+extern INTERNAL_SIZE_T __malloc_footer_guard attribute_hidden;
#endif /* _MALLOC_INTERNAL_H */
@@ -1070,9 +1070,9 @@ struct malloc_chunk {
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if unallocated (P clear) |
+ | (X1) Size of previous chunk, if unallocated (P clear) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of chunk, in bytes |A|M|P|
+ | (X0) Size of chunk, in bytes |A|M|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| User data starts here... .
. .
@@ -1081,7 +1081,7 @@ struct malloc_chunk {
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| (size of chunk, but used for application data) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of next chunk, in bytes |A|0|1|
+ | (X0) Size of next chunk, in bytes |A|0|1|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Where "chunk" is the front of the chunk for the purpose of most of
@@ -1095,9 +1095,9 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Free chunks are stored in circular doubly-linked lists, and look like this:
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of previous chunk, if unallocated (P clear) |
+ | (X1) Size of previous chunk, if unallocated (P clear) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `head:' | Size of chunk, in bytes |A|0|P|
+ `head:' | (X0) Size of chunk, in bytes |A|0|P|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Forward pointer to next chunk in list |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -1107,9 +1107,9 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
. .
. |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- `foot:' | Size of chunk, in bytes |
+ `foot:' | (X1) Size of chunk, in bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Size of next chunk, in bytes |A|0|0|
+ | (X0) Size of next chunk, in bytes |A|0|0|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The P (PREV_INUSE) bit, stored in the unused low-order bit of the
@@ -1137,6 +1137,13 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
deal with alignments etc but can be very confusing when trying
to extend or adapt this code.
+  The (X0) and (X1) markers in the diagrams above refer to the heap
+  protector.  Fields marked with (X0) are XOR-obfuscated with the
+  __malloc_header_guard cookie; fields marked with (X1) are obfuscated
+  with __malloc_footer_guard.  The hope is that this obfuscation makes
+  it more difficult for an attacker to forge a valid malloc chunk
+  after exploiting a heap buffer overflow.
+
The three exceptions to all this are:
1. The special chunk `top' doesn't bother using the
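
Note: a toy illustration of the hardening effect described above, with
a fixed example guard (at run time the guard is random and unknown to
the attacker): a size field forged without knowledge of the guard
decodes to an unpredictable value, which downstream size and alignment
checks can then reject.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
      uintptr_t guard = 0xabcdef48;  /* secret in practice; fixed here
                                        only for the sake of the example */
      uintptr_t forged = 0x20;       /* attacker writes a plausible size */

      /* malloc reads the field through HEAP_DEMANGLE_SIZE and sees: */
      printf ("decoded size: %#" PRIxPTR "\n", guard ^ forged);
      return 0;
    }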
@@ -1241,6 +1248,17 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/* Mark a chunk as not being on the main arena. */
#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
+/* Encryption and decryption suitable for the mchunk_size member. */
+#define HEAP_MANGLE_SIZE(val) \
+ (__malloc_header_guard ^ ((INTERNAL_SIZE_T) (val)))
+#define HEAP_DEMANGLE_SIZE(val) \
+ (__malloc_header_guard ^ ((INTERNAL_SIZE_T) (val)))
+
+/* Encryption and decryption suitable for the mchunk_prev_size member. */
+#define HEAP_MANGLE_PREVSIZE(val) \
+ (__malloc_footer_guard ^ ((INTERNAL_SIZE_T) (val)))
+#define HEAP_DEMANGLE_PREVSIZE(val) \
+ (__malloc_footer_guard ^ ((INTERNAL_SIZE_T) (val)))
/*
Bits to mask off when extracting size
@@ -1256,16 +1274,16 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
/* Like chunksize, but do not mask SIZE_BITS. */
-#define chunksize_nomask(p) ((p)->mchunk_size)
+#define chunksize_nomask(p) HEAP_DEMANGLE_SIZE ((p)->mchunk_size)
/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
/* Size of the chunk below P. Only valid if prev_inuse (P). */
-#define prev_size(p) ((p)->mchunk_prev_size)
+#define prev_size(p) HEAP_DEMANGLE_PREVSIZE ((p)->mchunk_prev_size)
/* Set the size of the chunk below P. Only valid if prev_inuse (P). */
-#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
+#define set_prev_size(p, sz) ((p)->mchunk_prev_size = HEAP_MANGLE_PREVSIZE (sz))
/* Ptr to previous physical malloc_chunk. Only valid if prev_inuse (P). */
#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
@@ -1297,13 +1315,16 @@ nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
/* Set size at head, without disturbing its use bit */
-#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
+#define set_head_size(p, s) \
+ ((p)->mchunk_size = ((p)->mchunk_size & SIZE_BITS) | HEAP_MANGLE_SIZE (s))
/* Set size/use field */
-#define set_head(p, s) ((p)->mchunk_size = (s))
+#define set_head(p, s) ((p)->mchunk_size = HEAP_MANGLE_SIZE (s))
/* Set size at footer (only when chunk is not in use) */
-#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
+#define set_foot(p, s) \
+ (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size \
+ = HEAP_MANGLE_PREVSIZE (s))
#pragma GCC poison mchunk_size
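
Note: a round-trip sketch of the rewritten accessors on a single header
word, with simplified stand-in macros rather than the real malloc_chunk
definitions:

    #include <assert.h>
    #include <stdint.h>

    #define SIZE_BITS  0x7
    #define PREV_INUSE 0x1

    static uintptr_t header_guard = 0xabcdef48;  /* example value; low
                                                    three bits clear */

    #define MANGLE(v)   (header_guard ^ (uintptr_t) (v))
    #define DEMANGLE(v) (header_guard ^ (uintptr_t) (v))

    int
    main (void)
    {
      uintptr_t mchunk_size;

      /* set_head: store size plus flag bits, mangled.  */
      mchunk_size = MANGLE (0x40 | PREV_INUSE);
      /* chunksize_nomask demangles; chunksize then masks SIZE_BITS.  */
      assert (DEMANGLE (mchunk_size) == (0x40 | PREV_INUSE));
      assert ((DEMANGLE (mchunk_size) & ~(uintptr_t) SIZE_BITS) == 0x40);

      /* set_head_size: replace the size while keeping the stored flag
         bits; valid because the guard's SIZE_BITS are clear, so the low
         bits of the mangled word are the true flag bits.  */
      mchunk_size = (mchunk_size & SIZE_BITS) | MANGLE (0x80);
      assert (DEMANGLE (mchunk_size) == (0x80 | PREV_INUSE));
      return 0;
    }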
@@ -186,6 +186,10 @@ struct allocation
unsigned int seed;
};
+/* Set once the heap has been initialized, after which
+   malloc_usable_size is expected to return valid results.  */
+static bool malloc_usable_size_valid;
+
/* Check that the allocation task allocation has the expected
contents. */
static void
@@ -221,6 +225,23 @@ check_allocation (const struct allocation *alloc, int index)
putc ('\n', stdout);
errors = true;
}
+
+ if (malloc_usable_size_valid)
+ {
+ size_t usable = malloc_usable_size (alloc->data);
+ if (usable < size)
+ {
+	  printf ("error: allocation %d reported usable size %zu (expected at least %zu)\n",
+ index, usable, size);
+ errors = true;
+ }
+ else if (usable - size > 4096)
+ {
+ printf ("error: allocation %d reported as %zu bytes (requested %zu)\n",
+ index, usable, size);
+ errors = true;
+ }
+ }
}
/* A heap allocation combined with pending actions on it. */
@@ -317,6 +338,10 @@ init_heap (void)
write_message ("error: malloc_set_state failed\n");
_exit (1);
}
+
+ /* The heap has been initialized. We may now call
+ malloc_usable_size. */
+ malloc_usable_size_valid = true;
}
/* Interpose the initialization callback. */
@@ -607,6 +607,10 @@ struct rtld_global_ro
/* List of auditing interfaces. */
struct audit_ifaces *_dl_audit;
unsigned int _dl_naudit;
+
+ /* malloc protection keys. */
+ uintptr_t _dl_malloc_header_guard;
+ uintptr_t _dl_malloc_footer_guard;
};
# define __rtld_global_attribute__
# if IS_IN (rtld)