Rewrite malloc(3).

Jonas 'Sortie' Termansen 2013-10-23 00:40:10 +02:00
parent 4e9746c314
commit 74247eb71e
12 changed files with 1312 additions and 733 deletions


@@ -56,6 +56,12 @@ inttypes/strtoimax.o \
inttypes/strtoumax.o \
inttypes/wcstoimax.o \
inttypes/wcstoumax.o \
malloc/__heap_expand_current_part.o \
malloc/heap_get_paranoia.o \
malloc/heap_init.o \
malloc/__heap_lock.o \
malloc/__heap_unlock.o \
malloc/__heap_verify.o \
signal/sigaddset.o \
signal/sigandset.o \
signal/sigdelset.o \
@@ -162,16 +168,18 @@ stdlib/atol.o \
stdlib/bsearch.o \
stdlib/calloc.o \
stdlib/div.o \
stdlib/heap.o \
stdlib/free.o \
stdlib/labs.o \
stdlib/ldiv.o \
stdlib/llabs.o \
stdlib/lldiv.o \
stdlib/malloc.o \
stdlib/mblen.o \
stdlib/mbstowcs.o \
stdlib/mbtowc.o \
stdlib/qsort.o \
stdlib/qsort_r.o \
stdlib/realloc.o \
stdlib/strtod.o \
stdlib/strtof.o \
stdlib/strtold.o \


@@ -1,6 +1,6 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2012, 2014.
Copyright(C) Jonas 'Sortie' Termansen 2012, 2013, 2014.
This file is part of the Sortix C Library.
@@ -22,13 +22,620 @@
*******************************************************************************/
#ifndef _MALLOC_H
#define _MALLOC_H 1
#ifndef INCLUDE_MALLOC_H
#define INCLUDE_MALLOC_H
#include <sys/cdefs.h>
#if __is_sortix_libc
#include <__/wordsize.h>
#endif
#if __is_sortix_libc
#include <assert.h>
#if !defined(__cplusplus)
#include <stdalign.h>
#include <stdbool.h>
#endif
#include <stddef.h>
#include <stdint.h>
#endif
__BEGIN_DECLS
int heap_get_paranoia(void);
/* TODO: Operations to verify pointers and consistency check the heap. */
/* NOTE: The following declarations are heap internals and are *NOT* part of the
API or ABI in any way. You should in no way depend on this information,
except perhaps for debugging purposes. */
#if __is_sortix_libc
/* This macro governs how paranoid the malloc implementation is. It is not
necessarily a constant, as it may be a runtime function call that decides the
level.
Level 0 - disables all automatic consistency checks.
Level 1 - is afraid that the caller may have corrupted the chunks passed to
free and realloc, and possibly their neighbors if chunk unification
is considered.
Level 2 - is afraid the heap has been damaged, so it verifies the entire heap
before all heap operations.
Level 3 - is afraid that malloc itself is buggy and consistency checks the
entire heap both before and after all heap operations. */
#if 1 /* Constant paranoia, better for code optimization. */
#define PARANOIA 1
#else /* Dynamic paranoia, better for debugging. */
#define PARANOIA_DEFAULT 1
#define PARANOIA heap_get_paranoia()
#endif
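To make the levels concrete, here is a minimal sketch (not part of this header) of how a heap operation could consult the paranoia level; it assumes the __heap_verify() helper declared further down in this file, and the function name is illustrative only:
static void example_checked_heap_operation(void)
{
	if ( 2 <= PARANOIA ) /* level 2 and up: verify the heap before the work */
		__heap_verify();
	/* ... the actual allocation or deallocation work would go here ... */
	if ( 3 <= PARANOIA ) /* level 3: verify the heap again afterwards */
		__heap_verify();
}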
/* TODO: Proper logic here. */
#if !defined(PARANOIA_DEFAULT)
#if PARANOIA < 3
#undef assert
#define assert(x) do { ((void) 0); } while ( 0 )
#define HEAP_NO_ASSERT
#endif
#endif
/* A magic value identifying a heap part edge. The value is chosen such that
it is not aligned the way a real heap chunk pointer would be, so there are
no ambiguous cases. */
#if __WORDSIZE == 32
#define HEAP_PART_MAGIC 0xBEEFBEEF
#elif __WORDSIZE == 64
#define HEAP_PART_MAGIC 0xBEEFBEEFBEEFBEEF
#else
#warning "You need to implement HEAP_CHUNK_MAGIC for your native word width"
#endif
/* A magic value detecting whether a chunk is used. The value is chosen such
that it is not aligned the way a real heap chunk pointer would be, so there
are no ambiguous cases. */
#if __WORDSIZE == 32
#define HEAP_CHUNK_MAGIC 0xDEADDEAD
#elif __WORDSIZE == 64
#define HEAP_CHUNK_MAGIC 0xDEADDEADDEADDEAD
#else
#warning "You need to implement HEAP_CHUNK_MAGIC for your native word width"
#endif
/* The heap is split into a number of parts that each consists of a number of
chunks (used and unused). The heap is normally just a single part, but if
the address space gets too fragmented, it may not be possible to extend the
existing part. In each part of the heap, no two neighboring chunks are both
unused (they would have been combined into a single chunk). It is possible
to fully and unambiguously traverse the entire heap by following it as a
linked list of objects (sometimes with implied relative locations). */
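As an illustration only (not part of this commit), a complete traversal using the state and helpers declared below in this header could look like this sketch; the function name is hypothetical:
static void example_walk_heap(void)
{
	for ( struct heap_part* part = __heap_state.current_part;
	      part;
	      part = part->part_next )
	{
		for ( struct heap_chunk* chunk = heap_part_first_chunk(part);
		      chunk;
		      chunk = heap_chunk_right(chunk) )
		{
			/* heap_chunk_is_used(chunk) tells whether this chunk is an
			   allocation or free space filed in one of the bins. */
		}
	}
}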
/* This structure describes the current heap allocation state. */
struct heap_state
{
struct heap_part* current_part;
struct heap_chunk* bin[sizeof(size_t) * 8];
size_t bin_filled_bitmap;
};
/* This structure is at the very beginning of each heap part. The size variable
includes the size of the surrounding structures. The first chunk or the end
of the part follows immediately (use the magic value to determine which). */
struct heap_part
{
size_t unused[3]; /* Alignment. */
struct heap_part* part_next;
size_t part_magic;
size_t part_size;
};
static_assert(sizeof(struct heap_part) * 8 == 6 * __WORDSIZE,
"sizeof(struct heap_part) * 8 == 6 * __WORDSIZE");
static_assert(alignof(struct heap_part) * 8 == __WORDSIZE,
"alignof(struct heap_part) * 8 == __WORDSIZE");
/* This structure immediately precedes all heap allocations. The size variable
includes the size of the surrounding structures. */
struct heap_chunk
{
size_t chunk_size;
union
{
size_t chunk_magic;
struct heap_chunk* chunk_next;
};
};
static_assert(sizeof(struct heap_chunk) * 8 == 2 * __WORDSIZE,
"sizeof(struct heap_chunk) * 8 == 2 * __WORDSIZE");
static_assert(alignof(struct heap_chunk) * 8 == __WORDSIZE,
"alignof(struct heap_chunk) * 8 == __WORDSIZE");
/* In between these structures lies the heap allocation data. */
/* This structure immediately follows all heap allocations. The size variable
includes the size of the surrounding structures. */
struct heap_chunk_post
{
union
{
size_t chunk_magic;
struct heap_chunk* chunk_prev;
};
size_t chunk_size;
};
static_assert(sizeof(struct heap_chunk_post) * 8 == 2 * __WORDSIZE,
"sizeof(struct heap_chunk_post) * 8 == 2 * __WORDSIZE");
static_assert(alignof(struct heap_chunk_post) * 8 == __WORDSIZE,
"alignof(struct heap_chunk_post) * 8 == __WORDSIZE");
/* This structure is at the very end of each heap part. The size variable
includes the size of the surrounding structures. The last chunk in the heap
part (if any) immediately precedes it. */
struct heap_part_post
{
size_t part_size;
size_t part_magic;
struct heap_part* part_prev;
size_t unused[3]; /* Alignment. */
};
static_assert(sizeof(struct heap_part_post) * 8 == 6 * __WORDSIZE,
"sizeof(struct heap_part_post) * 8 == 6 * __WORDSIZE");
static_assert(alignof(struct heap_part_post) * 8 == __WORDSIZE,
"alignof(struct heap_part_post) * 8 == __WORDSIZE");
/* Global secret variables used internally by the heap. */
extern struct heap_state __heap_state;
/* Internal heap functions. */
bool __heap_expand_current_part(size_t);
void __heap_lock(void);
void __heap_unlock(void);
#if !defined(HEAP_NO_ASSERT)
void __heap_verify(void);
#endif
#if !defined(HEAP_NO_ASSERT)
/* Utility function to verify addresses are well-aligned. */
__attribute__((unused)) static inline
bool heap_is_pointer_aligned(void* addr, size_t size, size_t alignment)
{
/* It is assumed that alignment is a power of two. */
if ( ((uintptr_t) addr) & (alignment - 1) )
return false;
if ( ((uintptr_t) size) & (alignment - 1) )
return false;
return true;
}
#define HEAP_IS_POINTER_ALIGNED(object, size) \
heap_is_pointer_aligned((object), (size), alignof(*(object)))
/* Utility function to trap bad memory accesses. */
__attribute__((unused)) static inline
void heap_test_usable_memory(void* addr, size_t size)
{
for ( size_t i = 0; i < size; i++ )
(void) ((volatile unsigned char*) addr)[i];
}
#endif
/* Utility function for finding the index of the most significant set bit. */
__attribute__((unused)) static inline
size_t heap_bsr(size_t value)
{
/* TODO: There currently is no builtin bsr function. */
return (sizeof(size_t) * 8 - 1) - __builtin_clzl(value);
}
/* Utility function for finding the index of the least significant set bit. */
__attribute__((unused)) static inline
size_t heap_bsf(size_t value)
{
return __builtin_ctzl(value);
}
/* Utility function for getting the minimum size of entries in a bin. */
__attribute__((unused)) static inline
size_t heap_size_of_bin(size_t bin)
{
assert(bin < sizeof(size_t) * 8);
return (size_t) 1 << bin;
}
/* Utility function for determining whether a size even has a bin. */
__attribute__((unused)) static inline
bool heap_size_has_bin(size_t size)
{
return size <= heap_size_of_bin(sizeof(size_t) * 8 - 1);
}
/* Utility function for determining which bin a particular size belongs to. */
__attribute__((unused)) static inline
size_t heap_chunk_size_to_bin(size_t size)
{
assert(size != 0);
assert(heap_size_has_bin(size));
for ( size_t i = 8 * sizeof(size_t) - 1; true; i-- )
if ( heap_size_of_bin(i) <= size )
return i;
assert(false);
__builtin_unreachable();
}
/* Utility function for determining the smallest bin from which an allocation
can be satisfied. This is not the same as heap_chunk_size_to_bin(), which
rounds downwards, while this rounds upwards. */
__attribute__((unused)) static inline
size_t heap_bin_for_allocation(size_t size)
{
assert(heap_size_has_bin(size));
/* TODO: Use the bsr or bsf instruction here, whichever one it is. */
for ( size_t i = 0; i < 8 * sizeof(size_t); i++ )
if ( size <= (size_t) 1 << i )
return i;
assert(false);
__builtin_unreachable();
}
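A worked example (illustrative numbers only) shows why the two functions round in opposite directions:
/* A free chunk of 48 bytes is filed under heap_chunk_size_to_bin(48) == 5,
   because 2^5 = 32 <= 48 < 2^6, so bin 5 only guarantees at least 32 bytes.
   An allocation that needs 48 bytes instead starts its search at
   heap_bin_for_allocation(48) == 6, because every chunk in bin 6 is at
   least 2^6 = 64 >= 48 bytes and therefore always satisfies the request. */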
/* Rounds an allocation size up to a multiple of what malloc offers. */
__attribute__((unused)) static inline size_t heap_align(size_t value)
{
static_assert(sizeof(struct heap_chunk) + sizeof(struct heap_chunk_post) == 4 * __WORDSIZE / 8,
"sizeof(struct heap_chunk) + sizeof(struct heap_chunk_post) == 4 * __WORDSIZE / 8");
size_t mask = 4 * sizeof(size_t) - 1;
return -(-value & ~mask);
}
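For instance, on a 64-bit system (a minimal worked example), the mask is 4 * 8 - 1 = 31, so sizes are rounded up to a multiple of 32 bytes:
/* heap_align(100) == 128, heap_align(33) == 64, heap_align(32) == 32. */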
/* Returns the trailing structure following the given part. */
__attribute__((unused)) static inline
struct heap_part_post* heap_part_to_post(struct heap_part* part)
{
assert(HEAP_IS_POINTER_ALIGNED(part, part->part_size));
return (struct heap_part_post*)
((uintptr_t) part + part->part_size - sizeof(struct heap_part_post));
}
/* Returns the part associated with the part trailing structure. */
__attribute__((unused)) static inline
struct heap_part* heap_post_to_part(struct heap_part_post* post_part)
{
assert(HEAP_IS_POINTER_ALIGNED(post_part, post_part->part_size));
return (struct heap_part*)
((uintptr_t) post_part - post_part->part_size + sizeof(struct heap_part));
}
/* Returns the ending address of a heap part. */
__attribute__((unused)) static inline
void* heap_part_end(struct heap_part* part)
{
assert(HEAP_IS_POINTER_ALIGNED(part, part->part_size));
return (uint8_t*) part + part->part_size;
}
/* Returns whether this chunk is currently in use. */
__attribute__((unused)) static inline
bool heap_chunk_is_used(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
return chunk->chunk_magic == HEAP_CHUNK_MAGIC;
}
/* Returns the trailing structure following the given chunk. */
__attribute__((unused)) static inline
struct heap_chunk_post* heap_chunk_to_post(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
return (struct heap_chunk_post*)
((uintptr_t) chunk + chunk->chunk_size - sizeof(struct heap_chunk_post));
}
/* Returns the chunk associated with the chunk trailing structure. */
__attribute__((unused)) static inline
struct heap_chunk* heap_post_to_chunk(struct heap_chunk_post* post_chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(post_chunk, post_chunk->chunk_size));
return (struct heap_chunk*)
((uintptr_t) post_chunk - post_chunk->chunk_size + sizeof(struct heap_chunk));
}
/* Returns the data inside the chunk. */
__attribute__((unused)) static inline
uint8_t* heap_chunk_to_data(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
return (uint8_t*) chunk + sizeof(struct heap_chunk);
}
/* Returns the size of the data inside the chunk. */
__attribute__((unused)) static inline
size_t heap_chunk_data_size(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
return chunk->chunk_size - (sizeof(struct heap_chunk) + sizeof(struct heap_chunk_post));
}
/* Returns the chunk whose data starts at the given address. */
__attribute__((unused)) static inline
struct heap_chunk* heap_data_to_chunk(uint8_t* data)
{
struct heap_chunk* chunk =
(struct heap_chunk*) (data - sizeof(struct heap_chunk));
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
return chunk;
}
/* Returns the chunk to the left of this one or NULL if none. */
__attribute__((unused)) static inline
struct heap_chunk* heap_chunk_left(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
struct heap_chunk_post* left_post = (struct heap_chunk_post*)
((uintptr_t) chunk - sizeof(struct heap_chunk_post));
assert(HEAP_IS_POINTER_ALIGNED(left_post, 0));
if ( left_post->chunk_magic == HEAP_PART_MAGIC )
return NULL;
return heap_post_to_chunk(left_post);
}
/* Returns the chunk to the right of this one or NULL if none. */
__attribute__((unused)) static inline
struct heap_chunk* heap_chunk_right(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
struct heap_chunk* right = (struct heap_chunk*)
((uintptr_t) chunk + chunk->chunk_size);
assert(HEAP_IS_POINTER_ALIGNED(right, 0));
if ( right->chunk_magic == HEAP_PART_MAGIC )
return NULL;
return right;
}
/* Formats a used chunk at the given location. */
__attribute__((unused)) static inline
struct heap_chunk* heap_chunk_format(uint8_t* addr, size_t size)
{
assert(heap_is_pointer_aligned(addr, size, alignof(struct heap_chunk)));
#if !defined(HEAP_NO_ASSERT)
heap_test_usable_memory(addr, size);
#endif
struct heap_chunk* chunk = (struct heap_chunk*) addr;
chunk->chunk_size = size;
chunk->chunk_magic = HEAP_CHUNK_MAGIC;
struct heap_chunk_post* post = heap_chunk_to_post(chunk);
post->chunk_size = size;
post->chunk_magic = HEAP_CHUNK_MAGIC;
return chunk;
}
/* Returns the first chunk in a part. */
__attribute__((unused)) static inline
struct heap_chunk* heap_part_first_chunk(struct heap_part* part)
{
assert(HEAP_IS_POINTER_ALIGNED(part, part->part_size));
struct heap_chunk* chunk =
(struct heap_chunk*) ((uintptr_t) part + sizeof(struct heap_part));
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
if ( chunk->chunk_magic == HEAP_PART_MAGIC )
return NULL;
return chunk;
}
/* Returns the last chunk in a part. */
__attribute__((unused)) static inline
struct heap_chunk* heap_part_last_chunk(struct heap_part* part)
{
assert(HEAP_IS_POINTER_ALIGNED(part, part->part_size));
struct heap_part_post* post = heap_part_to_post(part);
assert(HEAP_IS_POINTER_ALIGNED(post, post->part_size));
struct heap_chunk_post* chunk_post =
(struct heap_chunk_post*) ((uintptr_t) post - sizeof(struct heap_chunk_post));
assert(HEAP_IS_POINTER_ALIGNED(chunk_post, chunk_post->chunk_size));
if ( chunk_post->chunk_magic == HEAP_PART_MAGIC )
return NULL;
return heap_post_to_chunk(chunk_post);
}
/* Inserts a chunk into the heap and marks it as unused. */
__attribute__((unused)) static inline
void heap_insert_chunk(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
struct heap_chunk_post* chunk_post = heap_chunk_to_post(chunk);
assert(HEAP_IS_POINTER_ALIGNED(chunk_post, chunk_post->chunk_size));
assert(chunk->chunk_size == chunk_post->chunk_size);
/* Decide which bin this chunk belongs to. */
assert(heap_size_has_bin(chunk->chunk_size));
size_t chunk_bin = heap_chunk_size_to_bin(chunk->chunk_size);
/* Insert the chunk into this bin, overwriting the magic values that marked it as used. */
if ( __heap_state.bin[chunk_bin] )
heap_chunk_to_post(__heap_state.bin[chunk_bin])->chunk_prev = chunk;
chunk->chunk_next = __heap_state.bin[chunk_bin];
chunk_post->chunk_prev = NULL;
__heap_state.bin[chunk_bin] = chunk;
__heap_state.bin_filled_bitmap |= heap_size_of_bin(chunk_bin);
}
/* Removes a chunk from the heap and marks it as used. */
__attribute__((unused)) static inline
void heap_remove_chunk(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
assert(chunk->chunk_magic != HEAP_CHUNK_MAGIC);
assert(chunk->chunk_magic != HEAP_PART_MAGIC);
struct heap_chunk_post* chunk_post = heap_chunk_to_post(chunk);
assert(HEAP_IS_POINTER_ALIGNED(chunk_post, chunk_post->chunk_size));
assert(chunk->chunk_size == chunk_post->chunk_size);
assert(chunk_post->chunk_magic != HEAP_CHUNK_MAGIC);
assert(chunk_post->chunk_magic != HEAP_PART_MAGIC);
/* Decide which bin this chunk belongs to. */
assert(heap_size_has_bin(chunk->chunk_size));
size_t chunk_bin = heap_chunk_size_to_bin(chunk->chunk_size);
assert(__heap_state.bin_filled_bitmap & heap_size_of_bin(chunk_bin));
/* Unlink the chunk from its current bin. */
if ( chunk == __heap_state.bin[chunk_bin] )
{
assert(!chunk_post->chunk_prev);
if ( !(__heap_state.bin[chunk_bin] = chunk->chunk_next) )
__heap_state.bin_filled_bitmap ^= heap_size_of_bin(chunk_bin);
}
else
{
assert(chunk_post->chunk_prev);
heap_chunk_to_post(chunk)->chunk_prev->chunk_next = chunk->chunk_next;
}
if ( chunk->chunk_next )
heap_chunk_to_post(chunk->chunk_next)->chunk_prev = chunk_post->chunk_prev;
/* Mark the chunk as used. */
chunk->chunk_magic = HEAP_CHUNK_MAGIC;
chunk_post->chunk_magic = HEAP_CHUNK_MAGIC;
}
/* Decides whether the chunk can be split into two. */
__attribute__((unused)) static inline
bool heap_can_split_chunk(struct heap_chunk* chunk, size_t needed)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
assert(needed <= chunk->chunk_size);
size_t minimum_chunk_size = sizeof(struct heap_chunk) +
sizeof(struct heap_chunk_post);
return minimum_chunk_size <= chunk->chunk_size - needed;
}
/* Splits a chunk into two - assumes it can be split. */
__attribute__((unused)) static inline
void heap_split_chunk(struct heap_chunk* chunk, size_t needed)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
#if !defined(HEAP_NO_ASSERT)
heap_test_usable_memory(chunk, chunk->chunk_size);
#endif
assert(heap_can_split_chunk(chunk, needed));
bool chunk_is_used = heap_chunk_is_used(chunk);
if ( !chunk_is_used )
heap_remove_chunk(chunk);
size_t surplus_amount = chunk->chunk_size - needed;
heap_chunk_format((uint8_t*) chunk, needed);
struct heap_chunk* surplus =
heap_chunk_format((uint8_t*) chunk + needed, surplus_amount);
if ( !chunk_is_used )
heap_insert_chunk(chunk);
heap_insert_chunk(surplus);
}
/* Combine a chunk with its neighbors if they are all unused. */
__attribute__((unused)) static inline
struct heap_chunk* heap_chunk_combine_neighbors(struct heap_chunk* chunk)
{
assert(HEAP_IS_POINTER_ALIGNED(chunk, chunk->chunk_size));
#if !defined(HEAP_NO_ASSERT)
heap_test_usable_memory(chunk, chunk->chunk_size);
#endif
struct heap_chunk* left_chunk = heap_chunk_left(chunk);
struct heap_chunk* right_chunk = heap_chunk_right(chunk);
if ( right_chunk )
{
#if !defined(HEAP_NO_ASSERT)
heap_test_usable_memory(right_chunk, right_chunk->chunk_size);
#endif
assert(HEAP_IS_POINTER_ALIGNED(right_chunk, right_chunk->chunk_size));
}
/* Attempt to combine with both the left and right chunk. */
if ( (left_chunk && !heap_chunk_is_used(left_chunk)) &&
(right_chunk && !heap_chunk_is_used(right_chunk)) )
{
assert(HEAP_IS_POINTER_ALIGNED(left_chunk, left_chunk->chunk_size));
assert(HEAP_IS_POINTER_ALIGNED(right_chunk, right_chunk->chunk_size));
#if !defined(HEAP_NO_ASSERT)
heap_test_usable_memory(left_chunk, left_chunk->chunk_size);
heap_test_usable_memory(right_chunk, right_chunk->chunk_size);
#endif
heap_remove_chunk(chunk);
heap_remove_chunk(left_chunk);
heap_remove_chunk(right_chunk);
size_t result_size = left_chunk->chunk_size + chunk->chunk_size + right_chunk->chunk_size;
struct heap_chunk* result = heap_chunk_format((uint8_t*) left_chunk, result_size);
heap_insert_chunk(result);
return result;
}
/* Attempt to combine with the left chunk. */
if ( left_chunk && !heap_chunk_is_used(left_chunk) )
{
assert(HEAP_IS_POINTER_ALIGNED(left_chunk, left_chunk->chunk_size));
#if !defined(HEAP_NO_ASSERT)
heap_test_usable_memory(left_chunk, left_chunk->chunk_size);
#endif
heap_remove_chunk(chunk);
heap_remove_chunk(left_chunk);
size_t result_size = left_chunk->chunk_size + chunk->chunk_size;
struct heap_chunk* result = heap_chunk_format((uint8_t*) left_chunk, result_size);
heap_insert_chunk(result);
return result;
}
/* Attempt to combine with the right chunk. */
if ( right_chunk && !heap_chunk_is_used(right_chunk) )
{
assert(HEAP_IS_POINTER_ALIGNED(right_chunk, right_chunk->chunk_size));
#if !defined(HEAP_NO_ASSERT)
heap_test_usable_memory(right_chunk, right_chunk->chunk_size);
#endif
heap_remove_chunk(chunk);
heap_remove_chunk(right_chunk);
size_t result_size = chunk->chunk_size + right_chunk->chunk_size;
struct heap_chunk* result = heap_chunk_format((uint8_t*) chunk, result_size);
heap_insert_chunk(result);
return result;
}
return chunk;
}
#endif
__END_DECLS
#if __is_sortix_libc
#include <assert.h>
#endif
#endif


@@ -0,0 +1,183 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
malloc/__heap_expand_current_part.cpp
Attempts to extend the current part of the heap or create a new part.
*******************************************************************************/
#if __STDC_HOSTED__
#include <sys/mman.h>
#endif
#include <assert.h>
#include <malloc.h>
#include <stdint.h>
#include <unistd.h>
#if __is_sortix_kernel
#include <sortix/mman.h>
#endif
#if __is_sortix_kernel
#include <sortix/kernel/addralloc.h>
#include <sortix/kernel/kernel.h>
#include <sortix/kernel/memorymanagement.h>
#endif
extern "C" bool __heap_expand_current_part(size_t requested_expansion)
{
// Determine the current page size.
#if __is_sortix_kernel
size_t page_size = Sortix::Page::Size();
#else
size_t page_size = getpagesize();
#endif
// Determine how much memory and how many pages are required. The worst
// case is that we'll need to create a new part, so allocate enough
// memory for that case.
size_t needed_expansion =
sizeof(struct heap_part) + requested_expansion + sizeof(heap_part_post);
size_t needed_expansion_pages =
needed_expansion / page_size + (needed_expansion % page_size ? 1 : 0);
// Allocate at least a few pages to prevent inefficient allocations.
size_t minimum_part_pages = 0;
if ( __heap_state.current_part )
minimum_part_pages = __heap_state.current_part->part_size / page_size;
if ( 256 < minimum_part_pages )
minimum_part_pages = 256;
if ( minimum_part_pages < 4 )
minimum_part_pages = 4;
if ( needed_expansion_pages < minimum_part_pages )
needed_expansion_pages = minimum_part_pages;
// Calculate exactly how many bytes are added to the heap.
size_t num_bytes = needed_expansion_pages * page_size;
#if __is_sortix_kernel
// Decide where we would like to add memory to the heap.
uintptr_t mapto = Sortix::GetHeapUpper();
assert(!__heap_state.current_part ||
(uintptr_t) heap_part_end(__heap_state.current_part) == mapto);
void* mapping = (void*) mapto;
// Attempt to allocate the needed virtual address space such that we can put
// memory there to extend the heap with.
if ( !(num_bytes = Sortix::ExpandHeap(num_bytes)) )
return false;
// Attempt to map actual memory at our new virtual addresses.
int prot = PROT_KREAD | PROT_KWRITE;
enum Sortix::page_usage page_usage = Sortix::PAGE_USAGE_KERNEL_HEAP;
if ( !Sortix::Memory::MapRange(mapto, num_bytes, prot, page_usage) )
return false;
Sortix::Memory::Flush();
bool ideal_allocation = true;
#else
// Decide where we'd like to allocate memory. Ideally, we'd like to extend
// an existing part, but if there is none, then simply somewhere near the
// start of the address space (so it can grow upwards) will do.
void* suggestion = __heap_state.current_part ?
heap_part_end(__heap_state.current_part) :
(void*) (0x1 * page_size);
// Attempt to allocate memory for the new part.
void* mapping = mmap(suggestion, num_bytes, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
// We are out of memory if we can't make a suitable allocation.
if ( mapping == MAP_FAILED )
return false;
bool ideal_allocation = mapping == suggestion;
#endif
// Extend the current part if we managed to do our ideal allocation.
if ( __heap_state.current_part && ideal_allocation )
{
struct heap_part* part = __heap_state.current_part;
struct heap_part_post* old_post = heap_part_to_post(part);
struct heap_part* part_prev = old_post->part_prev;
uint8_t* new_chunk_begin = (uint8_t*) mapping - sizeof(struct heap_part_post);
size_t new_chunk_size = num_bytes;
// Remove the last chunk in the part from the heap if it is unused; we
// will then simply include its bytes in our new chunk below.
struct heap_chunk* last_chunk;
if ( (last_chunk = heap_part_last_chunk(part)) &&
!heap_chunk_is_used(last_chunk) )
{
heap_remove_chunk(last_chunk);
new_chunk_begin = (uint8_t*) last_chunk;
new_chunk_size += last_chunk->chunk_size;
}
// Extend the part by moving the end part structure further ahead.
part->part_size += num_bytes;
struct heap_part_post* post = heap_part_to_post(part);
post->part_magic = HEAP_PART_MAGIC;
post->part_prev = part_prev;
post->part_size = part->part_size;
post->unused[0] = 0;
post->unused[1] = 0;
post->unused[2] = 0;
// Format a new heap chunk that contains the new space, possibly unified
// with whatever unused space previously existed.
assert(requested_expansion <= new_chunk_size);
heap_insert_chunk(heap_chunk_format(new_chunk_begin, new_chunk_size));
return true;
}
// Format a new part in our new allocation.
struct heap_part* part = (struct heap_part*) mapping;
part->unused[0] = 0;
part->unused[1] = 0;
part->unused[2] = 0;
part->part_size = num_bytes;
part->part_magic = HEAP_PART_MAGIC;
struct heap_part_post* post = heap_part_to_post(part);
post->part_magic = HEAP_PART_MAGIC;
post->part_size = part->part_size;
post->unused[0] = 0;
post->unused[1] = 0;
post->unused[2] = 0;
// Insert our part into the part linked list.
if ( (part->part_next = __heap_state.current_part) )
heap_part_to_post(__heap_state.current_part)->part_prev = part;
post->part_prev = NULL;
// Insert a new chunk into the heap containing the unused space in the part.
uint8_t* new_chunk_begin = (uint8_t*) mapping + sizeof(struct heap_part);
size_t new_chunk_size =
num_bytes - (sizeof(struct heap_part) + sizeof(struct heap_part_post));
assert(requested_expansion <= new_chunk_size);
heap_insert_chunk(heap_chunk_format(new_chunk_begin, new_chunk_size));
__heap_state.current_part = part;
return true;
}
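To make the sizing policy concrete, here is a hedged worked example with illustrative numbers, assuming a 64-bit system with 4096-byte pages:
// Example: requested_expansion = 100 bytes.
// needed_expansion = 48 (struct heap_part) + 100 + 48 (struct heap_part_post)
//                  = 196 bytes, which fits in 1 page, but the minimum of 4
//                  pages applies, so num_bytes = 4 * 4096 = 16384 bytes.
// If a brand new part must be created, it offers a single free chunk of
// 16384 - 96 = 16288 bytes; if the current part is extended instead, all
// 16384 bytes (plus any trailing unused chunk) become one new free chunk.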


@@ -0,0 +1,47 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
malloc/__heap_lock.cpp
Locks the dynamic heap.
*******************************************************************************/
#include <malloc.h>
#if __STDC_HOSTED__
#include <pthread.h>
#endif
#if __is_sortix_kernel
#include <sortix/kernel/kthread.h>
#endif
#if __STDC_HOSTED__
extern "C" pthread_mutex_t __heap_mutex;
#elif __is_sortix_kernel
extern "C" Sortix::kthread_mutex_t __heap_mutex;
#endif
extern "C" void __heap_lock(void)
{
#if __STDC_HOSTED__
pthread_mutex_lock(&__heap_mutex);
#elif __is_sortix_kernel
Sortix::kthread_mutex_lock(&__heap_mutex);
#endif
}


@@ -0,0 +1,47 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
malloc/__heap_unlock.cpp
Unlocks the dynamic heap.
*******************************************************************************/
#include <malloc.h>
#if __STDC_HOSTED__
#include <pthread.h>
#endif
#if __is_sortix_kernel
#include <sortix/kernel/kthread.h>
#endif
#if __STDC_HOSTED__
extern "C" pthread_mutex_t __heap_mutex;
#elif __is_sortix_kernel
extern "C" Sortix::kthread_mutex_t __heap_mutex;
#endif
extern "C" void __heap_unlock(void)
{
#if __STDC_HOSTED__
pthread_mutex_unlock(&__heap_mutex);
#elif __is_sortix_kernel
Sortix::kthread_mutex_unlock(&__heap_mutex);
#endif
}


@@ -0,0 +1,42 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
malloc/__heap_verify.cpp
Perform a heap consistency check.
*******************************************************************************/
#include <assert.h>
#include <malloc.h>
#include <stdlib.h>
extern "C" void __heap_verify()
{
for ( size_t i = 0; i < sizeof(size_t) * 8 - 1; i++ )
{
if ( __heap_state.bin_filled_bitmap & heap_size_of_bin(i) )
{
assert(__heap_state.bin[i] != NULL);
}
else
{
assert(__heap_state.bin[i] == NULL);
}
}
}


@@ -0,0 +1,45 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
malloc/heap_get_paranoia.cpp
Returns how paranoid the heap implementation should be.
*******************************************************************************/
#include <malloc.h>
#include <stdlib.h>
extern "C" int heap_get_paranoia(void)
{
#if defined(PARANOIA_DEFAULT) && !__STDC_HOSTED__
return PARANOIA_DEFAULT;
#elif defined(PARANOIA_DEFAULT) && __STDC_HOSTED__
static int cached_paranoia = -1;
if ( cached_paranoia < 0 )
{
if ( const char* paranoia_str = getenv("LIBC_MALLOC_PARANOIA") )
cached_paranoia = atoi(paranoia_str);
else
cached_paranoia = PARANOIA_DEFAULT;
}
return cached_paranoia;
#else
return PARANOIA;
#endif
}
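For example, running a program with LIBC_MALLOC_PARANOIA=3 in its environment would request full verification before and after every heap operation, per the level descriptions in <malloc.h>, assuming the library was built with dynamic paranoia (PARANOIA_DEFAULT defined).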

libc/malloc/heap_init.cpp

@@ -0,0 +1,41 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
malloc/heap_init.cpp
Initializes the dynamic heap.
*******************************************************************************/
#include <malloc.h>
#if __STDC_HOSTED__
#include <pthread.h>
#endif
#include <string.h>
#if __is_sortix_kernel
#include <sortix/kernel/kthread.h>
#endif
extern "C" { struct heap_state __heap_state; }
#if __STDC_HOSTED__
extern "C" { pthread_mutex_t __heap_mutex = PTHREAD_MUTEX_INITIALIZER; }
#elif __is_sortix_kernel
extern "C" { Sortix::kthread_mutex_t __heap_mutex = Sortix::KTHREAD_MUTEX_INITIALIZER; }
#endif

libc/stdlib/free.cpp

@@ -0,0 +1,57 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
stdlib/free.cpp
Returns a chunk of memory to the dynamic memory heap.
*******************************************************************************/
#include <malloc.h>
#include <stdlib.h>
#if __is_sortix_kernel
#include <sortix/kernel/kernel.h>
#endif
#if defined(HEAP_NO_ASSERT)
#define __heap_verify() ((void) 0)
#undef assert
#define assert(x) do { ((void) 0); } while ( 0 )
#endif
extern "C" void free(void* addr)
{
if ( !addr )
return;
__heap_lock();
__heap_verify();
// Retrieve the chunk that contains this allocation.
struct heap_chunk* chunk = heap_data_to_chunk((uint8_t*) addr);
// Return the chunk to the heap.
heap_insert_chunk(chunk);
// Combine the chunk with its left and right neighbors if they are unused.
heap_chunk_combine_neighbors(chunk);
__heap_verify();
__heap_unlock();
}


@@ -1,729 +0,0 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
stdlib/heap.cpp
Functions that allocate/free memory from a dynamic memory heap.
*******************************************************************************/
#include <sys/mman.h>
#if __STDC_HOSTED__
#include <error.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#endif
#include <assert.h>
#include <errno.h>
#include <malloc.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#define PARANOIA 1
#if defined(__is_sortix_kernel)
#include <sortix/kernel/decl.h>
#include <sortix/kernel/addralloc.h>
#include <sortix/kernel/kthread.h>
#include <sortix/kernel/log.h>
#include <sortix/kernel/memorymanagement.h>
#include <sortix/kernel/panic.h>
#endif
//
// This first section is just magic compiler/platform stuff; you should
// skip ahead to the actual algorithm.
//
#if defined(__x86_64__)
const size_t MAGIC = 0xDEADDEADDEADDEADUL;
const size_t ALIGNMENT = 16UL;
#else
const size_t MAGIC = 0xDEADDEADUL;
const size_t ALIGNMENT = 8UL;
#endif
const size_t PAGESIZE = 4UL * 1024UL; // 4 KiB
const size_t NUMBINS = 8UL * sizeof(size_t);
static uintptr_t wilderness;
#if defined(__is_sortix_kernel)
static uintptr_t GetHeapStart()
{
return Sortix::GetHeapLower();
}
static void FreeMemory(uintptr_t where, size_t bytes)
{
assert(Sortix::Page::IsAligned(where + bytes));
while ( bytes )
{
addr_t page = Sortix::Memory::Unmap(where);
Sortix::Page::Put(page, Sortix::PAGE_USAGE_KERNEL_HEAP);
bytes -= PAGESIZE;
where += PAGESIZE;
}
}
static bool AllocateMemory(uintptr_t where, size_t bytes)
{
assert(Sortix::Page::IsAligned(where + bytes));
uintptr_t pos = where;
while ( bytes )
{
addr_t page = Sortix::Page::Get(Sortix::PAGE_USAGE_KERNEL_HEAP);
if ( !page )
{
FreeMemory(where, pos-where);
return false;
}
if ( !Sortix::Memory::Map(page, pos, PROT_KREAD | PROT_KWRITE) )
{
Sortix::Page::Put(page, Sortix::PAGE_USAGE_KERNEL_HEAP);
FreeMemory(where, pos-where);
return false;
}
bytes -= PAGESIZE;
pos += PAGESIZE;
}
return true;
}
static bool ExtendHeap(size_t bytesneeded)
{
size_t got_bytes = Sortix::ExpandHeap(bytesneeded);
if ( !got_bytes )
return false;
assert(bytesneeded <= got_bytes);
if ( !AllocateMemory(wilderness, got_bytes) )
{
Sortix::ShrinkHeap(got_bytes);
return false;
}
return true;
}
#else
static uintptr_t GetHeapStart()
{
uintptr_t base = (uintptr_t) sbrk(0);
uintptr_t unaligned = base % ALIGNMENT;
if ( unaligned )
{
sbrk(ALIGNMENT-unaligned);
}
uintptr_t result = (uintptr_t) sbrk(0);
return result;
}
static bool ExtendHeap(size_t bytesneeded)
{
void* newheapend = sbrk(bytesneeded);
return newheapend != (void*) -1UL;
}
#endif
// TODO: BitScanForward and BitScanReverse are x86 instructions, but
// directly using them messes with the optimizer. Once possible, use
// the inline assembly instead of the C-version of the functions.
// Returns the index of the most significant set bit.
inline size_t BSR(size_t Value)
{
#if 1
assert(Value > 0);
for ( size_t I = 8*sizeof(size_t); I > 0; I-- )
{
if ( Value & ( 1UL << (I-1) ) ) { return I-1; }
}
return 0;
#else
size_t Result;
asm("bsr %0, %1" : "=r"(Result) : "r"(Value));
return Result;
#endif
}
// Returns the index of the least significant set bit.
inline size_t BSF(size_t Value)
{
#if 1
assert(Value > 0);
for ( size_t I = 0; I < 8*sizeof(size_t); I++ )
{
if ( Value & ( 1UL << I ) ) { return I; }
}
return 0;
#else
size_t Result;
asm("bsf %0, %1" : "=r"(Result) : "r"(Value));
return Result;
#endif
}
//
// Now for some helper functions and structures.
//
struct Chunk;
struct Trailer;
#if __STDC_HOSTED__
pthread_mutex_t heaplock = PTHREAD_MUTEX_INITIALIZER;
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_t heaplock = Sortix::KTHREAD_MUTEX_INITIALIZER;
#endif
// The location where the heap originally grows from.
uintptr_t heapstart;
// If heap grows down: Location of the first mapped page.
// If heap grows up: Location of the first not-mapped page.
#if 0 /* forward declared above */
static uintptr_t wilderness;
#endif
// How many bytes remain in the wilderness.
size_t wildernesssize = 0;
// How many bytes the heap is allowed to grow to (including wilderness).
size_t heapmaxsize = SIZE_MAX;
// How many bytes are currently used for chunks in the heap, which
// excludes the wilderness.
size_t heapsize = 0;
// How many bytes of actual storage the heap contains.
size_t heapstorage = 0;
// bins[N] contain a linked list of chunks that are at least 2^(N+1)
// bytes, but less than 2^(N+2) bytes. By selecting the proper bin in
// constant time, we can allocate chunks in constant time.
Chunk* bins[NUMBINS];
// Bit N is set if bin[N] contains a chunk.
size_t bincontainschunks = 0;
static bool IsGoodHeapPointer(void* ptr, size_t size)
{
uintptr_t ptrlower = (uintptr_t) ptr;
uintptr_t ptrupper = ptrlower + size;
uintptr_t heaplower = heapstart;
uintptr_t heapupper = wilderness;
return heaplower <= ptrlower && ptrupper <= heapupper;
}
// A preamble to every chunk providing meta-information.
struct Chunk
{
public:
size_t size; // Includes size of Chunk and Trailer
union
{
size_t magic;
Chunk* nextunused;
};
public:
bool IsUsed() { return magic == MAGIC; }
Trailer* GetTrailer();
Chunk* LeftNeighbor();
Chunk* RightNeighbor();
bool IsSane();
};
// A trailer to every chunk providing meta-information.
struct Trailer
{
public:
union
{
size_t magic;
Chunk* prevunused;
};
size_t size; // Includes size of Chunk and Trailer
public:
bool IsUsed() { return magic == MAGIC; }
Chunk* GetChunk();
};
const size_t OVERHEAD = sizeof(Chunk) + sizeof(Trailer);
// This is how a real chunk actually looks:
//struct RealChunk
//{
// Chunk header;
// byte data[...];
// Trailer footer;
// };
Trailer* Chunk::GetTrailer()
{
return (Trailer*) (((uintptr_t) this) + size - sizeof(Trailer));
}
Chunk* Chunk::LeftNeighbor()
{
Trailer* trailer = (Trailer*) (((uintptr_t) this) - sizeof(Trailer));
return trailer->GetChunk();
}
Chunk* Chunk::RightNeighbor()
{
return (Chunk*) (((uintptr_t) this) + size);
}
Chunk* Trailer::GetChunk()
{
return (Chunk*) (((uintptr_t) this) + sizeof(Trailer) - size);
}
bool Chunk::IsSane()
{
if ( !IsGoodHeapPointer(this, sizeof(*this)) )
return false;
if ( !size ) { return false; }
size_t binindex = BSR(size);
Trailer* trailer = GetTrailer();
if ( !IsGoodHeapPointer(trailer, sizeof(*trailer)) )
return false;
if ( trailer->size != size ) { return false; }
if ( IsUsed() )
{
if ( bins[binindex] == this ) { return false; }
if ( magic != MAGIC || trailer->magic != magic ) { return false; }
}
if ( !IsUsed() )
{
if ( ((uintptr_t) nextunused) & (ALIGNMENT-1UL) ) { return false; }
if ( ((uintptr_t) trailer->prevunused) & (ALIGNMENT-1UL) ) { return false; }
if ( nextunused && !IsGoodHeapPointer(nextunused->GetTrailer(),
sizeof(Trailer)) )
return false;
if ( nextunused && nextunused->GetTrailer()->prevunused != this ) { return false; }
if ( trailer->prevunused )
{
if ( !IsGoodHeapPointer(trailer->prevunused,
sizeof(*trailer->prevunused)) )
return false;
if ( bins[binindex] == this ) { return false; }
if ( trailer->prevunused->nextunused != this ) { return false; }
}
if ( !trailer->prevunused )
{
if ( bins[binindex] != this ) { return false; }
if ( !(bincontainschunks & (1UL << binindex)) ) { return false; }
}
}
return true;
}
static void InsertChunk(Chunk* chunk)
{
// Insert the chunk into the right bin.
size_t binindex = BSR(chunk->size);
chunk->GetTrailer()->prevunused = NULL;
chunk->nextunused = bins[binindex];
if ( chunk->nextunused )
{
assert(chunk->nextunused->IsSane());
chunk->nextunused->GetTrailer()->prevunused = chunk;
}
bins[binindex] = chunk;
bincontainschunks |= (1UL << binindex);
assert(chunk->IsSane());
}
__attribute__((unused))
static bool ValidateHeap()
{
bool foundbin[NUMBINS];
for ( size_t i = 0; i < NUMBINS; i++ ) { foundbin[i] = false; }
Chunk* chunk = (Chunk*) heapstart;
while ( (uintptr_t) chunk < wilderness - wildernesssize )
{
size_t timesfound = 0;
for ( size_t i = 0; i < NUMBINS; i++ )
{
if ( chunk == bins[i] ) { foundbin[i] = true; timesfound++; }
}
if ( 1 < timesfound ) { return false; }
if ( !chunk->IsSane() ) { return false; }
chunk = chunk->RightNeighbor();
}
for ( size_t i = 0; i < NUMBINS; i++ )
{
if ( !bins[i] )
{
if ( foundbin[i] ) { return false; }
continue;
}
if ( !foundbin[i] ) { return false; }
if ( !bins[i]->IsSane() ) { return false; }
}
return true;
}
// Attempts to expand the wilderness such that it contains at least
// bytesneeded bytes. This is done by mapping new pages into the
// virtual address space.
static bool ExpandWilderness(size_t bytesneeded)
{
if ( bytesneeded <= wildernesssize ) { return true; }
// Delayed initialization of the heap.
if ( heapstart == 0 && wilderness == 0 && !heapsize )
heapstart = wilderness = GetHeapStart();
bytesneeded -= wildernesssize;
// Align the increase on page boundaries.
const size_t PAGEMASK = ~(PAGESIZE - 1UL);
bytesneeded = ( bytesneeded + PAGESIZE - 1UL ) & PAGEMASK;
assert(bytesneeded >= PAGESIZE);
// TODO: Overflow MAY happen here!
if ( heapmaxsize <= heapsize + wildernesssize + bytesneeded )
return errno = ENOMEM, true;
uintptr_t newwilderness = wilderness + bytesneeded;
// Attempt to map pages so our wilderness grows.
if ( !ExtendHeap(bytesneeded) )
return false;
wildernesssize += bytesneeded;
wilderness = newwilderness;
return true;
}
extern "C" void* malloc(size_t size)
{
#if __STDC_HOSTED__
pthread_mutex_lock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_lock(&heaplock);
#endif
#if 2 <= PARANOIA
assert(ValidateHeap());
#endif
// The size field keeps both the allocation and meta information.
size += OVERHEAD;
// Round up to nearest alignment.
size = (size + ALIGNMENT - 1UL) & (~(ALIGNMENT-1UL));
// Find the index of the smallest usable bin.
size_t minbinindex = BSR(size-1UL)+1UL;
// Make a bitmask that filters away all bins that are too small.
size_t minbinmask = ~((1UL << minbinindex) - 1UL);
// Figure out which bins are usable for our chunk.
size_t availablebins = bincontainschunks & minbinmask;
if ( availablebins )
{
// Find the smallest available bin.
size_t binindex = BSF(availablebins);
Chunk* chunk = bins[binindex];
assert(chunk->IsSane());
bins[binindex] = chunk->nextunused;
size_t binsize = 1UL << binindex;
// Mark the bin as empty if we emptied it.
if ( !bins[binindex] )
{
bincontainschunks ^= binsize;
}
else
{
Trailer* trailer = bins[binindex]->GetTrailer();
trailer->prevunused = NULL;
}
assert(!bins[binindex] || bins[binindex]->IsSane());
// If we don't use the entire chunk.
size_t original_chunk_size = chunk->size;
if ( OVERHEAD <= original_chunk_size - size )
{
size_t left = original_chunk_size - size;
chunk->size -= left;
chunk->GetTrailer()->size = chunk->size;
Chunk* leftchunk = chunk->RightNeighbor();
leftchunk->size = left;
Trailer* lefttrailer = leftchunk->GetTrailer();
lefttrailer->size = left;
InsertChunk(leftchunk);
}
chunk->magic = MAGIC;
chunk->GetTrailer()->magic = MAGIC;
heapstorage += chunk->size;
#if 3 <= PARANOIA
assert(ValidateHeap());
#endif
uintptr_t result = ((uintptr_t) chunk) + sizeof(Chunk);
#if __STDC_HOSTED__
pthread_mutex_unlock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_unlock(&heaplock);
#endif
return (void*) result;
}
// If no bins are available, try to allocate from the wilderness.
// Check if the wilderness can meet our requirements.
if ( wildernesssize < size && !ExpandWilderness(size) )
{
errno = ENOMEM;
#if __STDC_HOSTED__
pthread_mutex_unlock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_unlock(&heaplock);
#endif
return NULL;
}
// Carve a new chunk out of the wilderness and initialize it.
Chunk* chunk = (Chunk*) (wilderness - wildernesssize);
assert(size <= wildernesssize);
wildernesssize -= size;
heapsize += size;
assert(IsGoodHeapPointer(chunk, sizeof(*chunk)));
chunk->size = size;
Trailer* trailer = chunk->GetTrailer();
assert(IsGoodHeapPointer(trailer, sizeof(*trailer)));
trailer->size = size;
chunk->magic = MAGIC;
trailer->magic = MAGIC;
heapstorage += chunk->size;
#if 3 <= PARANOIA
assert(ValidateHeap());
#endif
uintptr_t result = ((uintptr_t) chunk) + sizeof(Chunk);
#if __STDC_HOSTED__
pthread_mutex_unlock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_unlock(&heaplock);
#endif
return (void*) result;
}
static bool IsLeftmostChunk(Chunk* chunk)
{
return heapstart <= (uintptr_t) chunk;
}
static bool IsRightmostChunk(Chunk* chunk)
{
return heapstart + heapsize <= (uintptr_t) chunk + chunk->size;
}
// Removes a chunk from its bin.
static void UnlinkChunk(Chunk* chunk)
{
assert(chunk->IsSane());
Trailer* trailer = chunk->GetTrailer();
if ( trailer->prevunused )
{
assert(trailer->prevunused->IsSane());
trailer->prevunused->nextunused = chunk->nextunused;
if ( chunk->nextunused )
{
assert(chunk->nextunused->IsSane());
chunk->nextunused->GetTrailer()->prevunused = trailer->prevunused;
}
}
else
{
if ( chunk->nextunused )
{
assert(chunk->nextunused->IsSane());
chunk->nextunused->GetTrailer()->prevunused = NULL;
}
size_t binindex = BSR(chunk->size);
assert(bins[binindex] == chunk);
bins[binindex] = chunk->nextunused;
if ( !bins[binindex] ) { bincontainschunks ^= 1UL << binindex; }
else { assert(bins[binindex]->IsSane()); }
}
}
// Transforms a chunk and its neighbors into a single chunk if possible.
static void UnifyNeighbors(Chunk** chunk)
{
if ( !IsLeftmostChunk(*chunk) )
{
Chunk* neighbor = (*chunk)->LeftNeighbor();
if ( !neighbor->IsUsed() )
{
size_t size = neighbor->size;
size_t chunksize = (*chunk)->size;
UnlinkChunk(neighbor);
*chunk = neighbor;
(*chunk)->size = size + chunksize;
(*chunk)->GetTrailer()->size = (*chunk)->size;
}
}
if ( !IsRightmostChunk(*chunk) )
{
Chunk* neighbor = (*chunk)->RightNeighbor();
if ( !neighbor->IsUsed() )
{
UnlinkChunk(neighbor);
(*chunk)->size += neighbor->size;
(*chunk)->GetTrailer()->size = (*chunk)->size;
}
}
}
extern "C" void free(void* addr)
{
#if __STDC_HOSTED__
pthread_mutex_lock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_lock(&heaplock);
#endif
#if 2 <= PARANOIA
assert(ValidateHeap());
#endif
if ( !addr)
{
#if __STDC_HOSTED__
pthread_mutex_unlock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_unlock(&heaplock);
#endif
return;
}
Chunk* chunk = (Chunk*) ((uintptr_t) addr - sizeof(Chunk));
#if __STDC_HOSTED__
if ( !IsGoodHeapPointer(addr, 1) ||
!IsGoodHeapPointer(chunk, sizeof(*chunk)) )
{
error(0, 0, "attempted to free(3) non-heap pointer: %p", addr);
abort();
}
if ( !chunk->IsUsed() )
{
error(0, 0, "attempted to free(3) area that doesn't appear to be "
"allocated: %p + 0x%zx", chunk, chunk->size);
abort();
}
#endif
assert(chunk->IsUsed());
assert(chunk->IsSane());
heapstorage -= chunk->size;
UnifyNeighbors(&chunk);
bool nexttowilderness = IsRightmostChunk(chunk);
// If possible, let the wilderness regain the memory.
if ( nexttowilderness )
{
heapsize -= chunk->size;
wildernesssize += chunk->size;
#if __STDC_HOSTED__
pthread_mutex_unlock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_unlock(&heaplock);
#endif
return;
}
InsertChunk(chunk);
#if 3 <= PARANOIA
assert(ValidateHeap());
#endif
#if __STDC_HOSTED__
pthread_mutex_unlock(&heaplock);
#elif defined(__is_sortix_kernel)
Sortix::kthread_mutex_unlock(&heaplock);
#endif
}
// TODO: Implement this function properly.
extern "C" void* realloc(void* ptr, size_t size)
{
if ( !ptr ) { return malloc(size); }
Chunk* chunk = (Chunk*) ((uintptr_t) ptr - sizeof(Chunk));
assert(chunk->IsUsed());
assert(chunk->IsSane());
size_t allocsize = chunk->size - OVERHEAD;
if ( size < allocsize ) { return ptr; }
void* newptr = malloc(size);
if ( !newptr ) { return NULL; }
memcpy(newptr, ptr, allocsize);
free(ptr);
return newptr;
}

libc/stdlib/malloc.cpp

@@ -0,0 +1,104 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
stdlib/malloc.cpp
Allocates a chunk of memory from the dynamic memory heap.
*******************************************************************************/
#include <errno.h>
#include <malloc.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#if __is_sortix_kernel
#include <sortix/kernel/kernel.h>
#endif
#if defined(HEAP_NO_ASSERT)
#define __heap_verify() ((void) 0)
#undef assert
#define assert(x) do { ((void) 0); } while ( 0 )
#endif
extern "C" void* malloc(size_t original_size)
{
if ( !heap_size_has_bin(original_size) )
return errno = ENOMEM, (void*) NULL;
// Decide how big an allocation we would like to make.
size_t chunk_outer_size = sizeof(struct heap_chunk) +
sizeof(struct heap_chunk_post);
size_t chunk_inner_size = heap_align(original_size);
size_t chunk_size = chunk_outer_size + chunk_inner_size;
if ( !heap_size_has_bin(chunk_size) )
return errno = ENOMEM, (void*) NULL;
// Decide which bins are large enough for our allocation.
size_t smallest_desirable_bin = heap_bin_for_allocation(chunk_size);
size_t smallest_desirable_bin_size = heap_size_of_bin(smallest_desirable_bin);
size_t desirable_bins = ~0UL << smallest_desirable_bin;
__heap_lock();
__heap_verify();
// Determine whether there are any bins that we can use.
size_t usable_bins = desirable_bins & __heap_state.bin_filled_bitmap;
// If there are no usable bins, attempt to expand the current part of the
// heap or create a new part.
if ( !usable_bins && __heap_expand_current_part(smallest_desirable_bin_size) )
usable_bins = desirable_bins & __heap_state.bin_filled_bitmap;
// If we failed to expand the current part or make a new one, then we are
// officially out of memory until someone deallocates something.
if ( !usable_bins )
{
__heap_verify();
__heap_unlock();
return (void*) NULL;
}
// Pick the smallest of the usable bins.
size_t bin_index = heap_bsf(usable_bins);
// Pick the first element of this bin's linked list. This is our allocation.
struct heap_chunk* result_chunk = __heap_state.bin[bin_index];
assert(result_chunk);
assert(HEAP_IS_POINTER_ALIGNED(result_chunk, result_chunk->chunk_size));
assert(chunk_size <= result_chunk->chunk_size);
// Mark our chosen chunk as used and remove it from its bin.
heap_remove_chunk(result_chunk);
// If our chunk is larger than what we really needed and it is possible to
// split the chunk into two, then we should split off a part of it and
// return it to the heap for further allocation.
if ( heap_can_split_chunk(result_chunk, chunk_size) )
heap_split_chunk(result_chunk, chunk_size);
__heap_verify();
__heap_unlock();
// Return the inner data associated with the chunk to the caller.
return heap_chunk_to_data(result_chunk);
}
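As a worked illustration of the size computations above (illustrative numbers, assuming a 64-bit system):
// Example: malloc(100).
// chunk_outer_size = 16 + 16 = 32 bytes, chunk_inner_size = heap_align(100)
// = 128 bytes, so chunk_size = 160 bytes. heap_bin_for_allocation(160) == 8
// because 2^8 = 256 is the smallest power of two >= 160, so only bins 8 and
// above are considered. A 256-byte chunk taken from bin 8 leaves 96 bytes to
// spare, which is enough for another chunk, so it is split and the 96-byte
// remainder is returned to the heap as a new free chunk.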

libc/stdlib/realloc.cpp

@@ -0,0 +1,127 @@
/*******************************************************************************
Copyright(C) Jonas 'Sortie' Termansen 2011, 2012, 2013, 2014.
This file is part of the Sortix C Library.
The Sortix C Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
The Sortix C Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the Sortix C Library. If not, see <http://www.gnu.org/licenses/>.
stdlib/realloc.cpp
Reallocates a chunk of memory from the dynamic memory heap.
*******************************************************************************/
#include <assert.h>
#include <errno.h>
#include <malloc.h>
#include <stdlib.h>
#include <string.h>
#if __is_sortix_kernel
#include <sortix/kernel/kernel.h>
#endif
#if defined(HEAP_NO_ASSERT)
#define __heap_verify() ((void) 0)
#undef assert
#define assert(x) do { ((void) 0); } while ( 0 )
#endif
extern "C" void* realloc(void* ptr, size_t requested_size)
{
if ( !ptr )
return malloc(requested_size);
if ( !heap_size_has_bin(requested_size) )
return errno = ENOMEM, (void*) NULL;
// Decide how big an allocation we would like to make.
size_t requested_chunk_outer_size = sizeof(struct heap_chunk) +
sizeof(struct heap_chunk_post);
size_t requested_chunk_inner_size = heap_align(requested_size);
size_t requested_chunk_size = requested_chunk_outer_size +
requested_chunk_inner_size;
if ( !heap_size_has_bin(requested_chunk_size) )
return errno = ENOMEM, (void*) NULL;
__heap_lock();
__heap_verify();
// Retrieve the chunk that contains this allocation.
struct heap_chunk* chunk = heap_data_to_chunk((uint8_t*) ptr);
assert(chunk->chunk_magic == HEAP_CHUNK_MAGIC);
assert(heap_chunk_to_post(chunk)->chunk_magic == HEAP_CHUNK_MAGIC);
assert(heap_chunk_to_post(chunk)->chunk_size == chunk->chunk_size);
// Do nothing if the chunk already has the ideal size.
if ( chunk->chunk_size == requested_chunk_size )
{
__heap_verify();
__heap_unlock();
return heap_chunk_to_data(chunk);
}
// If the ideal size is smaller than the current size, attempt to shrink the
// allocation if a new chunk can be created.
if ( requested_chunk_size < chunk->chunk_size )
{
assert(requested_chunk_size <= chunk->chunk_size);
if ( heap_can_split_chunk(chunk, requested_chunk_size) )
heap_split_chunk(chunk, requested_chunk_size);
__heap_verify();
__heap_unlock();
return heap_chunk_to_data(chunk);
}
// TODO: What if the right neighbor is the part edge?
// If we need to expand the chunk, attempt to combine it with its right
// neighbor if it is large enough.
struct heap_chunk* right;
if ( (right = heap_chunk_right(chunk)) &&
!heap_chunk_is_used(right) &&
requested_chunk_size <= chunk->chunk_size + right->chunk_size )
{
heap_remove_chunk(right);
heap_chunk_format((uint8_t*) chunk, chunk->chunk_size + right->chunk_size);
assert(requested_chunk_size <= chunk->chunk_size);
if ( heap_can_split_chunk(chunk, requested_chunk_size) )
heap_split_chunk(chunk, requested_chunk_size);
__heap_verify();
__heap_unlock();
return heap_chunk_to_data(chunk);
}
// It appears that we cannot retain the original allocation location and we
// will have to relocate the allocation elsewhere to expand it.
size_t orignal_ptr_size = heap_chunk_data_size(chunk);
__heap_verify();
__heap_unlock();
assert(orignal_ptr_size < requested_size);
void* result = malloc(requested_size);
if ( !result )
return (void*) NULL;
memcpy(result, ptr, orignal_ptr_size);
free(ptr);
return result;
}
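To trace the in-place growth path above with concrete numbers (a hedged example, assuming a 64-bit system):
// Example: realloc(p, 300) where p sits in a 160-byte chunk.
// requested_chunk_size = 32 + heap_align(300) = 32 + 320 = 352 bytes. If the
// chunk to the right is unused and at least 352 - 160 = 192 bytes, the two
// chunks are merged, split back down towards 352 bytes if enough is left
// over, and p is returned unchanged; otherwise the data is copied into a
// fresh malloc(300) allocation and the old chunk is freed.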