wrap more memory mapping implementation details
parent 1cb28531a8
commit d779d41721

malloc.c (13 lines changed)

@@ -8,7 +8,6 @@
 #include <unistd.h>
 #include <pthread.h>
-#include <sys/mman.h>
 
 #include <malloc.h>
 
@@ -40,7 +39,7 @@ static void *allocate_pages(size_t usable_size, size_t guard_size, bool unprotec
         return NULL;
     }
     void *usable = (char *)real + guard_size;
-    if (unprotect && memory_protect(usable, usable_size, PROT_READ|PROT_WRITE)) {
+    if (unprotect && memory_protect_rw(usable, usable_size)) {
         memory_unmap(real, real_size);
         return NULL;
     }
@@ -81,7 +80,7 @@ static void *allocate_pages_aligned(size_t usable_size, size_t alignment, size_t
     size_t trail_size = alloc_size - lead_size - usable_size;
     void *base = (char *)usable + lead_size;
 
-    if (memory_protect(base, usable_size, PROT_READ|PROT_WRITE)) {
+    if (memory_protect_rw(base, usable_size)) {
         memory_unmap(real, real_alloc_size);
         return NULL;
     }
@@ -203,7 +202,7 @@ static struct slab_metadata *alloc_metadata(struct size_class *c, size_t slab_si
     if (allocate > metadata_max) {
         allocate = metadata_max;
     }
-    if (memory_protect(c->slab_info, allocate * sizeof(struct slab_metadata), PROT_READ|PROT_WRITE)) {
+    if (memory_protect_rw(c->slab_info, allocate * sizeof(struct slab_metadata))) {
         return NULL;
     }
     c->metadata_allocated = allocate;
@@ -333,7 +332,7 @@ static inline void *slab_allocate(size_t requested_size) {
         }
 
         void *slab = get_slab(c, slab_size, metadata);
-        if (requested_size != 0 && memory_protect(slab, slab_size, PROT_READ|PROT_WRITE)) {
+        if (requested_size != 0 && memory_protect_rw(slab, slab_size)) {
             c->metadata_count--;
             pthread_mutex_unlock(&c->mutex);
             return NULL;
@@ -636,14 +635,14 @@ COLD static void init_slow_path(void) {
             fatal_error("failed to allocate slab metadata");
         }
         c->metadata_allocated = PAGE_SIZE / sizeof(struct slab_metadata);
-        if (memory_protect(c->slab_info, c->metadata_allocated * sizeof(struct slab_metadata), PROT_READ|PROT_WRITE)) {
+        if (memory_protect_rw(c->slab_info, c->metadata_allocated * sizeof(struct slab_metadata))) {
             fatal_error("failed to allocate initial slab info");
         }
     }
 
     atomic_store_explicit(&ro.initialized, true, memory_order_release);
 
-    if (memory_protect(&ro, sizeof(ro), PROT_READ)) {
+    if (memory_protect_ro(&ro, sizeof(ro))) {
         fatal_error("failed to protect allocator data");
     }
 
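
For illustration, here is a minimal standalone sketch of the guard-page flow that the allocate_pages hunk above passes through, using the new wrapper. This is not the project's code: the PROT_NONE-first mapping in memory_map is an assumption inferred from the unprotect step, error handling is pared down, and a 4096-byte page size is assumed.

/* Sketch: reserve inaccessible memory with guard pages on both ends, then
 * switch only the usable interior to read/write via an intent-named wrapper. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void *memory_map(size_t size) {
    /* assumption: mapped PROT_NONE and unprotected later, as the hunks imply */
    void *p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}

static int memory_protect_rw(void *ptr, size_t size) {
    return mprotect(ptr, size, PROT_READ|PROT_WRITE);
}

static void *allocate_pages_sketch(size_t usable_size, size_t guard_size) {
    size_t real_size = usable_size + guard_size * 2;
    void *real = memory_map(real_size);
    if (real == NULL) {
        return NULL;
    }
    void *usable = (char *)real + guard_size;
    if (memory_protect_rw(usable, usable_size)) {
        munmap(real, real_size);
        return NULL;
    }
    /* any access below or past the usable region now faults */
    return usable;
}

int main(void) {
    void *p = allocate_pages_sketch(4096, 4096);
    if (p != NULL) {
        memset(p, 0, 4096); /* the interior is writable */
        puts("usable region allocated between PROT_NONE guard pages");
    }
    return 0;
}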

memory.c (10 lines changed)

@@ -24,7 +24,7 @@ int memory_unmap(void *ptr, size_t size) {
     return ret;
 }
 
-int memory_protect(void *ptr, size_t size, int prot) {
+static int memory_protect_prot(void *ptr, size_t size, int prot) {
     int ret = mprotect(ptr, size, prot);
     if (unlikely(ret) && errno != ENOMEM) {
         fatal_error("non-ENOMEM mprotect failure");
@@ -32,6 +32,14 @@ int memory_protect(void *ptr, size_t size, int prot) {
     return ret;
 }
 
+int memory_protect_rw(void *ptr, size_t size) {
+    return memory_protect_prot(ptr, size, PROT_READ|PROT_WRITE);
+}
+
+int memory_protect_ro(void *ptr, size_t size) {
+    return memory_protect_prot(ptr, size, PROT_READ);
+}
+
 int memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size) {
     void *ptr = mremap(old, old_size, new_size, MREMAP_MAYMOVE|MREMAP_FIXED, new);
     if (unlikely(ptr == MAP_FAILED)) {
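
A note on the design choice visible here: making memory_protect_prot static keeps the PROT_* flags from crossing the module boundary, so callers state intent (rw, ro) and memory.c alone maps intent to protection bits, which is what allows malloc.c to drop its <sys/mman.h> include in the first hunk above. A hypothetical extension (not part of this commit) would add further wrappers the same way:

/* Hypothetical, for illustration only: another intent-named wrapper reusing
 * the static helper, e.g. to revoke all access to a region. */
int memory_protect_none(void *ptr, size_t size) {
    return memory_protect_prot(ptr, size, PROT_NONE);
}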

memory.h (3 lines changed)

@@ -5,7 +5,8 @@
 
 void *memory_map(size_t size);
 int memory_unmap(void *ptr, size_t size);
-int memory_protect(void *ptr, size_t size, int prot);
+int memory_protect_rw(void *ptr, size_t size);
+int memory_protect_ro(void *ptr, size_t size);
 int memory_remap_fixed(void *old, size_t old_size, void *new, size_t new_size);
 
 #endif
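
The memory_protect_ro wrapper supports the freeze-after-init pattern seen in the init_slow_path hunk: fill in state once, then make its page read-only so any later write faults. A minimal sketch (toy page-aligned struct, 4096-byte page assumption, not the allocator's code):

#include <stdio.h>
#include <sys/mman.h>

static struct {
    int initialized;
    char pad[4092]; /* pad the struct to exactly one assumed 4096-byte page */
} ro __attribute__((aligned(4096)));

static int memory_protect_ro(void *ptr, size_t size) {
    return mprotect(ptr, size, PROT_READ);
}

int main(void) {
    ro.initialized = 1;
    if (memory_protect_ro(&ro, sizeof(ro))) {
        puts("failed to protect data");
        return 1;
    }
    puts("ro is frozen; any further write to it would fault");
    return 0;
}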