Skip to content

Commit 33e5d38

Browse files
jhawthorn
authored and k0kubun committed
Map M:N thread stack chunks initially as PROT_NONE
Previously we initially mapped the full 512MB chunk as PROT_READ|PROT_WRITE and then set a guard page to PROT_NONE the first time a new thread stack is needed. Usually that's okay as we don't touch that memory until it is needed and so it doesn't count towards RSS. However, on Linux even with vm.overcommit_memory=0 (the default), on a system (like a tiny cloud VM) with <512MB of RAM+swap that would error with: `Thread#initialize': can't create Thread: Cannot allocate memory (ThreadError)`. This changes the chunk to be mapped initially with PROT_NONE, then instead of mapping the guard pages we map in the machine and VM stacks using mprotect. This ensures we don't commit stack memory until it is first used, and as a side benefit any stray pointers into unused stack should segfault. When a stack is freed/reused there is no change from the previous behaviour: we just use madvise and leave the same regions in place. [Bug #21944]
1 parent a9b84ad commit 33e5d38

1 file changed

Lines changed: 22 additions & 17 deletions

File tree

thread_pthread_mn.c

Lines changed: 22 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,7 @@ nt_alloc_thread_stack_chunk(void)
194194
mmap_flags |= MAP_STACK;
195195
#endif
196196

197-
const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
197+
const char *m = (void *)mmap(NULL, MSTACK_CHUNK_SIZE, PROT_NONE, mmap_flags, -1, 0);
198198
if (m == MAP_FAILED) {
199199
return NULL;
200200
}
@@ -213,6 +213,12 @@ nt_alloc_thread_stack_chunk(void)
213213

214214
VM_ASSERT(stack_count <= UINT16_MAX);
215215

216+
// Enable read/write for the header pages
217+
if (mprotect((void *)m, (size_t)header_page_cnt * MSTACK_PAGE_SIZE, PROT_READ | PROT_WRITE) != 0) {
218+
munmap((void *)m, MSTACK_CHUNK_SIZE);
219+
return NULL;
220+
}
221+
216222
struct nt_stack_chunk_header *ch = (struct nt_stack_chunk_header *)m;
217223

218224
ch->start_page = header_page_cnt;
@@ -241,7 +247,7 @@ nt_stack_chunk_get_msf(const rb_vm_t *vm, const char *mstack)
241247
return (struct nt_machine_stack_footer *)&mstack[msz - sizeof(struct nt_machine_stack_footer)];
242248
}
243249

244-
static void *
250+
static void
245251
nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, size_t idx, void **vm_stack, void **machine_stack)
246252
{
247253
// TODO: only support stack going down
@@ -266,8 +272,6 @@ nt_stack_chunk_get_stack(const rb_vm_t *vm, struct nt_stack_chunk_header *ch, si
266272

267273
*vm_stack = (void *)vstack;
268274
*machine_stack = (void *)mstack;
269-
270-
return (void *)guard_page;
271275
}
272276

273277
RBIMPL_ATTR_MAYBE_UNUSED()
@@ -290,17 +294,6 @@ nt_stack_chunk_dump(void)
290294
}
291295
}
292296

293-
static int
294-
nt_guard_page(const char *p, size_t len)
295-
{
296-
if (mprotect((void *)p, len, PROT_NONE) != -1) {
297-
return 0;
298-
}
299-
else {
300-
return errno;
301-
}
302-
}
303-
304297
static int
305298
nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
306299
{
@@ -319,8 +312,20 @@ nt_alloc_stack(rb_vm_t *vm, void **vm_stack, void **machine_stack)
319312
RUBY_DEBUG_LOG("uninitialized_stack_count:%d", ch->uninitialized_stack_count);
320313

321314
size_t idx = ch->stack_count - ch->uninitialized_stack_count--;
322-
void *guard_page = nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
323-
err = nt_guard_page(guard_page, MSTACK_PAGE_SIZE);
315+
316+
// The chunk was mapped PROT_NONE; enable the VM stack and
317+
// machine stack pages, leaving the guard page as PROT_NONE.
318+
char *stack_start = nt_stack_chunk_get_stack_start(ch, idx);
319+
size_t vm_stack_size = vm->default_params.thread_vm_stack_size;
320+
size_t mstack_size = nt_thread_stack_size() - vm_stack_size - MSTACK_PAGE_SIZE;
321+
322+
if (mprotect(stack_start, vm_stack_size, PROT_READ | PROT_WRITE) != 0 ||
323+
mprotect(stack_start + vm_stack_size + MSTACK_PAGE_SIZE, mstack_size, PROT_READ | PROT_WRITE) != 0) {
324+
err = errno;
325+
}
326+
else {
327+
nt_stack_chunk_get_stack(vm, ch, idx, vm_stack, machine_stack);
328+
}
324329
}
325330
else {
326331
nt_free_stack_chunks = ch->prev_free_chunk;

0 commit comments

Comments
 (0)