Skip to content

Commit 3d37e2b

Browse files
funny-falcontmm1
authored and committed
st: pool allocation
1 parent 1e025cb commit 3d37e2b

5 files changed

Lines changed: 266 additions & 3 deletions

File tree

common.mk

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -683,7 +683,8 @@ gc.$(OBJEXT): {$(VPATH)}gc.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
683683
{$(VPATH)}regex.h $(ENCODING_H_INCLUDES) $(VM_CORE_H_INCLUDES) \
684684
{$(VPATH)}gc.h {$(VPATH)}io.h {$(VPATH)}eval_intern.h {$(VPATH)}util.h \
685685
{$(VPATH)}internal.h {$(VPATH)}constant.h \
686-
{$(VPATH)}thread.h $(PROBES_H_INCLUDES) {$(VPATH)}vm_opts.h {$(VPATH)}debug.h
686+
{$(VPATH)}thread.h $(PROBES_H_INCLUDES) {$(VPATH)}vm_opts.h {$(VPATH)}debug.h \
687+
{$(VPATH)}pool_alloc.inc.h {$(VPATH)}pool_alloc.h
687688
hash.$(OBJEXT): {$(VPATH)}hash.c $(RUBY_H_INCLUDES) {$(VPATH)}util.h \
688689
$(ENCODING_H_INCLUDES) {$(VPATH)}internal.h $(PROBES_H_INCLUDES) {$(VPATH)}vm_opts.h
689690
inits.$(OBJEXT): {$(VPATH)}inits.c $(RUBY_H_INCLUDES) \
@@ -747,7 +748,7 @@ signal.$(OBJEXT): {$(VPATH)}signal.c $(RUBY_H_INCLUDES) \
747748
$(VM_CORE_H_INCLUDES) {$(VPATH)}vm_opts.h {$(VPATH)}internal.h {$(VPATH)}ruby_atomic.h {$(VPATH)}eval_intern.h
748749
sprintf.$(OBJEXT): {$(VPATH)}sprintf.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \
749750
{$(VPATH)}regex.h {$(VPATH)}vsnprintf.c $(ENCODING_H_INCLUDES) {$(VPATH)}internal.h
750-
st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES)
751+
st.$(OBJEXT): {$(VPATH)}st.c $(RUBY_H_INCLUDES) {$(VPATH)}pool_alloc.h
751752
strftime.$(OBJEXT): {$(VPATH)}strftime.c $(RUBY_H_INCLUDES) \
752753
{$(VPATH)}timev.h $(ENCODING_H_INCLUDES)
753754
string.$(OBJEXT): {$(VPATH)}string.c $(RUBY_H_INCLUDES) {$(VPATH)}re.h \

gc.c

Lines changed: 61 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
#include "vm_core.h"
2323
#include "internal.h"
2424
#include "gc.h"
25+
#include "pool_alloc.h"
2526
#include "constant.h"
2627
#include "ruby_atomic.h"
2728
#include "probes.h"
@@ -409,6 +410,23 @@ typedef struct rb_heap_struct {
409410
size_t total_slots; /* total slot count (page_length * HEAP_OBJ_LIMIT) */
410411
} rb_heap_t;
411412

413+
#ifdef POOL_ALLOC_API
414+
#define POOL_ALLOC_PART 1
415+
#include "pool_alloc.inc.h"
416+
#undef POOL_ALLOC_PART
417+
418+
typedef struct pool_layout_t pool_layout_t;
419+
struct pool_layout_t {
420+
pool_header
421+
p6, /* st_table && st_table_entry */
422+
p11; /* st_table.bins init size */
423+
} pool_layout = {
424+
INIT_POOL(void*[6]),
425+
INIT_POOL(void*[11])
426+
};
427+
static void pool_finalize_header(pool_header *header);
428+
#endif
429+
412430
typedef struct rb_objspace {
413431
struct {
414432
size_t limit;
@@ -422,6 +440,10 @@ typedef struct rb_objspace {
422440
rb_heap_t eden_heap;
423441
rb_heap_t tomb_heap; /* heap for zombies and ghosts */
424442

443+
#ifdef POOL_ALLOC_API
444+
pool_layout_t *pool_headers;
445+
#endif
446+
425447
struct {
426448
struct heap_page **sorted;
427449
size_t used;
@@ -592,7 +614,12 @@ struct heap_page {
592614
#define ruby_initial_gc_stress gc_params.gc_stress
593615
VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
594616
#else
595-
static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT_MIN}};
617+
static rb_objspace_t rb_objspace = {
618+
{GC_MALLOC_LIMIT_MIN}
619+
#ifdef POOL_ALLOC_API
620+
, &pool_layout
621+
#endif
622+
};
596623
VALUE *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
597624
#endif
598625

@@ -870,6 +897,10 @@ rb_objspace_alloc(void)
870897

871898
malloc_limit = gc_params.malloc_limit_min;
872899

900+
#ifdef POOL_ALLOC_API
901+
objspace->pool_headers = (pool_layout_t*) malloc(sizeof(pool_layout));
902+
memcpy(objspace->pool_headers, &pool_layout, sizeof(pool_layout));
903+
#endif
873904
return objspace;
874905
}
875906
#endif
@@ -911,6 +942,13 @@ rb_objspace_free(rb_objspace_t *objspace)
911942
objspace->eden_heap.pages = NULL;
912943
}
913944
free_stack_chunks(&objspace->mark_stack);
945+
#ifdef POOL_ALLOC_API
946+
if (objspace->pool_headers) {
947+
pool_finalize_header(&objspace->pool_headers->p6);
948+
pool_finalize_header(&objspace->pool_headers->p11);
949+
free(objspace->pool_headers);
950+
}
951+
#endif
914952
free(objspace);
915953
}
916954
#endif
@@ -6256,6 +6294,28 @@ ruby_mimfree(void *ptr)
62566294
free(mem);
62576295
}
62586296

6297+
#ifdef POOL_ALLOC_API
6298+
#define POOL_ALLOC_PART 2
6299+
#include "pool_alloc.inc.h"
6300+
#undef POOL_ALLOC_PART
6301+
#endif
6302+
6303+
/* Public entry point: return a chunk previously obtained from one of the
 * ruby_xpool_malloc_*p() functions to its owning pool. */
void
ruby_xpool_free(void *ptr)
{
    pool_free_entry((void**)ptr);
}
6308+
6309+
#define DEFINE_POOL_MALLOC(pnts) \
6310+
void * \
6311+
ruby_xpool_malloc_##pnts##p() \
6312+
{ \
6313+
return pool_alloc_entry(&rb_objspace.pool_headers->p##pnts); \
6314+
}
6315+
DEFINE_POOL_MALLOC(6)
6316+
DEFINE_POOL_MALLOC(11)
6317+
#undef DEFINE_POOL_MALLOC
6318+
62596319
#if MALLOC_ALLOCATED_SIZE
62606320
/*
62616321
* call-seq:

pool_alloc.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
#ifndef POOL_ALLOC_H
#define POOL_ALLOC_H

#define POOL_ALLOC_API
#ifdef POOL_ALLOC_API
/* Pooled allocation entry points, implemented in gc.c.
 * ruby_xpool_malloc_6p/11p return a chunk of 6 / 11 pointer-sized words;
 * ruby_xpool_free returns such a chunk to its pool.
 *
 * Fix: declare the malloc functions with `(void)` instead of empty
 * parentheses -- `()` in C means "unspecified parameters" and disables
 * argument type checking at call sites. */
void ruby_xpool_free(void *ptr);
void *ruby_xpool_malloc_6p(void);
void *ruby_xpool_malloc_11p(void);
#endif

#endif

pool_alloc.inc.h

Lines changed: 156 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,156 @@
1+
/*
 * this is a generic pool allocator
 * you should define the following macros:
 * ITEM_NAME - unique identifier, which allows to hold functions in a namespace
 * ITEM_TYPEDEF(name) - passed to typedef to localize item type
 * free_entry - desired name of function for free entry
 * alloc_entry - desired name of function for allocate entry
 */
9+
10+
#if POOL_ALLOC_PART == 1
/* Part 1: types and sizing macros for the pool allocator.
 * Slabs (pool_holders) are DEFAULT_POOL_SIZE bytes and must be a power of
 * two: entry_holder() in part 2 recovers the holder by masking entry
 * addresses with ~(DEFAULT_POOL_SIZE - 1). */
#ifdef HEAP_ALIGN_LOG
#define DEFAULT_POOL_SIZE (1 << HEAP_ALIGN_LOG)
#else
#define DEFAULT_POOL_SIZE (sizeof(void*) * 2048)
#endif
typedef unsigned int pool_holder_counter;

typedef struct pool_entry_list pool_entry_list;
typedef struct pool_holder pool_holder;

/* Per-size pool descriptor. */
typedef struct pool_header {
    pool_holder *first;          /* holders that still have free entries */
    pool_holder *_black_magick;  /* presumably the opposite end of that
                                  * list (see pool_holder_unchaing) --
                                  * NOTE(review): confirm */
    pool_holder_counter size; /* size of entry in sizeof(void*) items */
    pool_holder_counter total; /* entries per holder */
} pool_header;

/* One aligned slab holding `total` entries of `size` words each. */
struct pool_holder {
    pool_holder_counter free, total;
    pool_header *header;      /* owning pool */
    void *freep;              /* head of intrusive free list inside data[] */
    pool_holder *fore, *back; /* links in the free-holders list */
    void *data[1];            /* entry storage; real size set at allocation */
};
/* NOTE(review): the extra sizeof(void*) * 6 mirrors the six pointer-sized
 * header words of pool_holder and must be kept in sync with the struct
 * layout by hand. */
#define POOL_DATA_SIZE(pool_size) (((pool_size) - sizeof(void*) * 6 - offsetof(pool_holder, data)) / sizeof(void*))
#define POOL_ENTRY_SIZE(item_type) ((sizeof(item_type) - 1) / sizeof(void*) + 1)
#define POOL_HOLDER_COUNT(pool_size, item_type) (POOL_DATA_SIZE(pool_size)/POOL_ENTRY_SIZE(item_type))
#define INIT_POOL(item_type) {NULL, NULL, POOL_ENTRY_SIZE(item_type), POOL_HOLDER_COUNT(DEFAULT_POOL_SIZE, item_type)}
39+
40+
#elif POOL_ALLOC_PART == 2
41+
/* Allocate and initialize a fresh slab (pool_holder) for `header`.
 *
 * The slab is allocated DEFAULT_POOL_SIZE-aligned (assuming aligned_malloc
 * takes (alignment, size) -- TODO confirm) so entry_holder() can recover
 * the holder from any entry address.  All entries are threaded into an
 * intrusive singly-linked free list rooted at holder->freep. */
static pool_holder *
pool_holder_alloc(pool_header *header)
{
    pool_holder *holder;
    pool_holder_counter i, size, count;
    register void **ptr;

    /* bytes actually used inside the slab (the slab itself is
     * DEFAULT_POOL_SIZE bytes) */
    size_t sz = offsetof(pool_holder, data) +
            header->size * header->total * sizeof(void*);
#define objspace (&rb_objspace)
    objspace_malloc_prepare(objspace, DEFAULT_POOL_SIZE);
    /* presumably the prepare step can trigger GC and refill the pool,
     * hence this re-check -- NOTE(review): confirm */
    if (header->first != NULL) return header->first;
    TRY_WITH_GC(holder = (pool_holder*) aligned_malloc(DEFAULT_POOL_SIZE, sz));
    malloc_increase += DEFAULT_POOL_SIZE;
#if CALC_EXACT_MALLOC_SIZE
    objspace->malloc_params.allocated_size += DEFAULT_POOL_SIZE;
    objspace->malloc_params.allocations++;
#endif
#undef objspace

    size = header->size;
    count = header->total;
    holder->free = count;
    holder->total = count;
    holder->header = header;
    holder->fore = NULL;
    holder->back = NULL;
    holder->freep = &holder->data;
    ptr = holder->data;
    /* chain each entry's first word to the next entry; last gets NULL */
    for(i = count - 1; i; i-- ) {
        ptr = *ptr = ptr + size;
    }
    *ptr = NULL;
    header->first = holder;
    return holder;
}
77+
78+
/* Remove `holder` from its pool's doubly-linked list of holders with free
 * entries, patching header->first / header->_black_magick when the holder
 * was at either end of the list.  (Name as in the original patch;
 * "unchaining" intended.) */
static inline void
pool_holder_unchaing(pool_header *header, pool_holder *holder)
{
    register pool_holder *fore = holder->fore, *back = holder->back;
    holder->fore = NULL;
    holder->back = NULL;
    /* holder was the fore-most element -> _black_magick tracked it */
    if (fore != NULL) fore->back = back;
    else header->_black_magick = back;
    /* holder was the back-most element -> first tracked it */
    if (back != NULL) back->fore = fore;
    else header->first = fore;
}
89+
90+
/* Recover the slab an entry lives in: slabs are DEFAULT_POOL_SIZE-aligned,
 * so masking off the low bits of the entry address yields the holder's
 * base.  Requires DEFAULT_POOL_SIZE to be a power of two. */
static inline pool_holder *
entry_holder(void **entry)
{
    return (pool_holder*)(((uintptr_t)entry) & ~(DEFAULT_POOL_SIZE - 1));
}
95+
96+
/* Return `entry` to its pool.  Three cases:
 *   1. the holder was completely full (free was 0) and therefore unlinked
 *      from the free-holders list: link it back in (after header->first
 *      when one exists, else as the new first);
 *   2. this free makes the holder completely empty and it is not the
 *      cached header->first: unlink it and release the whole slab --
 *      the entry itself dies with the slab, so return early;
 *   3. otherwise just push the entry onto the holder's free list. */
static inline void
pool_free_entry(void **entry)
{
    pool_holder *holder = entry_holder(entry);
    pool_header *header = holder->header;

    if (holder->free++ == 0) {
        register pool_holder *first = header->first;
        if (first == NULL) {
            header->first = holder;
        } else {
            /* insert right after `first` */
            holder->back = first;
            holder->fore = first->fore;
            first->fore = holder;
            if (holder->fore)
                holder->fore->back = holder;
            else
                header->_black_magick = holder;
        }
    } else if (holder->free == holder->total && header->first != holder ) {
        pool_holder_unchaing(header, holder);
        aligned_free(holder);
#if CALC_EXACT_MALLOC_SIZE
        rb_objspace.malloc_params.allocated_size -= DEFAULT_POOL_SIZE;
        rb_objspace.malloc_params.allocations--;
#endif
        return;
    }

    /* push entry onto the holder's intrusive free list */
    *entry = holder->freep;
    holder->freep = entry;
}
128+
129+
/* Hand out one entry from the pool.  Uses the cached header->first
 * holder, allocating a fresh slab when none has free entries; a holder
 * whose last free entry is taken is unlinked from the free-holders list. */
static inline void*
pool_alloc_entry(pool_header *header)
{
    pool_holder *holder = header->first;
    void **result;
    if (holder == NULL) {
        holder = pool_holder_alloc(header);
    }

    /* pop the head of the intrusive free list */
    result = holder->freep;
    holder->freep = *result;

    if (--holder->free == 0) {
        pool_holder_unchaing(header, holder);
    }

    return result;
}
147+
148+
static void
149+
pool_finalize_header(pool_header *header)
150+
{
151+
if (header->first) {
152+
aligned_free(header->first);
153+
header->first = NULL;
154+
}
155+
}
156+
#endif

st.c

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77
#include "st.h"
88
#else
99
#include "ruby/ruby.h"
10+
#include "pool_alloc.h"
1011
#endif
1112

1213
#include <stdio.h>
@@ -88,6 +89,39 @@ static void rehash(st_table *);
8889
#define do_hash_bin(key,table) (do_hash((key), (table))%(table)->num_bins)
8990

9091
/* preparation for possible allocation improvements */
92+
#ifdef POOL_ALLOC_API
93+
#define st_alloc_entry() (st_table_entry *)ruby_xpool_malloc_6p()
94+
#define st_free_entry(entry) ruby_xpool_free(entry)
95+
#define st_alloc_table() (st_table *)ruby_xpool_malloc_6p()
96+
#define st_dealloc_table(table) ruby_xpool_free(table)
97+
/* Allocate a zeroed bins array of `size` slots.  The initial st_table
 * size (11 bins) is served from the 11-pointer pool and zeroed by hand
 * (pool entries are not cleared); every other size falls back to
 * ruby_xcalloc, which zeroes for us. */
static inline st_table_entry **
st_alloc_bins(st_index_t size)
{
    st_table_entry **result;
    if (size == 11) {
        result = (st_table_entry **) ruby_xpool_malloc_11p();
        memset(result, 0, 11 * sizeof(st_table_entry *));
    }
    else
        result = (st_table_entry **) ruby_xcalloc(size, sizeof(st_table_entry*));
    return result;
}
109+
static inline void
110+
st_free_bins(st_table_entry **bins, st_index_t size)
111+
{
112+
if (size == 11)
113+
ruby_xpool_free(bins);
114+
else
115+
ruby_xfree(bins);
116+
}
117+
/* Swap a bins array for a fresh zeroed one of `newsize` slots.  The old
 * contents are NOT copied -- the old array is simply freed -- so the
 * caller must rehash all entries into the returned array. */
static inline st_table_entry**
st_realloc_bins(st_table_entry **bins, st_index_t newsize, st_index_t oldsize)
{
    st_table_entry **new_bins = st_alloc_bins(newsize);
    st_free_bins(bins, oldsize);
    return new_bins;
}
124+
#else
91125
#define st_alloc_entry() (st_table_entry *)malloc(sizeof(st_table_entry))
92126
#define st_free_entry(entry) free(entry)
93127
#define st_alloc_table() (st_table *)malloc(sizeof(st_table))
@@ -101,6 +135,7 @@ st_realloc_bins(st_table_entry **bins, st_index_t newsize, st_index_t oldsize)
101135
MEMZERO(bins, st_table_entry*, newsize);
102136
return bins;
103137
}
138+
#endif
104139

105140
/* Shortcut */
106141
#define bins as.big.bins

0 commit comments

Comments
 (0)