Browse Source

optimize code: move VM pool/stack globals into context structs and inline hot-path stack/pool operations

master
CismonX 1 year ago
parent
commit
e9c4b35e81
No known key found for this signature in database GPG Key ID: 315D6652268C5007
  1. 110
      src/runtime.c
  2. 192
      src/vm_pool.c
  3. 162
      src/vm_pool.h
  4. 249
      src/vm_stack.c
  5. 147
      src/vm_stack.h

110
src/runtime.c

@ -28,11 +28,13 @@
#include <inttypes.h>
#include <arpa/inet.h>
static struct u6a_vm_ins* text;
static uint32_t text_len;
static char* rodata;
static uint32_t rodata_len;
static bool force_exec;
static struct u6a_vm_ins* text;
static uint32_t text_len;
static char* rodata;
static uint32_t rodata_len;
static bool force_exec;
static struct u6a_vm_stack_ctx stack_ctx;
static struct u6a_vm_pool_ctx pool_ctx;
static const struct u6a_vm_ins text_subst[] = {
{ .opcode = u6a_vo_la },
@ -66,39 +68,50 @@ static const char* info_runtime = "runtime";
#define VM_JMP(dest) \
ins = text + (dest); \
continue
#define VM_VAR_JMP \
U6A_VM_VAR_FN_REF(u6a_vf_j, ins - text)
#define VM_VAR_FINALIZE \
U6A_VM_VAR_FN_REF(u6a_vf_f, ins - text)
#define CHECK_FORCE(log_func, err_val) \
if (!force_exec) { \
log_func(err_runtime, err_val); \
goto runtime_error; \
}
#define STACK_PUSH1(fn_0) \
vm_var_fn_addref(fn_0); \
if (UNLIKELY(!u6a_vm_stack_push1(fn_0))) { \
goto runtime_error; \
#define VM_VAR_JMP U6A_VM_VAR_FN_REF(u6a_vf_j, ins - text)
#define VM_VAR_FINALIZE U6A_VM_VAR_FN_REF(u6a_vf_f, ins - text)
#define STACK_PUSH1(fn_0) \
vm_var_fn_addref(fn_0); \
if (UNLIKELY(!u6a_vm_stack_push1(&stack_ctx, fn_0))) { \
goto runtime_error; \
}
#define STACK_PUSH2(fn_0, fn_1) \
if (UNLIKELY(!u6a_vm_stack_push2(fn_0, fn_1))) { \
goto runtime_error; \
#define STACK_PUSH2(fn_0, fn_1) \
if (UNLIKELY(!u6a_vm_stack_push2(&stack_ctx, fn_0, fn_1))) { \
goto runtime_error; \
}
#define STACK_PUSH3(fn_0, fn_1, fn_2) \
if (UNLIKELY(!u6a_vm_stack_push3(fn_0, fn_1, fn_2))) { \
goto runtime_error; \
#define STACK_PUSH3(fn_0, fn_1, fn_2) \
if (UNLIKELY(!u6a_vm_stack_push3(&stack_ctx, fn_0, fn_1, fn_2))) { \
goto runtime_error; \
}
#define STACK_PUSH4(fn_0, fn_1, fn_2, fn_3) \
if (UNLIKELY(!u6a_vm_stack_push4(fn_0, fn_1, fn_2, fn_3))) { \
goto runtime_error; \
#define STACK_PUSH4(fn_0, fn_1, fn_2, fn_3) \
if (UNLIKELY(!u6a_vm_stack_push4(&stack_ctx, fn_0, fn_1, fn_2, fn_3))) { \
goto runtime_error; \
}
#define STACK_POP() \
vm_var_fn_free(top); \
top = u6a_vm_stack_top(); \
if (UNLIKELY(!u6a_vm_stack_pop())) { \
goto runtime_error; \
#define STACK_POP() \
vm_var_fn_free(top); \
top = u6a_vm_stack_top(&stack_ctx); \
if (UNLIKELY(!u6a_vm_stack_pop(&stack_ctx))) { \
goto runtime_error; \
}
#define STACK_XCH(fn_0) \
fn_0 = u6a_vm_stack_xch(&stack_ctx, fn_0); \
if (UNLIKELY(U6A_VM_VAR_FN_IS_EMPTY(fn_0))) { \
goto runtime_error; \
}
#define POOL_ALLOC1(v1) u6a_vm_pool_alloc1(&pool_ctx, v1)
#define POOL_ALLOC2(v1, v2) u6a_vm_pool_alloc2(&pool_ctx, v1, v2)
#define POOL_ALLOC2_PTR(v1, v2) u6a_vm_pool_alloc2_ptr(&pool_ctx, v1, v2)
#define POOL_GET1(offset) u6a_vm_pool_get1(pool_ctx.active_pool, offset)
#define POOL_GET2(offset) u6a_vm_pool_get2(pool_ctx.active_pool, offset)
#define POOL_GET2_SEPARATE(offset) u6a_vm_pool_get2_separate(&pool_ctx, offset)
static inline bool
read_bc_header(struct u6a_bc_header* restrict header, FILE* restrict input_stream) {
@ -124,7 +137,7 @@ read_bc_header(struct u6a_bc_header* restrict header, FILE* restrict input_strea
/*
 * Take one extra pool reference on a VM variable, if it actually holds
 * a pool reference (U6A_VM_FN_REF flag set); plain tokens pass through
 * untouched. Returns the variable unchanged so calls can be nested in
 * expressions.
 *
 * Fix: the span contained both the legacy call u6a_vm_pool_addref(var.ref)
 * and the new context-based call, which would bump the refcount twice per
 * invocation. Only the context-based call is kept.
 */
static inline struct u6a_vm_var_fn
vm_var_fn_addref(struct u6a_vm_var_fn var) {
    if (var.token.fn & U6A_VM_FN_REF) {
        u6a_vm_pool_addref(pool_ctx.active_pool, var.ref);
    }
    return var;
}
@ -132,7 +145,7 @@ vm_var_fn_addref(struct u6a_vm_var_fn var) {
static inline void
vm_var_fn_free(struct u6a_vm_var_fn var) {
if (var.token.fn & U6A_VM_FN_REF) {
u6a_vm_pool_free(var.ref);
//u6a_vm_pool_free(pool_ctx.active_pool, var.ref);
}
}
@ -190,12 +203,14 @@ u6a_runtime_init(struct u6a_runtime_options* options) {
if (UNLIKELY(rodata_len != fread(rodata, sizeof(char), rodata_len, options->istream))) {
goto runtime_init_failed;
}
if (UNLIKELY(!u6a_vm_stack_init(options->stack_segment_size, err_runtime))) {
if (UNLIKELY(!u6a_vm_stack_init(&stack_ctx, options->stack_segment_size, err_runtime))) {
goto runtime_init_failed;
}
if (UNLIKELY(!u6a_vm_pool_init(options->pool_size, text_len, err_runtime))) {
if (UNLIKELY(!u6a_vm_pool_init(&pool_ctx, options->pool_size, text_len, err_runtime))) {
goto runtime_init_failed;
}
stack_ctx.pool_ctx = &pool_ctx;
pool_ctx.stack_ctx = &stack_ctx;
for (struct u6a_vm_ins* ins = text + text_subst_len; ins < text + text_len; ++ins) {
if (ins->opcode & U6A_VM_OP_OFFSET) {
ins->operand.offset = ntohl(ins->operand.offset);
@ -240,15 +255,15 @@ u6a_runtime_execute(FILE* restrict istream, FILE* restrict ostream) {
switch (func.token.fn) {
case u6a_vf_s:
vm_var_fn_addref(arg);
ACC_FN_REF(u6a_vf_s1, u6a_vm_pool_alloc1(arg));
ACC_FN_REF(u6a_vf_s1, POOL_ALLOC1(arg));
break;
case u6a_vf_s1:
vm_var_fn_addref(arg);
vm_var_fn_addref(u6a_vm_pool_get1(func.ref).fn);
ACC_FN_REF(u6a_vf_s2, u6a_vm_pool_alloc2(u6a_vm_pool_get1(func.ref).fn, arg));
vm_var_fn_addref(POOL_GET1(func.ref).fn);
ACC_FN_REF(u6a_vf_s2, POOL_ALLOC2(POOL_GET1(func.ref).fn, arg));
break;
case u6a_vf_s2:
tuple = u6a_vm_pool_get2(func.ref);
tuple = POOL_GET2(func.ref);
vm_var_fn_addref(tuple.v1.fn);
vm_var_fn_addref(tuple.v2.fn);
vm_var_fn_addref(arg);
@ -261,10 +276,10 @@ u6a_runtime_execute(FILE* restrict istream, FILE* restrict ostream) {
VM_JMP(0x00);
case u6a_vf_k:
vm_var_fn_addref(arg);
ACC_FN_REF(u6a_vf_k1, u6a_vm_pool_alloc1(arg));
ACC_FN_REF(u6a_vf_k1, POOL_ALLOC1(arg));
break;
case u6a_vf_k1:
ACC_FN(u6a_vm_pool_get1(func.ref).fn);
ACC_FN(POOL_GET1(func.ref).fn);
break;
case u6a_vf_i:
ACC_FN(arg);
@ -284,29 +299,29 @@ u6a_runtime_execute(FILE* restrict istream, FILE* restrict ostream) {
ACC_FN(top);
VM_JMP(0x03);
case u6a_vf_c:
cont = u6a_vm_stack_save();
cont = u6a_vm_stack_save(&stack_ctx);
if (UNLIKELY(cont == NULL)) {
goto runtime_error;
}
STACK_PUSH2(VM_VAR_JMP, vm_var_fn_addref(arg));
ACC_FN_REF(u6a_vf_c1, u6a_vm_pool_alloc2_ptr(cont, ins));
ACC_FN_REF(u6a_vf_c1, POOL_ALLOC2_PTR(cont, ins));
VM_JMP(0x03);
case u6a_vf_d:
vm_var_fn_addref(arg);
ACC_FN_REF(u6a_vf_d1_c, u6a_vm_pool_alloc1(arg));
ACC_FN_REF(u6a_vf_d1_c, POOL_ALLOC1(arg));
break;
case u6a_vf_c1:
tuple = u6a_vm_pool_get2_separate(func.ref);
u6a_vm_stack_resume(tuple.v1.ptr);
tuple = POOL_GET2_SEPARATE(func.ref);
u6a_vm_stack_resume(&stack_ctx, tuple.v1.ptr);
ins = tuple.v2.ptr;
ACC_FN(arg);
break;
case u6a_vf_d1_c:
STACK_PUSH2(VM_VAR_JMP, vm_var_fn_addref(u6a_vm_pool_get1(func.ref).fn));
STACK_PUSH2(VM_VAR_JMP, vm_var_fn_addref(POOL_GET1(func.ref).fn));
ACC_FN(arg);
VM_JMP(0x03);
case u6a_vf_d1_s:
tuple = u6a_vm_pool_get2(func.ref);
tuple = POOL_GET2(func.ref);
STACK_PUSH3(vm_var_fn_addref(arg), VM_VAR_FINALIZE, tuple.v1.fn);
ACC_FN(tuple.v2.fn);
VM_JMP(0x03);
@ -364,12 +379,9 @@ u6a_runtime_execute(FILE* restrict istream, FILE* restrict ostream) {
func = top;
STACK_POP();
arg = top;
ACC_FN_REF(u6a_vf_d1_s, u6a_vm_pool_alloc2(func, arg));
ACC_FN_REF(u6a_vf_d1_s, POOL_ALLOC2(func, arg));
} else {
acc = u6a_vm_stack_xch(acc);
if (UNLIKELY(U6A_VM_VAR_FN_IS_EMPTY(acc))) {
goto runtime_error;
}
STACK_XCH(acc);
}
break;
case u6a_vo_del:

192
src/vm_pool.c

@ -17,190 +17,46 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "vm_pool.h"
#include "vm_stack.h"
#include "vm_pool.h"
#include "logging.h"
#include <stddef.h>
#include <stdlib.h>
struct vm_pool_elem {
struct u6a_vm_var_tuple values;
uint32_t refcnt;
uint32_t flags;
};
#define POOL_ELEM_HOLDS_PTR ( 1 << 0 )
struct vm_pool {
uint32_t pos;
struct vm_pool_elem elems[];
};
struct vm_pool_elem_ptrs {
uint32_t pos;
struct vm_pool_elem* elems[];
};
static struct vm_pool* active_pool;
static struct vm_pool_elem_ptrs* holes;
static uint32_t pool_len;
static struct vm_pool_elem** fstack;
static uint32_t fstack_top;
const char* err_stage;
static inline struct vm_pool_elem*
vm_pool_elem_alloc() {
struct vm_pool_elem* new_elem;
if (holes->pos == UINT32_MAX) {
if (UNLIKELY(++active_pool->pos == pool_len)) {
u6a_err_vm_pool_oom(err_stage);
return NULL;
}
new_elem = active_pool->elems + active_pool->pos;
} else {
new_elem = holes->elems[holes->pos--];
}
new_elem->refcnt = 1;
return new_elem;
}
static inline struct vm_pool_elem*
vm_pool_elem_dup(struct vm_pool_elem* elem) {
struct vm_pool_elem* new_elem = vm_pool_elem_alloc();
if (UNLIKELY(new_elem == NULL)) {
return NULL;
}
*new_elem = *elem;
return new_elem;
}
static inline void
free_stack_push(struct u6a_vm_var_fn fn) {
if (fn.token.fn & U6A_VM_FN_REF) {
fstack[++fstack_top] = active_pool->elems + fn.ref;
}
}
static inline struct vm_pool_elem*
free_stack_pop() {
if (fstack_top == UINT32_MAX) {
return NULL;
}
return fstack[fstack_top--];
}
bool
u6a_vm_pool_init(uint32_t pool_len_, uint32_t ins_len, const char* err_stage_) {
const uint32_t pool_size = sizeof(struct vm_pool) + pool_len_ * sizeof(struct vm_pool_elem);
active_pool = malloc(pool_size);
if (UNLIKELY(active_pool == NULL)) {
u6a_err_bad_alloc(err_stage_, pool_size);
u6a_vm_pool_init(struct u6a_vm_pool_ctx* ctx, uint32_t pool_len, uint32_t ins_len, const char* err_stage) {
const uint32_t pool_size = sizeof(struct u6a_vm_pool) + pool_len * sizeof(struct u6a_vm_pool_elem);
ctx->active_pool = malloc(pool_size);
if (UNLIKELY(ctx->active_pool == NULL)) {
u6a_err_bad_alloc(err_stage, pool_size);
return false;
}
const uint32_t holes_size = sizeof(struct vm_pool_elem_ptrs) + pool_len_ * sizeof(struct vm_pool_elem*);
holes = malloc(holes_size);
if (UNLIKELY(holes == NULL)) {
u6a_err_bad_alloc(err_stage_, holes_size);
free(holes);
const uint32_t holes_size = sizeof(struct u6a_vm_pool_elem_ptrs) + pool_len * sizeof(struct u6a_vm_pool_elem*);
ctx->holes = malloc(holes_size);
if (UNLIKELY(ctx->holes == NULL)) {
u6a_err_bad_alloc(err_stage, holes_size);
free(ctx->active_pool);
return false;
}
const uint32_t free_stack_size = ins_len * sizeof(struct vm_pool_elem*);
fstack = malloc(free_stack_size);
if (UNLIKELY(fstack == NULL)) {
u6a_err_bad_alloc(err_stage_, free_stack_size);
free(active_pool);
free(holes);
ctx->fstack = malloc(free_stack_size);
if (UNLIKELY(ctx->fstack == NULL)) {
u6a_err_bad_alloc(err_stage, free_stack_size);
free(ctx->active_pool);
free(ctx->holes);
return false;
}
active_pool->pos = UINT32_MAX;
holes->pos = UINT32_MAX;
pool_len = pool_len_;
err_stage = err_stage_;
ctx->active_pool->pos = UINT32_MAX;
ctx->holes->pos = UINT32_MAX;
ctx->pool_len = pool_len;
ctx->err_stage = err_stage;
return true;
}
U6A_HOT uint32_t
u6a_vm_pool_alloc1(struct u6a_vm_var_fn v1) {
struct vm_pool_elem* elem = vm_pool_elem_alloc();
if (UNLIKELY(elem == NULL)) {
return UINT32_MAX;
}
elem->values = (struct u6a_vm_var_tuple) { .v1.fn = v1, .v2.ptr = NULL };
elem->flags = 0;
return elem - active_pool->elems;
}
U6A_HOT uint32_t
u6a_vm_pool_alloc2(struct u6a_vm_var_fn v1, struct u6a_vm_var_fn v2) {
struct vm_pool_elem* elem = vm_pool_elem_alloc();
if (UNLIKELY(elem == NULL)) {
return UINT32_MAX;
}
elem->values = (struct u6a_vm_var_tuple) { .v1.fn = v1, .v2.fn = v2 };
elem->flags = 0;
return elem - active_pool->elems;
}
U6A_HOT uint32_t
u6a_vm_pool_alloc2_ptr(void* v1, void* v2) {
struct vm_pool_elem* elem = vm_pool_elem_alloc();
if (UNLIKELY(elem == NULL)) {
return UINT32_MAX;
}
elem->values = (struct u6a_vm_var_tuple) { .v1.ptr = v1, .v2.ptr = v2 };
elem->flags = POOL_ELEM_HOLDS_PTR;
return elem - active_pool->elems;
}
U6A_HOT union u6a_vm_var
u6a_vm_pool_get1(uint32_t offset) {
return active_pool->elems[offset].values.v1;
}
U6A_HOT struct u6a_vm_var_tuple
u6a_vm_pool_get2(uint32_t offset) {
return active_pool->elems[offset].values;
}
U6A_HOT struct u6a_vm_var_tuple
u6a_vm_pool_get2_separate(uint32_t offset) {
struct vm_pool_elem* elem = active_pool->elems + offset;
struct u6a_vm_var_tuple values = elem->values;
if (elem->refcnt > 1) {
// Continuation having more than 1 reference should be separated before reinstatement
values.v1.ptr = u6a_vm_stack_dup(values.v1.ptr);
}
return values;
}
U6A_HOT void
u6a_vm_pool_addref(uint32_t offset) {
++active_pool->elems[offset].refcnt;
}
U6A_HOT void
u6a_vm_pool_free(uint32_t offset) {
struct vm_pool_elem* elem = active_pool->elems + offset;
fstack_top = UINT32_MAX;
do {
if (--elem->refcnt == 0) {
holes->elems[++holes->pos] = elem;
if (elem->flags & POOL_ELEM_HOLDS_PTR) {
// Continuation destroyed before used
u6a_vm_stack_discard(elem->values.v1.ptr);
} else {
free_stack_push(elem->values.v2.fn);
free_stack_push(elem->values.v1.fn);
}
}
} while ((elem = free_stack_pop()));
}
void
u6a_vm_pool_destroy() {
free(active_pool);
free(holes);
free(fstack);
u6a_vm_pool_destroy(struct u6a_vm_pool_ctx* ctx) {
free(ctx->active_pool);
free(ctx->holes);
free(ctx->fstack);
}

162
src/vm_pool.h

@ -22,38 +22,164 @@
#include "common.h"
#include "vm_defs.h"
#include "logging.h"
#include <stdint.h>
#include <stdbool.h>
// One reference-counted pool slot holding a pair of VM variables.
struct u6a_vm_pool_elem {
struct u6a_vm_var_tuple values;
uint32_t refcnt;
uint32_t flags;
};
// Flag: values.v1/.v2 hold raw pointers (a saved continuation), not fn tokens.
#define U6A_VM_POOL_ELEM_HOLDS_PTR ( 1 << 0 )
// Element pool; `pos` is the high-water mark (UINT32_MAX when empty),
// `elems` is a C99 flexible array member sized at allocation time.
struct u6a_vm_pool {
uint32_t pos;
struct u6a_vm_pool_elem elems[];
};
// Stack of recycled ("hole") slots; `pos` indexes the top, UINT32_MAX = empty.
struct u6a_vm_pool_elem_ptrs {
uint32_t pos;
struct u6a_vm_pool_elem* elems[];
};
// All state of one VM pool instance, threaded through every pool call
// (replaces the former file-scope globals).
struct u6a_vm_pool_ctx {
struct u6a_vm_pool* active_pool;
struct u6a_vm_pool_elem_ptrs* holes;
struct u6a_vm_pool_elem** fstack;
struct u6a_vm_stack_ctx* stack_ctx;
uint32_t pool_len;
uint32_t fstack_top;
const char* err_stage;
};
/* Queue a variable's pool element for deferred release; non-reference
 * tokens carry no pool storage and are skipped. */
static inline void
free_stack_push(struct u6a_vm_pool_ctx* ctx, struct u6a_vm_var_fn fn) {
    if (!(fn.token.fn & U6A_VM_FN_REF)) {
        return;
    }
    ctx->fstack[++ctx->fstack_top] = ctx->active_pool->elems + fn.ref;
}
/* Take the next element queued for deferred release, or NULL when the
 * stack is empty (fstack_top sentinel UINT32_MAX). */
static inline struct u6a_vm_pool_elem*
free_stack_pop(struct u6a_vm_pool_ctx* ctx) {
    return ctx->fstack_top == UINT32_MAX ? NULL : ctx->fstack[ctx->fstack_top--];
}
/*
 * Obtain a fresh pool element, preferring a recycled slot from the
 * holes list and otherwise bumping the pool's high-water mark.
 * Reports OOM and returns NULL when the pool is exhausted.
 * The returned element starts with refcnt == 1.
 */
static inline struct u6a_vm_pool_elem*
vm_pool_elem_alloc(struct u6a_vm_pool_ctx* ctx) {
    struct u6a_vm_pool_elem_ptrs* recycled = ctx->holes;
    struct u6a_vm_pool_elem* elem;
    if (recycled->pos != UINT32_MAX) {
        elem = recycled->elems[recycled->pos--];
    } else {
        struct u6a_vm_pool* pool = ctx->active_pool;
        if (UNLIKELY(++pool->pos == ctx->pool_len)) {
            u6a_err_vm_pool_oom(ctx->err_stage);
            return NULL;
        }
        elem = pool->elems + pool->pos;
    }
    elem->refcnt = 1;
    return elem;
}
/* Shallow-copy `elem` into a newly allocated slot (refcnt reset to 1 by
 * the allocator); returns NULL on pool exhaustion. */
static inline struct u6a_vm_pool_elem*
vm_pool_elem_dup(struct u6a_vm_pool_ctx* ctx, struct u6a_vm_pool_elem* elem) {
    struct u6a_vm_pool_elem* copy = vm_pool_elem_alloc(ctx);
    if (LIKELY(copy != NULL)) {
        *copy = *elem;
        copy->refcnt = 1;
    }
    return copy;
}
bool
u6a_vm_pool_init(uint32_t pool_len, uint32_t ins_len, const char* err_stage);
u6a_vm_pool_init(struct u6a_vm_pool_ctx* ctx, uint32_t pool_len, uint32_t ins_len, const char* err_stage);
uint32_t
u6a_vm_pool_alloc1(struct u6a_vm_var_fn v1);
/*
 * Allocate a pool element holding a single function variable (second
 * slot cleared). Returns the element's offset within the pool, or
 * UINT32_MAX on pool exhaustion.
 */
static inline uint32_t
u6a_vm_pool_alloc1(struct u6a_vm_pool_ctx* ctx, struct u6a_vm_var_fn v1) {
    struct u6a_vm_pool_elem* slot = vm_pool_elem_alloc(ctx);
    if (UNLIKELY(slot == NULL)) {
        return UINT32_MAX;
    }
    slot->values = (struct u6a_vm_var_tuple) { .v1.fn = v1, .v2.ptr = NULL };
    slot->flags = 0;
    return slot - ctx->active_pool->elems;
}
uint32_t
u6a_vm_pool_alloc2(struct u6a_vm_var_fn v1, struct u6a_vm_var_fn v2);
/*
 * Allocate a pool element holding two function variables.
 * Returns the element's pool offset, or UINT32_MAX on exhaustion.
 */
static inline uint32_t
u6a_vm_pool_alloc2(struct u6a_vm_pool_ctx* ctx, struct u6a_vm_var_fn v1, struct u6a_vm_var_fn v2) {
    struct u6a_vm_pool_elem* slot = vm_pool_elem_alloc(ctx);
    if (UNLIKELY(slot == NULL)) {
        return UINT32_MAX;
    }
    slot->values = (struct u6a_vm_var_tuple) { .v1.fn = v1, .v2.fn = v2 };
    slot->flags = 0;
    return slot - ctx->active_pool->elems;
}
uint32_t
u6a_vm_pool_alloc2_ptr(void* v1, void* v2);
/*
 * Allocate a pool element holding two raw pointers (a saved
 * continuation plus its resume point); the element is tagged with
 * U6A_VM_POOL_ELEM_HOLDS_PTR so u6a_vm_pool_free() knows to discard the
 * stack instead of recursing into fn tokens.
 * Returns the element's pool offset, or UINT32_MAX on exhaustion.
 */
static inline uint32_t
u6a_vm_pool_alloc2_ptr(struct u6a_vm_pool_ctx* ctx, void* v1, void* v2) {
    struct u6a_vm_pool_elem* slot = vm_pool_elem_alloc(ctx);
    if (UNLIKELY(slot == NULL)) {
        return UINT32_MAX;
    }
    slot->values = (struct u6a_vm_var_tuple) { .v1.ptr = v1, .v2.ptr = v2 };
    slot->flags = U6A_VM_POOL_ELEM_HOLDS_PTR;
    return slot - ctx->active_pool->elems;
}
union u6a_vm_var
u6a_vm_pool_get1(uint32_t offset);
static inline union u6a_vm_var
u6a_vm_pool_get1(struct u6a_vm_pool* pool, uint32_t offset) {
return pool->elems[offset].values.v1;
}
struct u6a_vm_var_tuple
u6a_vm_pool_get2(uint32_t offset);
static inline struct u6a_vm_var_tuple
u6a_vm_pool_get2(struct u6a_vm_pool* pool, uint32_t offset) {
return pool->elems[offset].values;
}
struct u6a_vm_var_tuple
u6a_vm_pool_get2_separate(uint32_t offset);
static inline struct u6a_vm_var_tuple
u6a_vm_pool_get2_separate(struct u6a_vm_pool_ctx* ctx, uint32_t offset) {
struct u6a_vm_pool_elem* elem = ctx->active_pool->elems + offset;
struct u6a_vm_var_tuple values = elem->values;
if (elem->refcnt > 1) {
// Continuation having more than 1 reference should be separated before reinstatement
values.v1.ptr = u6a_vm_stack_dup(ctx->stack_ctx, values.v1.ptr);
}
return values;
}
void
u6a_vm_pool_addref(uint32_t offset);
/* Take one additional reference on the element at `offset`. */
static inline void
u6a_vm_pool_addref(struct u6a_vm_pool* pool, uint32_t offset) {
    pool->elems[offset].refcnt += 1;
}
void
u6a_vm_pool_free(uint32_t offset);
// Drop one reference from the element at `offset` and, when any element's
// refcount reaches zero, return its slot to the holes list and cascade
// the release to the variables it held. The cascade is iterative: child
// references are pushed onto ctx->fstack and processed in a loop,
// avoiding unbounded recursion on long reference chains.
static inline void
u6a_vm_pool_free(struct u6a_vm_pool_ctx* ctx, uint32_t offset) {
struct u6a_vm_pool_elem* elem = ctx->active_pool->elems + offset;
struct u6a_vm_pool_elem_ptrs* holes = ctx->holes;
// Reset the deferred-free stack to empty (UINT32_MAX sentinel) — each
// top-level free call owns the whole stack for its cascade.
ctx->fstack_top = UINT32_MAX;
do {
if (--elem->refcnt == 0) {
// Slot is dead: make it reusable by the allocator.
holes->elems[++holes->pos] = elem;
if (elem->flags & U6A_VM_POOL_ELEM_HOLDS_PTR) {
// Continuation destroyed before used
u6a_vm_stack_discard(ctx->stack_ctx, elem->values.v1.ptr);
} else {
// Queue held variables for release; v2 before v1 so v1 is
// processed first (LIFO order).
free_stack_push(ctx, elem->values.v2.fn);
free_stack_push(ctx, elem->values.v1.fn);
}
}
} while ((elem = free_stack_pop(ctx)));
}
void
u6a_vm_pool_destroy();
u6a_vm_pool_destroy(struct u6a_vm_pool_ctx* ctx);
#endif

249
src/vm_stack.c

@ -25,24 +25,12 @@
#include <stdlib.h>
#include <string.h>
struct vm_stack {
struct vm_stack* prev;
uint32_t top;
uint32_t refcnt;
struct u6a_vm_var_fn elems[];
};
static struct vm_stack* active_stack;
static uint32_t stack_seg_len;
const char* err_stage;
static inline struct vm_stack*
vm_stack_create(struct vm_stack* prev, uint32_t top) {
const uint32_t size = sizeof(struct vm_stack) + stack_seg_len * sizeof(struct u6a_vm_var_fn);
struct vm_stack* vs = malloc(size);
static inline struct u6a_vm_stack*
vm_stack_create(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* prev, uint32_t top) {
const uint32_t size = sizeof(struct u6a_vm_stack) + ctx->stack_seg_len * sizeof(struct u6a_vm_var_fn);
struct u6a_vm_stack* vs = malloc(size);
if (UNLIKELY(vs == NULL)) {
u6a_err_bad_alloc(err_stage, size);
u6a_err_bad_alloc(ctx->err_stage, size);
return NULL;
}
vs->prev = prev;
@ -51,20 +39,20 @@ vm_stack_create(struct vm_stack* prev, uint32_t top) {
return vs;
}
static inline struct vm_stack*
vm_stack_dup(struct vm_stack* vs) {
const uint32_t size = sizeof(struct vm_stack) + stack_seg_len * sizeof(struct u6a_vm_var_fn);
struct vm_stack* dup_stack = malloc(size);
static inline struct u6a_vm_stack*
vm_stack_dup(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
const uint32_t size = sizeof(struct u6a_vm_stack) + ctx->stack_seg_len * sizeof(struct u6a_vm_var_fn);
struct u6a_vm_stack* dup_stack = malloc(size);
if (UNLIKELY(dup_stack == NULL)) {
u6a_err_bad_alloc(err_stage, size);
u6a_err_bad_alloc(ctx->err_stage, size);
return NULL;
}
memcpy(dup_stack, vs, sizeof(struct vm_stack) + (vs->top + 1) * sizeof(struct u6a_vm_var_fn));
memcpy(dup_stack, vs, sizeof(struct u6a_vm_stack) + (vs->top + 1) * sizeof(struct u6a_vm_var_fn));
dup_stack->refcnt = 0;
for (uint32_t idx = vs->top; idx < UINT32_MAX; --idx) {
struct u6a_vm_var_fn elem = vs->elems[idx];
if (elem.token.fn & U6A_VM_FN_REF) {
u6a_vm_pool_addref(elem.ref);
u6a_vm_pool_addref(ctx->pool_ctx->active_pool, elem.ref);
}
}
if (vs->prev) {
@ -74,8 +62,8 @@ vm_stack_dup(struct vm_stack* vs) {
}
static inline void
vm_stack_free(struct vm_stack* vs) {
struct vm_stack* prev;
vm_stack_free(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
struct u6a_vm_stack* prev;
vs->refcnt = 1;
do {
prev = vs->prev;
@ -83,7 +71,7 @@ vm_stack_free(struct vm_stack* vs) {
for (uint32_t idx = vs->top; idx < UINT32_MAX; --idx) {
struct u6a_vm_var_fn elem = vs->elems[idx];
if (elem.token.fn & U6A_VM_FN_REF) {
u6a_vm_pool_free(elem.ref);
u6a_vm_pool_free(ctx->pool_ctx, elem.ref);
}
}
free(vs);
@ -95,190 +83,139 @@ vm_stack_free(struct vm_stack* vs) {
}
bool
u6a_vm_stack_init(uint32_t stack_seg_len_, const char* err_stage_) {
stack_seg_len = stack_seg_len_;
err_stage = err_stage_;
active_stack = vm_stack_create(NULL, UINT32_MAX);
return active_stack != NULL;
}
U6A_HOT struct u6a_vm_var_fn
u6a_vm_stack_top() {
struct vm_stack* vs = active_stack;
if (UNLIKELY(vs->top == UINT32_MAX)) {
vs = vs->prev;
if (UNLIKELY(vs == NULL)) {
return U6A_VM_VAR_FN_EMPTY;
}
active_stack = vs;
}
return vs->elems[vs->top];
u6a_vm_stack_init(struct u6a_vm_stack_ctx* ctx, uint32_t stack_seg_len, const char* err_stage) {
ctx->stack_seg_len = stack_seg_len;
ctx->err_stage = err_stage;
ctx->active_stack = vm_stack_create(ctx, NULL, UINT32_MAX);
return ctx->active_stack != NULL;
}
// Boilerplates below. If only we have C++ templates here... (macros just make things nastier)
U6A_HOT bool
u6a_vm_stack_push1(struct u6a_vm_var_fn v0) {
struct vm_stack* vs = active_stack;
if (LIKELY(vs->top + 1 < stack_seg_len)) {
vs->elems[++vs->top] = v0;
return true;
}
active_stack = vm_stack_create(vs, 0);
if (UNLIKELY(active_stack == NULL)) {
active_stack = vs;
u6a_vm_stack_push1_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0) {
struct u6a_vm_stack* vs = ctx->active_stack;
ctx->active_stack = vm_stack_create(ctx, vs, 0);
if (UNLIKELY(ctx->active_stack == NULL)) {
ctx->active_stack = vs;
return false;
}
++vs->refcnt;
active_stack->elems[0] = v0;
ctx->active_stack->elems[0] = v0;
return true;
}
U6A_HOT bool
u6a_vm_stack_push2(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1) {
struct vm_stack* vs = active_stack;
if (LIKELY(vs->top + 2 < stack_seg_len)) {
vs->elems[++vs->top] = v0;
vs->elems[++vs->top] = v1;
return true;
}
active_stack = vm_stack_create(vs, 1);
if (UNLIKELY(active_stack == NULL)) {
active_stack = vs;
u6a_vm_stack_push2_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1) {
struct u6a_vm_stack* vs = ctx->active_stack;
ctx->active_stack = vm_stack_create(ctx, vs, 1);
if (UNLIKELY(ctx->active_stack == NULL)) {
ctx->active_stack = vs;
return false;
}
++vs->refcnt;
active_stack->elems[0] = v0;
active_stack->elems[1] = v1;
ctx->active_stack->elems[0] = v0;
ctx->active_stack->elems[1] = v1;
return true;
}
U6A_HOT bool
u6a_vm_stack_push3(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1, struct u6a_vm_var_fn v2) {
struct vm_stack* vs = active_stack;
if (LIKELY(vs->top + 3 < stack_seg_len)) {
vs->elems[++vs->top] = v0;
vs->elems[++vs->top] = v1;
vs->elems[++vs->top] = v2;
return true;
}
active_stack = vm_stack_create(vs, 2);
if (UNLIKELY(active_stack == NULL)) {
active_stack = vs;
u6a_vm_stack_push3_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
struct u6a_vm_var_fn v2)
{
struct u6a_vm_stack* vs = ctx->active_stack;
ctx->active_stack = vm_stack_create(ctx, vs, 2);
if (UNLIKELY(ctx->active_stack == NULL)) {
ctx->active_stack = vs;
return false;
}
++vs->refcnt;
active_stack->elems[0] = v0;
active_stack->elems[1] = v1;
active_stack->elems[2] = v2;
ctx->active_stack->elems[0] = v0;
ctx->active_stack->elems[1] = v1;
ctx->active_stack->elems[2] = v2;
return true;
}
U6A_HOT bool
u6a_vm_stack_push4(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
struct u6a_vm_var_fn v2, struct u6a_vm_var_fn v3) {
struct vm_stack* vs = active_stack;
if (LIKELY(vs->top + 4 < stack_seg_len)) {
vs->elems[++vs->top] = v0;
vs->elems[++vs->top] = v1;
vs->elems[++vs->top] = v2;
vs->elems[++vs->top] = v3;
return true;
}
active_stack = vm_stack_create(vs, 3);
if (UNLIKELY(active_stack == NULL)) {
active_stack = vs;
u6a_vm_stack_push4_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
struct u6a_vm_var_fn v2, struct u6a_vm_var_fn v3)
{
struct u6a_vm_stack* vs = ctx->active_stack;
ctx->active_stack = vm_stack_create(ctx, vs, 3);
if (UNLIKELY(ctx->active_stack == NULL)) {
ctx->active_stack = vs;
return false;
}
++vs->refcnt;
active_stack->elems[0] = v0;
active_stack->elems[1] = v1;
active_stack->elems[2] = v2;
active_stack->elems[3] = v3;
ctx->active_stack->elems[0] = v0;
ctx->active_stack->elems[1] = v1;
ctx->active_stack->elems[2] = v2;
ctx->active_stack->elems[3] = v3;
return true;
}
U6A_HOT bool
u6a_vm_stack_pop() {
struct vm_stack* vs = active_stack;
if (LIKELY(vs->top-- != UINT32_MAX)) {
return true;
}
active_stack = vs->prev;
if (UNLIKELY(active_stack == NULL)) {
u6a_err_stack_underflow(err_stage);
active_stack = vs;
u6a_vm_stack_pop_split_(struct u6a_vm_stack_ctx* ctx) {
struct u6a_vm_stack* vs = ctx->active_stack;
ctx->active_stack = vs->prev;
if (UNLIKELY(ctx->active_stack == NULL)) {
u6a_err_stack_underflow(ctx->err_stage);
ctx->active_stack = vs;
return false;
}
if (--active_stack->refcnt > 0) {
active_stack = vm_stack_dup(active_stack);
if (--ctx->active_stack->refcnt > 0) {
ctx->active_stack = vm_stack_dup(ctx, ctx->active_stack);
}
if (UNLIKELY(active_stack == NULL)) {
active_stack = vs;
if (UNLIKELY(ctx->active_stack == NULL)) {
ctx->active_stack = vs;
return false;
}
free(vs);
--active_stack->top;
--ctx->active_stack->top;
return true;
}
struct u6a_vm_var_fn
u6a_vm_stack_xch(struct u6a_vm_var_fn v0) {
struct vm_stack* vs = active_stack;
U6A_HOT struct u6a_vm_var_fn
u6a_vm_stack_xch_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0) {
struct u6a_vm_stack* vs = ctx->active_stack;
struct u6a_vm_var_fn elem;
// XCH on segmented stacks is inefficient, perhaps there's a better solution?
if (LIKELY(vs->top != 0 && vs->top != UINT32_MAX)) {
elem = vs->elems[vs->top - 1];
vs->elems[vs->top - 1] = v0;
} else {
struct vm_stack* prev = vs->prev;
struct u6a_vm_stack* prev = vs->prev;
if (UNLIKELY(prev == NULL)) {
u6a_err_stack_underflow(ctx->err_stage);
return U6A_VM_VAR_FN_EMPTY;
}
if (--prev->refcnt > 0) {
prev = vm_stack_dup(ctx, prev);
if (UNLIKELY(prev == NULL)) {
u6a_err_stack_underflow(err_stage);
return U6A_VM_VAR_FN_EMPTY;
}
if (--prev->refcnt > 0) {
prev = vm_stack_dup(prev);
if (UNLIKELY(prev == NULL)) {
return U6A_VM_VAR_FN_EMPTY;
}
}
if (vs->top == 0) {
++prev->refcnt;
vs->prev = prev;
elem = prev->elems[prev->top];
prev->elems[prev->top] = v0;
} else {
free(vs);
active_stack = prev;
elem = prev->elems[prev->top - 1];
prev->elems[prev->top - 1] = v0;
}
}
if (vs->top == 0) {
++prev->refcnt;
vs->prev = prev;
elem = prev->elems[prev->top];
prev->elems[prev->top] = v0;
} else {
free(vs);
ctx->active_stack = prev;
elem = prev->elems[prev->top - 1];
prev->elems[prev->top - 1] = v0;
}
return elem;
}
void*
u6a_vm_stack_save() {
return vm_stack_dup(active_stack);
}
void*
u6a_vm_stack_dup(void* ptr) {
return vm_stack_dup(ptr);
}
void
u6a_vm_stack_resume(void* ptr) {
u6a_vm_stack_destroy();
active_stack = ptr;
struct u6a_vm_stack*
u6a_vm_stack_dup(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
return vm_stack_dup(ctx, vs);
}
void
u6a_vm_stack_discard(void* ptr) {
vm_stack_free(ptr);
u6a_vm_stack_discard(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
vm_stack_free(ctx, vs);
}
void
u6a_vm_stack_destroy() {
vm_stack_free(active_stack);
u6a_vm_stack_destroy(struct u6a_vm_stack_ctx* ctx) {
vm_stack_free(ctx, ctx->active_stack);
}

147
src/vm_stack.h

@ -26,46 +26,147 @@
#include <stdint.h>
#include <stdbool.h>
bool
u6a_vm_stack_init(uint32_t stack_seg_len, const char* err_stage);
struct u6a_vm_var_fn
u6a_vm_stack_top();
// One segment of the segmented VM stack. Segments form a singly linked
// chain through `prev`; `top` indexes the highest used slot of `elems`
// (UINT32_MAX when the segment is empty) and `refcnt` counts the saved
// continuations sharing this segment. `elems` is a C99 flexible array
// member sized to stack_seg_len at allocation time.
struct u6a_vm_stack {
struct u6a_vm_stack* prev;
uint32_t top;
uint32_t refcnt;
struct u6a_vm_var_fn elems[];
};
// All state of one VM stack instance, threaded through every stack call
// (replaces the former file-scope globals). Linked both ways with the
// pool context via `pool_ctx`.
struct u6a_vm_stack_ctx {
struct u6a_vm_stack* active_stack;
uint32_t stack_seg_len;
struct u6a_vm_pool_ctx* pool_ctx;
const char* err_stage;
};
bool
u6a_vm_stack_push1(struct u6a_vm_var_fn v0);
u6a_vm_stack_init(struct u6a_vm_stack_ctx* ctx, uint32_t stack_seg_len, const char* err_stage);
/*
 * Peek at the topmost stack element without popping it. When the
 * active segment is empty the previous segment becomes active and its
 * top element is returned instead. Returns U6A_VM_VAR_FN_EMPTY when the
 * whole stack is exhausted.
 */
static inline struct u6a_vm_var_fn
u6a_vm_stack_top(struct u6a_vm_stack_ctx* ctx) {
    struct u6a_vm_stack* seg = ctx->active_stack;
    if (LIKELY(seg->top != UINT32_MAX)) {
        return seg->elems[seg->top];
    }
    seg = seg->prev;
    if (UNLIKELY(seg == NULL)) {
        return U6A_VM_VAR_FN_EMPTY;
    }
    ctx->active_stack = seg;
    return seg->elems[seg->top];
}
bool
u6a_vm_stack_push2(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1);
// Functions push3 and push4 are made for the s2 function to alleviate overhead caused by hot split
u6a_vm_stack_push1_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0);
/*
 * Push one value onto the stack. Fast path: store into the active
 * segment when it has room; otherwise defer to the out-of-line splitter
 * that chains a fresh segment.
 */
static inline bool
u6a_vm_stack_push1(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0) {
    struct u6a_vm_stack* seg = ctx->active_stack;
    if (UNLIKELY(seg->top + 1 >= ctx->stack_seg_len)) {
        return u6a_vm_stack_push1_split_(ctx, v0);
    }
    seg->elems[++seg->top] = v0;
    return true;
}
bool
u6a_vm_stack_push3(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1, struct u6a_vm_var_fn v2);
u6a_vm_stack_push2_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1);
/* Push two values (v0 below v1). Falls back to the out-of-line splitter
 * when the active segment lacks room for both. */
static inline bool
u6a_vm_stack_push2(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1) {
    struct u6a_vm_stack* seg = ctx->active_stack;
    if (UNLIKELY(seg->top + 2 >= ctx->stack_seg_len)) {
        return u6a_vm_stack_push2_split_(ctx, v0, v1);
    }
    seg->elems[seg->top + 1] = v0;
    seg->elems[seg->top + 2] = v1;
    seg->top += 2;
    return true;
}
bool
u6a_vm_stack_push4(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
struct u6a_vm_var_fn v2, struct u6a_vm_var_fn v3);
u6a_vm_stack_push3_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
struct u6a_vm_var_fn v2);
/* Push three values (v0 lowest). push3/push4 exist to cut hot-split
 * overhead for the s2 function's multi-push sequences. */
static inline bool
u6a_vm_stack_push3(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
                   struct u6a_vm_var_fn v2)
{
    struct u6a_vm_stack* seg = ctx->active_stack;
    if (UNLIKELY(seg->top + 3 >= ctx->stack_seg_len)) {
        return u6a_vm_stack_push3_split_(ctx, v0, v1, v2);
    }
    seg->elems[seg->top + 1] = v0;
    seg->elems[seg->top + 2] = v1;
    seg->elems[seg->top + 3] = v2;
    seg->top += 3;
    return true;
}
bool
u6a_vm_stack_pop();
u6a_vm_stack_push4_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
struct u6a_vm_var_fn v2, struct u6a_vm_var_fn v3);
/* Push four values (v0 lowest); out-of-line splitter handles the
 * segment-full case. */
static inline bool
u6a_vm_stack_push4(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
                   struct u6a_vm_var_fn v2, struct u6a_vm_var_fn v3)
{
    struct u6a_vm_stack* seg = ctx->active_stack;
    if (UNLIKELY(seg->top + 4 >= ctx->stack_seg_len)) {
        return u6a_vm_stack_push4_split_(ctx, v0, v1, v2, v3);
    }
    seg->elems[seg->top + 1] = v0;
    seg->elems[seg->top + 2] = v1;
    seg->elems[seg->top + 3] = v2;
    seg->elems[seg->top + 4] = v3;
    seg->top += 4;
    return true;
}
struct u6a_vm_var_fn
u6a_vm_stack_xch(struct u6a_vm_var_fn v1);
bool
u6a_vm_stack_pop_split_(struct u6a_vm_stack_ctx* ctx);
void*
u6a_vm_stack_save();
/*
 * Drop the top element. Fast path: just decrement `top` within the
 * active segment. When the segment was already empty, the out-of-line
 * path switches to the previous segment. Note that `top` is decremented
 * (wrapping past UINT32_MAX) before the slow path runs, exactly as the
 * original fast/slow split did.
 */
static inline bool
u6a_vm_stack_pop(struct u6a_vm_stack_ctx* ctx) {
    struct u6a_vm_stack* seg = ctx->active_stack;
    const uint32_t old_top = seg->top;
    seg->top = old_top - 1;
    if (LIKELY(old_top != UINT32_MAX)) {
        return true;
    }
    return u6a_vm_stack_pop_split_(ctx);
}
void*
u6a_vm_stack_dup(void* ptr);
struct u6a_vm_var_fn
u6a_vm_stack_xch_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0);
static inline struct u6a_vm_var_fn
u6a_vm_stack_xch(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0) {
struct u6a_vm_stack* vs = ctx->active_stack;
struct u6a_vm_var_fn elem;
// XCH on segmented stacks is inefficient, perhaps there's a better solution?
if (LIKELY(vs->top != 0 && vs->top != UINT32_MAX)) {
elem = vs->elems[vs->top - 1];
vs->elems[vs->top - 1] = v0;
} else {
elem = u6a_vm_stack_xch_split_(ctx, v0);
}
return elem;
}
struct u6a_vm_stack*
u6a_vm_stack_dup(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs);
/* Capture the current stack as a continuation: returns a duplicate of
 * the active segment chain (NULL on allocation failure). */
static inline struct u6a_vm_stack*
u6a_vm_stack_save(struct u6a_vm_stack_ctx* ctx) {
    struct u6a_vm_stack* current = ctx->active_stack;
    return u6a_vm_stack_dup(ctx, current);
}
void
u6a_vm_stack_resume(void* ptr);
u6a_vm_stack_discard(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs);
void
u6a_vm_stack_discard(void* ptr);
u6a_vm_stack_destroy(struct u6a_vm_stack_ctx* ctx);
void
u6a_vm_stack_destroy();
// Reinstate a previously saved continuation `vs`: the current live
// stack is torn down first, then `vs` becomes the active stack.
// Ownership of `vs` transfers to the context.
static inline void
u6a_vm_stack_resume(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
u6a_vm_stack_destroy(ctx);
ctx->active_stack = vs;
}
#endif

Loading…
Cancel
Save