u6a/src/vm_stack.c

/*
 * vm_stack.c - Unlambda VM segmented stacks
 *
 * Copyright (C) 2020 CismonX <admin@cismon.net>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */
#include "vm_stack.h"
2020-02-04 18:47:45 +00:00
#include "vm_pool.h"
2020-02-05 16:59:11 +00:00
#include "logging.h"
2020-01-30 10:11:10 +00:00
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
2020-06-15 12:22:50 +00:00
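// The VM stack is a singly-linked chain of fixed-size segments:
//
//     active_stack -> [seg N] -> [seg N-1] -> ... -> [seg 0] -> NULL
//
// Each segment holds up to ctx->stack_seg_len elements, with `top` indexing
// the topmost live element (UINT32_MAX meaning empty). `refcnt` counts how
// many other segments and saved stacks link to a segment, so shared segments
// are copied before mutation and freed only with their last reference --
// presumably so that capturing continuations (Unlambda's `c`) stays cheap.

// Allocate a fresh segment on top of `prev`, with its top index preset to
// `top`. Reports the error and returns NULL on allocation failure.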
static inline struct u6a_vm_stack*
vm_stack_create(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* prev, uint32_t top) {
    const uint32_t size = sizeof(struct u6a_vm_stack) + ctx->stack_seg_len * sizeof(struct u6a_vm_var_fn);
    struct u6a_vm_stack* vs = malloc(size);
    if (UNLIKELY(vs == NULL)) {
        u6a_err_bad_alloc(ctx->err_stage, size);
        return NULL;
    }
    vs->prev = prev;
    vs->top = top;
    vs->refcnt = 0;
    return vs;
}
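// Copy-on-write helper: shallow-copy segment `vs` (header plus live
// elements), take an extra reference on every pool object the elements
// point to (iterating down from `top`; the index wrapping past zero ends
// the loop), and bump the refcount of the previous segment, which the
// copy now links to as well.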
static inline struct u6a_vm_stack*
vm_stack_dup(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
    const uint32_t size = sizeof(struct u6a_vm_stack) + ctx->stack_seg_len * sizeof(struct u6a_vm_var_fn);
    struct u6a_vm_stack* dup_stack = malloc(size);
    if (UNLIKELY(dup_stack == NULL)) {
        u6a_err_bad_alloc(ctx->err_stage, size);
        return NULL;
    }
    memcpy(dup_stack, vs, sizeof(struct u6a_vm_stack) + (vs->top + 1) * sizeof(struct u6a_vm_var_fn));
    dup_stack->refcnt = 0;
    for (uint32_t idx = vs->top; idx < UINT32_MAX; --idx) {
        struct u6a_vm_var_fn elem = vs->elems[idx];
        if (elem.token.fn & U6A_VM_FN_REF) {
            u6a_vm_pool_addref(ctx->pool_ctx->active_pool, elem.ref);
        }
    }
    if (vs->prev) {
        ++vs->prev->refcnt;
    }
    return dup_stack;
}
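// Destroy segment `vs` unconditionally (its refcnt is overwritten with 1 so
// the loop below handles it uniformly), releasing the pool references held
// by its elements, then walk down the chain releasing one reference per
// segment, stopping at the first segment still referenced elsewhere.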
static inline void
vm_stack_free(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
    struct u6a_vm_stack* prev;
    vs->refcnt = 1;
    do {
        prev = vs->prev;
        if (--vs->refcnt == 0) {
            for (uint32_t idx = vs->top; idx < UINT32_MAX; --idx) {
                struct u6a_vm_var_fn elem = vs->elems[idx];
                if (elem.token.fn & U6A_VM_FN_REF) {
                    u6a_vm_pool_free(ctx->pool_ctx, elem.ref);
                }
            }
            free(vs);
            vs = prev;
        } else {
            break;
        }
    } while (vs);
}
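// Set up the stack context with the given segment length, starting from a
// single empty segment. Returns false on allocation failure.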
bool
u6a_vm_stack_init(struct u6a_vm_stack_ctx* ctx, uint32_t stack_seg_len, const char* err_stage) {
    ctx->stack_seg_len = stack_seg_len;
    ctx->err_stage = err_stage;
    ctx->active_stack = vm_stack_create(ctx, NULL, UINT32_MAX);
    return ctx->active_stack != NULL;
}
// Boilerplate below. If only we had C++ templates here... (macros just make things nastier)
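// Slow paths for pushing 1-4 elements when the active segment is out of
// room: create a fresh segment on top of the old one (which gains a
// reference), preset so `top` lands on the last element pushed. On
// allocation failure the old active segment is restored and false returned.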
U6A_HOT bool
u6a_vm_stack_push1_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0) {
    struct u6a_vm_stack* vs = ctx->active_stack;
    ctx->active_stack = vm_stack_create(ctx, vs, 0);
    if (UNLIKELY(ctx->active_stack == NULL)) {
        ctx->active_stack = vs;
        return false;
    }
    ++vs->refcnt;
    ctx->active_stack->elems[0] = v0;
    return true;
}
U6A_HOT bool
u6a_vm_stack_push2_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1) {
    struct u6a_vm_stack* vs = ctx->active_stack;
    ctx->active_stack = vm_stack_create(ctx, vs, 1);
    if (UNLIKELY(ctx->active_stack == NULL)) {
        ctx->active_stack = vs;
        return false;
    }
    ++vs->refcnt;
    ctx->active_stack->elems[0] = v0;
    ctx->active_stack->elems[1] = v1;
    return true;
}
U6A_HOT bool
u6a_vm_stack_push3_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
                          struct u6a_vm_var_fn v2)
{
    struct u6a_vm_stack* vs = ctx->active_stack;
    ctx->active_stack = vm_stack_create(ctx, vs, 2);
    if (UNLIKELY(ctx->active_stack == NULL)) {
        ctx->active_stack = vs;
        return false;
    }
    ++vs->refcnt;
    ctx->active_stack->elems[0] = v0;
    ctx->active_stack->elems[1] = v1;
    ctx->active_stack->elems[2] = v2;
    return true;
}
U6A_HOT bool
u6a_vm_stack_push4_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1,
                          struct u6a_vm_var_fn v2, struct u6a_vm_var_fn v3)
{
    struct u6a_vm_stack* vs = ctx->active_stack;
    ctx->active_stack = vm_stack_create(ctx, vs, 3);
    if (UNLIKELY(ctx->active_stack == NULL)) {
        ctx->active_stack = vs;
        return false;
    }
    ++vs->refcnt;
    ctx->active_stack->elems[0] = v0;
    ctx->active_stack->elems[1] = v1;
    ctx->active_stack->elems[2] = v2;
    ctx->active_stack->elems[3] = v3;
    return true;
}
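// Slow path for popping when the active segment is exhausted: the previous
// segment becomes active (duplicated first if still shared), its top element
// is dropped, and the exhausted segment is freed. Returns false on underflow
// or allocation failure, restoring the old active segment.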
U6A_HOT bool
u6a_vm_stack_pop_split_(struct u6a_vm_stack_ctx* ctx) {
    struct u6a_vm_stack* vs = ctx->active_stack;
    ctx->active_stack = vs->prev;
    if (UNLIKELY(ctx->active_stack == NULL)) {
        u6a_err_stack_underflow(ctx->err_stage);
        ctx->active_stack = vs;
        return false;
    }
    if (--ctx->active_stack->refcnt > 0) {
        ctx->active_stack = vm_stack_dup(ctx, ctx->active_stack);
    }
    if (UNLIKELY(ctx->active_stack == NULL)) {
        ctx->active_stack = vs;
        return false;
    }
    free(vs);
    --ctx->active_stack->top;
    return true;
}
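// Slow path for XCH when the element below the stack top lives in the
// previous segment: swap `v0` into that slot (duplicating the segment first
// if it is shared) and return the displaced element, or U6A_VM_VAR_FN_EMPTY
// on underflow or allocation failure.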
U6A_HOT struct u6a_vm_var_fn
u6a_vm_stack_xch_split_(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_var_fn v0) {
    struct u6a_vm_stack* vs = ctx->active_stack;
    struct u6a_vm_var_fn elem;
    // XCH on segmented stacks is inefficient, perhaps there's a better solution?
    struct u6a_vm_stack* prev = vs->prev;
    if (UNLIKELY(prev == NULL)) {
        u6a_err_stack_underflow(ctx->err_stage);
        return U6A_VM_VAR_FN_EMPTY;
    }
    if (--prev->refcnt > 0) {
        prev = vm_stack_dup(ctx, prev);
        if (UNLIKELY(prev == NULL)) {
            return U6A_VM_VAR_FN_EMPTY;
        }
    }
    if (vs->top == 0) {
        ++prev->refcnt;
        vs->prev = prev;
        elem = prev->elems[prev->top];
        prev->elems[prev->top] = v0;
    } else {
        free(vs);
        ctx->active_stack = prev;
        elem = prev->elems[prev->top - 1];
        prev->elems[prev->top - 1] = v0;
    }
    return elem;
}
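// Public wrapper around vm_stack_dup(). Callers live outside this file;
// duplication presumably serves saving the current stack for a continuation.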
struct u6a_vm_stack*
u6a_vm_stack_dup(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
    return vm_stack_dup(ctx, vs);
}
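// Drop a whole stack (e.g. one previously duplicated for a continuation),
// freeing unshared segments and their pool references.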
void
u6a_vm_stack_discard(struct u6a_vm_stack_ctx* ctx, struct u6a_vm_stack* vs) {
    vm_stack_free(ctx, vs);
}
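// Tear down the stack context by releasing the active stack.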
void
u6a_vm_stack_destroy(struct u6a_vm_stack_ctx* ctx) {
    vm_stack_free(ctx, ctx->active_stack);
}