2020-01-30 10:11:10 +00:00
|
|
|
/*
|
|
|
|
* vm_stack.c - Unlambda VM segmented stacks
|
|
|
|
*
|
|
|
|
* Copyright (C) 2020 CismonX <admin@cismon.net>
|
|
|
|
*
|
|
|
|
* This program is free software: you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation, either version 3 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "vm_stack.h"
|
2020-02-04 18:47:45 +00:00
|
|
|
#include "vm_pool.h"
|
2020-02-05 16:59:11 +00:00
|
|
|
#include "logging.h"
|
2020-01-30 10:11:10 +00:00
|
|
|
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
/*
 * One segment of the segmented VM stack.
 *
 * Segments form a singly-linked list through `prev`; the youngest segment is
 * `active_stack`. Each segment stores up to `stack_seg_len` elements in the
 * trailing flexible array `elems`.
 */
struct vm_stack {
    // Next-older segment, or NULL for the bottom-most segment.
    struct vm_stack* prev;
    // Index of the topmost occupied slot; UINT32_MAX means the segment is empty.
    uint32_t top;
    // Number of other owners (younger segments or saved/duplicated stacks)
    // holding this segment as their `prev`. 0 while nothing else points here.
    uint32_t refcnt;
    // Element storage; capacity is stack_seg_len (see vm_stack_create).
    struct u6a_vm_var_fn elems[];
};

// Youngest (currently active) segment of the VM stack.
static struct vm_stack* active_stack;
// Capacity, in elements, of every segment; set once by u6a_vm_stack_init.
static uint32_t stack_seg_len;

// Stage label used in error reports (allocation failure, underflow); set by
// u6a_vm_stack_init. Non-static — presumably shared with another translation
// unit. TODO confirm.
const char* err_stage;
|
|
|
|
|
2020-01-30 10:11:10 +00:00
|
|
|
static inline struct vm_stack*
|
|
|
|
vm_stack_create(struct vm_stack* prev, uint32_t top) {
|
2020-02-02 17:09:21 +00:00
|
|
|
const uint32_t size = sizeof(struct vm_stack) + stack_seg_len * sizeof(struct u6a_vm_var_fn);
|
2020-01-30 10:11:10 +00:00
|
|
|
struct vm_stack* vs = malloc(size);
|
|
|
|
if (UNLIKELY(vs == NULL)) {
|
2020-02-05 16:59:11 +00:00
|
|
|
u6a_err_bad_alloc(err_stage, size);
|
2020-01-30 10:11:10 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2020-02-02 17:09:21 +00:00
|
|
|
vs->prev = prev;
|
|
|
|
vs->top = top;
|
2020-05-16 18:48:04 +00:00
|
|
|
vs->refcnt = 0;
|
2020-02-02 17:09:21 +00:00
|
|
|
return vs;
|
2020-01-30 10:11:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Duplicate segment `vs` so the copy can be mutated independently.
 * Occupied element slots are copied; every copied element carrying a pool
 * reference gains an extra pool refcount, and the shared `prev` segment
 * (which is NOT copied) gains an extra segment refcount. The copy itself
 * starts with refcnt == 0. Returns NULL on allocation failure.
 */
static inline struct vm_stack*
vm_stack_dup(struct vm_stack* vs) {
    const uint32_t size = sizeof(struct vm_stack) + stack_seg_len * sizeof(struct u6a_vm_var_fn);
    struct vm_stack* dup_stack = malloc(size);
    if (UNLIKELY(dup_stack == NULL)) {
        u6a_err_bad_alloc(err_stage, size);
        return NULL;
    }
    // Copy the header plus the (top + 1) occupied slots. For an empty segment
    // top == UINT32_MAX, so top + 1 wraps to 0 and only the header is copied.
    memcpy(dup_stack, vs, sizeof(struct vm_stack) + (vs->top + 1) * sizeof(struct u6a_vm_var_fn));
    dup_stack->refcnt = 0;
    // Walk from top down to 0; the unsigned index wraps past 0 to UINT32_MAX,
    // terminating the loop (immediately so for an empty segment).
    for (uint32_t idx = vs->top; idx < UINT32_MAX; --idx) {
        struct u6a_vm_var_fn elem = vs->elems[idx];
        if (elem.token.fn & U6A_VM_FN_REF) {
            u6a_vm_pool_addref(elem.ref);
        }
    }
    if (vs->prev) {
        ++vs->prev->refcnt;
    }
    return dup_stack;
}
|
|
|
|
|
|
|
|
/*
 * Release the chain of stack segments starting at `vs`, stopping at the first
 * segment that is still referenced by someone else. For every segment that is
 * actually freed, the pool references held by its occupied slots are released
 * as well.
 */
static inline void
vm_stack_free(struct vm_stack* vs) {
    struct vm_stack* prev;
    // Pretend the caller holds exactly one reference to the head segment, so
    // the `--vs->refcnt == 0` test below treats the head uniformly with the
    // rest of the chain.
    vs->refcnt = 1;
    do {
        prev = vs->prev;
        if (--vs->refcnt == 0) {
            // Last owner gone: drop the pool references of occupied slots.
            // The unsigned index wraps past 0 to UINT32_MAX, ending the loop.
            for (uint32_t idx = vs->top; idx < UINT32_MAX; --idx) {
                struct u6a_vm_var_fn elem = vs->elems[idx];
                if (elem.token.fn & U6A_VM_FN_REF) {
                    u6a_vm_pool_free(elem.ref);
                }
            }
            free(vs);
            vs = prev;
        } else {
            // Segment is still shared with another stack — keep it and stop.
            break;
        }
    } while (vs);
}
|
|
|
|
|
|
|
|
bool
|
2020-02-05 16:59:11 +00:00
|
|
|
u6a_vm_stack_init(uint32_t stack_seg_len_, const char* err_stage_) {
|
2020-01-30 10:11:10 +00:00
|
|
|
stack_seg_len = stack_seg_len_;
|
2020-02-05 16:59:11 +00:00
|
|
|
err_stage = err_stage_;
|
2020-02-08 16:51:31 +00:00
|
|
|
active_stack = vm_stack_create(NULL, UINT32_MAX);
|
2020-01-30 10:11:10 +00:00
|
|
|
return active_stack != NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
U6A_HOT struct u6a_vm_var_fn
|
|
|
|
u6a_vm_stack_top() {
|
|
|
|
struct vm_stack* vs = active_stack;
|
|
|
|
if (UNLIKELY(vs->top == UINT32_MAX)) {
|
|
|
|
vs = vs->prev;
|
|
|
|
if (UNLIKELY(vs == NULL)) {
|
2020-06-05 08:54:30 +00:00
|
|
|
return U6A_VM_VAR_FN_EMPTY;
|
2020-01-30 10:11:10 +00:00
|
|
|
}
|
|
|
|
active_stack = vs;
|
|
|
|
}
|
|
|
|
return vs->elems[vs->top];
|
|
|
|
}
|
|
|
|
|
|
|
|
// Boilerplates below. If only we have C++ templates here... (macros just make things nastier)
|
|
|
|
|
|
|
|
U6A_HOT bool
|
|
|
|
u6a_vm_stack_push1(struct u6a_vm_var_fn v0) {
|
|
|
|
struct vm_stack* vs = active_stack;
|
|
|
|
if (LIKELY(vs->top + 1 < stack_seg_len)) {
|
|
|
|
vs->elems[++vs->top] = v0;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
active_stack = vm_stack_create(vs, 0);
|
|
|
|
if (UNLIKELY(active_stack == NULL)) {
|
|
|
|
active_stack = vs;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
++vs->refcnt;
|
|
|
|
active_stack->elems[0] = v0;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
U6A_HOT bool
|
|
|
|
u6a_vm_stack_push2(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1) {
|
|
|
|
struct vm_stack* vs = active_stack;
|
|
|
|
if (LIKELY(vs->top + 2 < stack_seg_len)) {
|
|
|
|
vs->elems[++vs->top] = v0;
|
|
|
|
vs->elems[++vs->top] = v1;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
active_stack = vm_stack_create(vs, 1);
|
|
|
|
if (UNLIKELY(active_stack == NULL)) {
|
|
|
|
active_stack = vs;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
++vs->refcnt;
|
|
|
|
active_stack->elems[0] = v0;
|
|
|
|
active_stack->elems[1] = v1;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
U6A_HOT bool
|
|
|
|
u6a_vm_stack_push3(struct u6a_vm_var_fn v0, struct u6a_vm_var_tuple v12) {
|
|
|
|
struct vm_stack* vs = active_stack;
|
|
|
|
if (LIKELY(vs->top + 3 < stack_seg_len)) {
|
|
|
|
vs->elems[++vs->top] = v0;
|
|
|
|
vs->elems[++vs->top] = v12.v2.fn;
|
|
|
|
vs->elems[++vs->top] = v12.v1.fn;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
active_stack = vm_stack_create(vs, 2);
|
|
|
|
if (UNLIKELY(active_stack == NULL)) {
|
|
|
|
active_stack = vs;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
++vs->refcnt;
|
|
|
|
active_stack->elems[0] = v0;
|
|
|
|
active_stack->elems[1] = v12.v2.fn;
|
|
|
|
active_stack->elems[2] = v12.v1.fn;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
U6A_HOT bool
|
|
|
|
u6a_vm_stack_push4(struct u6a_vm_var_fn v0, struct u6a_vm_var_fn v1, struct u6a_vm_var_tuple v23) {
|
|
|
|
struct vm_stack* vs = active_stack;
|
|
|
|
if (LIKELY(vs->top + 4 < stack_seg_len)) {
|
|
|
|
vs->elems[++vs->top] = v0;
|
|
|
|
vs->elems[++vs->top] = v1;
|
|
|
|
vs->elems[++vs->top] = v23.v2.fn;
|
|
|
|
vs->elems[++vs->top] = v23.v1.fn;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
active_stack = vm_stack_create(vs, 3);
|
|
|
|
if (UNLIKELY(active_stack == NULL)) {
|
|
|
|
active_stack = vs;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
++vs->refcnt;
|
|
|
|
active_stack->elems[0] = v0;
|
|
|
|
active_stack->elems[1] = v1;
|
|
|
|
active_stack->elems[2] = v23.v2.fn;
|
|
|
|
active_stack->elems[3] = v23.v1.fn;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Pop one element off the stack. Returns false when the stack is empty
 * (underflow is reported) or when a required duplication fails.
 */
U6A_HOT bool
u6a_vm_stack_pop() {
    struct vm_stack* vs = active_stack;
    // Fast path: the active segment has at least one element, so the
    // post-decrement pops it in place.
    if (LIKELY(vs->top-- != UINT32_MAX)) {
        return true;
    }
    active_stack = vs->prev;
    if (UNLIKELY(active_stack == NULL)) {
        u6a_err_stack_underflow(err_stage);
        // NOTE(review): vs->top has already wrapped past UINT32_MAX above and
        // is not restored here — looks safe only if callers abort after a
        // failed pop. TODO confirm.
        active_stack = vs;
        return false;
    }
    // We are abandoning `vs`, which held one reference to its predecessor.
    // If others still reference the predecessor, it must not be mutated in
    // place — switch to a private duplicate instead.
    if (--active_stack->refcnt > 0) {
        active_stack = vm_stack_dup(active_stack);
    }
    if (UNLIKELY(active_stack == NULL)) {
        // NOTE(review): duplication failed; the refcnt/top decrements above
        // are not rolled back — confirm callers treat this as fatal.
        active_stack = vs;
        return false;
    }
    free(vs);
    // Pop from the (possibly duplicated) predecessor segment.
    --active_stack->top;
    return true;
}
|
|
|
|
|
|
|
|
/*
 * Exchange: overwrite the element just below the top of the stack with `v0`
 * and return the element that previously occupied that slot.
 * Returns U6A_VM_VAR_FN_EMPTY on underflow or allocation failure.
 */
struct u6a_vm_var_fn
u6a_vm_stack_xch(struct u6a_vm_var_fn v0) {
    struct vm_stack* vs = active_stack;
    struct u6a_vm_var_fn elem;
    // XCH on segmented stacks is inefficient, perhaps there's a better solution?
    if (LIKELY(vs->top != 0 && vs->top != UINT32_MAX)) {
        // Common case: both involved slots live in the active segment.
        elem = vs->elems[vs->top - 1];
        vs->elems[vs->top - 1] = v0;
    } else {
        // The slot below the top lives in the previous segment.
        struct vm_stack* prev = vs->prev;
        if (UNLIKELY(prev == NULL)) {
            u6a_err_stack_underflow(err_stage);
            return U6A_VM_VAR_FN_EMPTY;
        }
        // About to mutate `prev`: drop our reference, and if others still
        // hold it, work on a private duplicate instead.
        if (--prev->refcnt > 0) {
            prev = vm_stack_dup(prev);
            if (UNLIKELY(prev == NULL)) {
                // NOTE(review): refcnt already decremented and not rolled
                // back — confirm callers treat this failure as fatal.
                return U6A_VM_VAR_FN_EMPTY;
            }
        }
        if (vs->top == 0) {
            // Active segment holds only the top element; the slot to swap is
            // prev's topmost slot. Re-link and re-count the (possibly new) prev.
            ++prev->refcnt;
            vs->prev = prev;
            elem = prev->elems[prev->top];
            prev->elems[prev->top] = v0;
        } else {
            // Active segment is empty (top == UINT32_MAX): both slots are in
            // prev, so discard the empty segment and operate on prev directly.
            free(vs);
            active_stack = prev;
            elem = prev->elems[prev->top - 1];
            prev->elems[prev->top - 1] = v0;
        }
    }
    return elem;
}
|
|
|
|
|
|
|
|
void*
|
|
|
|
u6a_vm_stack_save() {
|
2020-02-04 18:47:45 +00:00
|
|
|
return vm_stack_dup(active_stack);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Duplicate a saved stack state so both handles can be resumed independently.
// Returns NULL on allocation failure.
void*
u6a_vm_stack_dup(void* ptr) {
    struct vm_stack* saved = ptr;
    return vm_stack_dup(saved);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
u6a_vm_stack_resume(void* ptr) {
|
|
|
|
u6a_vm_stack_destroy();
|
|
|
|
active_stack = ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Release a saved stack state that will never be resumed.
void
u6a_vm_stack_discard(void* ptr) {
    struct vm_stack* saved = ptr;
    vm_stack_free(saved);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
u6a_vm_stack_destroy() {
|
|
|
|
vm_stack_free(active_stack);
|
|
|
|
}
|