Merge branch 'master' into ravi-distro

ravi-distro
Dibyendu Majumdar 4 years ago
commit c28b956a3e

@ -11,6 +11,7 @@ option(NO_JIT "Controls whether JIT should be disabled, default is OFF" OFF)
option(STATIC_BUILD "Build static version of Ravi, default is OFF" OFF)
option(COMPUTED_GOTO "Controls whether the interpreter switch will use computed gotos on gcc/clang, default is ON" ON)
option(LTESTS "Controls whether ltests are enabled in Debug mode; note requires Debug build" ON)
option(ASAN "Controls whether address sanitizer should be enabled" OFF)
# By default on non-Windows platforms we enable MIR JIT
if (NOT WIN32
@ -57,11 +58,6 @@ set(LLVM_JIT_SRCS src/ravi_llvmjit.cpp src/ravi_llvmtypes.cpp
src/ravi_llvmreturn.cpp src/ravi_llvmload.cpp src/ravi_llvmforloop.cpp
src/ravi_llvmarith1.cpp src/ravi_llvmcall.cpp src/ravi_llvmtable.cpp
src/ravi_llvmarith2.cpp src/ravi_llvmtforcall.cpp src/ravi_llvmrest.cpp)
# MIR sources
#if (MIR_JIT)
# add_subdirectory(mir)
# set(MIRJIT_LIBRARIES c2mir)
#endif()
set(MIR_HEADERS mir/mir.h mir/mir-gen.h mir/mir-varr.h mir/mir-dlist.h mir/mir-htab.h
mir/mir-hash.h mir/mir-bitmap.h)
set(MIR_SRCS mir/mir.c mir/mir-gen.c)
@ -85,6 +81,14 @@ check_c_compiler_flag("-march=native" COMPILER_OPT_ARCH_NATIVE_SUPPORTED)
if (COMPILER_OPT_ARCH_NATIVE_SUPPORTED AND NOT CMAKE_C_FLAGS MATCHES "-march=")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
endif()
# Optionally enable AddressSanitizer (guarded by the ASAN option above).
if (ASAN)
  # The compile probe needs -fsanitize=address on the link line too, which is
  # what CMAKE_REQUIRED_FLAGS provides to check_c_compiler_flag().
  set(CMAKE_REQUIRED_FLAGS "-fsanitize=address")
  check_c_compiler_flag("-fsanitize=address" COMPILER_ASAN_SUPPORTED)
  # Reset so subsequent check_* probes are not run with the sanitizer flag.
  unset(CMAKE_REQUIRED_FLAGS)
  if (COMPILER_ASAN_SUPPORTED AND NOT CMAKE_C_FLAGS_DEBUG MATCHES "-fsanitize=address")
    set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fsanitize=address")
  endif()
endif()
if (LLVM_JIT)
find_package(LLVM REQUIRED CONFIG)
@ -117,10 +121,6 @@ if (LLVM_JIT)
else ()
if (MIR_JIT)
message(STATUS "MIRJIT enabled")
# set(JIT_SRCS ${MIR_JIT_SRCS})
# set_property(SOURCE ${MIR_JIT_SRCS}
# APPEND
# PROPERTY INCLUDE_DIRECTORIES "${CMAKE_SOURCE_DIR}/mir;${CMAKE_SOURCE_DIR}/mir/c2mir")
set(JIT_SRCS ${MIR_SRCS} ${C2MIR_SRCS} ${MIR_JIT_SRCS})
set_property(SOURCE ${MIR_SRCS} ${C2MIR_SRCS} ${MIR_JIT_SRCS}
APPEND

@ -6,7 +6,7 @@
# == CHANGE THE SETTINGS BELOW TO SUIT YOUR ENVIRONMENT =======================
# Your platform. See PLATS for possible values.
PLAT= none
PLAT= guess
# Where to install. The installation starts in the src and doc directories,
# so take care if INSTALL_TOP is not an absolute path. See the local target.
@ -38,7 +38,7 @@ RM= rm -f
# == END OF USER SETTINGS -- NO NEED TO CHANGE ANYTHING BELOW THIS LINE =======
# Convenience platforms targets.
PLATS= aix bsd c89 freebsd generic linux macosx mingw posix solaris
PLATS= guess aix bsd c89 freebsd generic linux linux-noreadline macosx mingw posix solaris
# What to install.
TO_BIN= ravi

@ -639,6 +639,9 @@ LUA_API int ravi_list_code(lua_State *L);
/* Returns a table with various system limits */
LUA_API int ravi_get_limits(lua_State *L);
/* Options */
LUA_API const char *raviV_options(struct lua_State *L);
/* Following are for debugging purposes only */
LUAI_DDEC int ravi_parser_debug;
LUA_API void ravi_set_debuglevel(int level);

@ -61,15 +61,19 @@
/* Linux configuration: POSIX APIs, dlopen-based dynamic loading, readline. */
#if defined(LUA_USE_LINUX)
#define LUA_USE_POSIX
#define LUA_USE_DLOPEN /* needs an extra library: -ldl */
/* Guard avoids a redefinition when LUA_USE_READLINE is already set
   (e.g. by the build system) — it does not provide a way to disable it. */
#ifndef LUA_USE_READLINE
#define LUA_USE_READLINE /* needs some extra libraries */
#endif
#endif
/* macOS configuration: POSIX APIs, dlopen-based dynamic loading, readline. */
#if defined(LUA_USE_MACOSX)
#define LUA_USE_POSIX
#define LUA_USE_DLOPEN /* MacOS does not need -ldl */
/* Guard avoids a redefinition when LUA_USE_READLINE is already set
   (e.g. by the build system) — it does not provide a way to disable it. */
#ifndef LUA_USE_READLINE
#define LUA_USE_READLINE /* needs an extra library: -lreadline */
#endif
#endif
/*
@ -256,9 +260,11 @@
#endif /* } */
/* more often than not the libs go together with the core */
/*
** More often than not the libs go together with the core.
*/
#define LUALIB_API LUA_API
#define LUAMOD_API LUALIB_API
#define LUAMOD_API LUA_API
/*
@ -277,8 +283,7 @@
*/
#if defined(__GNUC__) && ((__GNUC__*100 + __GNUC_MINOR__) >= 302) && \
defined(__ELF__) /* { */
/** RAVI change **/
#define LUAI_FUNC /* __attribute__((visibility("hidden")))*/ extern
#define LUAI_FUNC __attribute__((visibility("internal"))) extern
#else /* }{ */
#define LUAI_FUNC extern
#endif /* } */

@ -96,9 +96,6 @@ void raviV_dumpASM(struct lua_State *L, struct Proto *p);
/* Return JIT backend identifier */
const char *raviV_jit_id(struct lua_State *L);
/* Options */
const char *raviV_options(struct lua_State *L);
#ifdef __cplusplus
}
#endif

@ -445,6 +445,8 @@ DEF_VARR (label_ref_t);
DEF_VARR (MIR_code_reloc_t);
#define MOVDQA_CODE 0
struct target_ctx {
unsigned char alloca_p, stack_arg_func_p, leaf_p;
int start_sp_from_bp_offset;
@ -745,19 +747,24 @@ static void target_make_prolog_epilog (gen_ctx_t gen_ctx, bitmap_t used_hard_reg
MIR_func_t func;
MIR_insn_t anchor, new_insn;
MIR_op_t sp_reg_op, fp_reg_op;
int64_t bp_saved_reg_offset, start;
size_t i, n, service_area_size, saved_hard_regs_num, stack_slots_size, block_size;
int64_t bp_saved_reg_offset, offset;
size_t i, service_area_size, saved_hard_regs_size, stack_slots_size, block_size;
assert (curr_func_item->item_type == MIR_func_item);
func = curr_func_item->u.func;
for (i = saved_hard_regs_num = 0; i <= MAX_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i)) saved_hard_regs_num++;
if (leaf_p && !alloca_p && !stack_arg_func_p && saved_hard_regs_num == 0 && !func->vararg_p
for (i = saved_hard_regs_size = 0; i <= R15_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i))
saved_hard_regs_size += 8;
#ifdef _WIN64
for (; i <= XMM15_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i))
saved_hard_regs_size += 16;
#endif
if (leaf_p && !alloca_p && !stack_arg_func_p && saved_hard_regs_size == 0 && !func->vararg_p
&& stack_slots_num == 0)
return;
sp_reg_op.mode = fp_reg_op.mode = MIR_OP_HARD_REG;
sp_reg_op.u.hard_reg = SP_HARD_REG;
fp_reg_op.u.hard_reg = FP_HARD_REG;
sp_reg_op = _MIR_new_hard_reg_op (ctx, SP_HARD_REG);
fp_reg_op = _MIR_new_hard_reg_op (ctx, FP_HARD_REG);
/* Prologue: */
anchor = DLIST_HEAD (MIR_insn_t, func->insns);
new_insn
@ -771,68 +778,75 @@ static void target_make_prolog_epilog (gen_ctx_t gen_ctx, bitmap_t used_hard_reg
service_area_size = func->vararg_p ? reg_save_area_size + 8 : 8;
stack_slots_size = stack_slots_num * 8;
/* stack slots, and saved regs as multiple of 16 bytes: */
block_size = (stack_slots_size + 8 * saved_hard_regs_num + 15) / 16 * 16;
block_size = (stack_slots_size + saved_hard_regs_size + 15) / 16 * 16;
new_insn = MIR_new_insn (ctx, MIR_SUB, sp_reg_op, sp_reg_op,
MIR_new_int_op (ctx, block_size + service_area_size));
gen_add_insn_before (gen_ctx, anchor, new_insn); /* sp -= block size + service_area_size */
if (func->vararg_p) {
bp_saved_reg_offset = block_size;
#ifndef _WIN64
start = block_size;
isave (gen_ctx, anchor, start, DI_HARD_REG);
isave (gen_ctx, anchor, start + 8, SI_HARD_REG);
isave (gen_ctx, anchor, start + 16, DX_HARD_REG);
isave (gen_ctx, anchor, start + 24, CX_HARD_REG);
isave (gen_ctx, anchor, start + 32, R8_HARD_REG);
isave (gen_ctx, anchor, start + 40, R9_HARD_REG);
dsave (gen_ctx, anchor, start + 48, XMM0_HARD_REG);
dsave (gen_ctx, anchor, start + 64, XMM1_HARD_REG);
dsave (gen_ctx, anchor, start + 80, XMM2_HARD_REG);
dsave (gen_ctx, anchor, start + 96, XMM3_HARD_REG);
dsave (gen_ctx, anchor, start + 112, XMM4_HARD_REG);
dsave (gen_ctx, anchor, start + 128, XMM5_HARD_REG);
dsave (gen_ctx, anchor, start + 144, XMM6_HARD_REG);
dsave (gen_ctx, anchor, start + 160, XMM7_HARD_REG);
#endif
if (func->vararg_p) {
offset = block_size;
isave (gen_ctx, anchor, offset, DI_HARD_REG);
isave (gen_ctx, anchor, offset + 8, SI_HARD_REG);
isave (gen_ctx, anchor, offset + 16, DX_HARD_REG);
isave (gen_ctx, anchor, offset + 24, CX_HARD_REG);
isave (gen_ctx, anchor, offset + 32, R8_HARD_REG);
isave (gen_ctx, anchor, offset + 40, R9_HARD_REG);
dsave (gen_ctx, anchor, offset + 48, XMM0_HARD_REG);
dsave (gen_ctx, anchor, offset + 64, XMM1_HARD_REG);
dsave (gen_ctx, anchor, offset + 80, XMM2_HARD_REG);
dsave (gen_ctx, anchor, offset + 96, XMM3_HARD_REG);
dsave (gen_ctx, anchor, offset + 112, XMM4_HARD_REG);
dsave (gen_ctx, anchor, offset + 128, XMM5_HARD_REG);
dsave (gen_ctx, anchor, offset + 144, XMM6_HARD_REG);
dsave (gen_ctx, anchor, offset + 160, XMM7_HARD_REG);
bp_saved_reg_offset += reg_save_area_size;
}
bp_saved_reg_offset = block_size + (func->vararg_p ? reg_save_area_size : 0);
#endif
/* Saving callee saved hard registers: */
for (i = n = 0; i <= MAX_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i)) {
MIR_insn_code_t code = MIR_MOV;
MIR_type_t type = MIR_T_I64;
offset = -bp_saved_reg_offset;
#ifdef _WIN64
if (i > R15_HARD_REG) {
code = MIR_DMOV;
type = MIR_T_D;
}
#else
assert (i <= R15_HARD_REG); /* xmm regs are always callee-clobbered */
for (i = XMM0_HARD_REG; i <= XMM15_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i)) {
new_insn = _MIR_new_unspec_insn (ctx, 3, MIR_new_int_op (ctx, MOVDQA_CODE),
_MIR_new_hard_reg_mem_op (ctx, MIR_T_D, offset, FP_HARD_REG,
MIR_NON_HARD_REG, 1),
_MIR_new_hard_reg_op (ctx, i));
gen_add_insn_before (gen_ctx, anchor, new_insn); /* disp(sp) = saved hard reg */
offset += 16;
}
#endif
new_insn = MIR_new_insn (ctx, code,
_MIR_new_hard_reg_mem_op (ctx, type,
(int64_t) (n++ * 8) - bp_saved_reg_offset,
FP_HARD_REG, MIR_NON_HARD_REG, 1),
for (i = 0; i <= R15_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i)) {
new_insn = MIR_new_insn (ctx, MIR_MOV,
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, offset, FP_HARD_REG,
MIR_NON_HARD_REG, 1),
_MIR_new_hard_reg_op (ctx, i));
gen_add_insn_before (gen_ctx, anchor, new_insn); /* disp(sp) = saved hard reg */
offset += 8;
}
/* Epilogue: */
anchor = DLIST_TAIL (MIR_insn_t, func->insns);
/* Restoring hard registers: */
for (i = n = 0; i <= MAX_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i)) {
MIR_insn_code_t code = MIR_MOV;
MIR_type_t type = MIR_T_I64;
offset = -bp_saved_reg_offset;
#ifdef _WIN64
if (i > R15_HARD_REG) {
code = MIR_DMOV;
type = MIR_T_D;
}
for (i = XMM0_HARD_REG; i <= XMM15_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i)) {
new_insn = _MIR_new_unspec_insn (ctx, 3, MIR_new_int_op (ctx, MOVDQA_CODE),
_MIR_new_hard_reg_op (ctx, i),
_MIR_new_hard_reg_mem_op (ctx, MIR_T_D, offset, FP_HARD_REG,
MIR_NON_HARD_REG, 1));
gen_add_insn_before (gen_ctx, anchor, new_insn); /* hard reg = disp(sp) */
offset += 16;
}
#endif
new_insn = MIR_new_insn (ctx, code, _MIR_new_hard_reg_op (ctx, i),
_MIR_new_hard_reg_mem_op (ctx, type,
(int64_t) (n++ * 8) - bp_saved_reg_offset,
FP_HARD_REG, MIR_NON_HARD_REG, 1));
for (i = 0; i <= R15_HARD_REG; i++)
if (!target_call_used_hard_reg_p (i) && bitmap_bit_p (used_hard_regs, i)) {
new_insn = MIR_new_insn (ctx, MIR_MOV, _MIR_new_hard_reg_op (ctx, i),
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, offset, FP_HARD_REG,
MIR_NON_HARD_REG, 1));
gen_add_insn_before (gen_ctx, anchor, new_insn); /* hard reg = disp(sp) */
offset += 8;
}
new_insn = MIR_new_insn (ctx, MIR_ADD, sp_reg_op, fp_reg_op, MIR_new_int_op (ctx, 8));
gen_add_insn_before (gen_ctx, anchor, new_insn); /* sp = bp + 8 */
@ -854,6 +868,7 @@ struct pattern {
i[0-3] - immediate of size 8,16,32,64-bits
p[0-3] - reference
s - immediate 1, 2, 4, or 8 (scale)
c<number> - immediate integer <number>
m[0-3] - int (signed or unsigned) type memory of size 8,16,32,64-bits
ms[0-3] - signed int type memory of size 8,16,32,64-bits
mu[0-3] - unsigned int type memory of size 8,16,32,64-bits
@ -1045,7 +1060,7 @@ static const struct pattern patterns[] = {
{MIR_DMOV, "r r", "F2 Y 0F 10 r0 R1"}, /* movsd r0,r1 */
{MIR_DMOV, "r md", "F2 Y 0F 10 r0 m1"}, /* movsd r0,m64 */
{MIR_DMOV, "md r", "F2 Y 0F 11 r1 m0"}, /* movsd r0,m64 */
{MIR_DMOV, "md r", "F2 Y 0F 11 r1 m0"}, /* movsd m64,r0 */
{MIR_LDMOV, "mld h32", "DB /7 m0"}, /*only for ret and calls in given order: fstp m0 */
{MIR_LDMOV, "h32 mld", "DB /5 m1"}, /*only for ret and calls in given order: fld m1 */
@ -1053,6 +1068,13 @@ static const struct pattern patterns[] = {
{MIR_LDMOV, "h33 mld", "DB /5 m1; D9 C9"}, /*only for ret and calls: fld m1; fxch */
{MIR_LDMOV, "mld mld", "DB /5 m1; DB /7 m0"}, /* fld m1; fstp m0 */
#define STR(c) #c
#define STR_VAL(c) STR (c)
{MIR_UNSPEC, "c" STR_VAL (MOVDQA_CODE) " r r", "66 Y 0F 6F r1 R2"}, /* movdqa r0,r1 */
{MIR_UNSPEC, "c" STR_VAL (MOVDQA_CODE) " r md", "66 Y 0F 6F r1 m2"}, /* movdqa r0,m128 */
{MIR_UNSPEC, "c" STR_VAL (MOVDQA_CODE) " md r", "66 Y 0F 7F r2 m1"}, /* movdqa m128,r0 */
{MIR_EXT8, "r r", "X 0F BE r0 R1"}, /* movsx r0,r1 */
{MIR_EXT8, "r m0", "X 0F BE r0 m1"}, /* movsx r0,m1 */
{MIR_EXT16, "r r", "X 0F BF r0 R1"}, /* movsx r0,r1 */
@ -1239,6 +1261,22 @@ static int MIR_UNUSED uint16_p (int64_t v) { return 0 <= v && v <= UINT16_MAX; }
static int int32_p (int64_t v) { return INT32_MIN <= v && v <= INT32_MAX; }
static int uint32_p (int64_t v) { return 0 <= v && v <= UINT32_MAX; }
static int dec_value (int ch) { return '0' <= ch && ch <= '9' ? ch - '0' : -1; }
/* Parse a run of decimal digits starting at *ptr and return their value.
   On return, *ptr points at the LAST digit consumed (not one past it):
   the caller's scanning loop is expected to advance the pointer itself.
   gen_assert fires if the string starts with a non-digit, or if the
   accumulated value has any of its top 4 bits set before a multiply
   (res < 2^60 guarantees res * 10 + digit cannot overflow uint64_t). */
static uint64_t read_dec (const char **ptr) {
  int v;
  const char *p;
  uint64_t res = 0;
  for (p = *ptr; (v = dec_value (*p)) >= 0; p++) {
    gen_assert ((res >> 60) == 0); /* overflow guard: keep res below 2^60 */
    res = res * 10 + v;
  }
  gen_assert (p != *ptr); /* at least one digit must be present */
  *ptr = p - 1;           /* leave pointer on the last digit consumed */
  return res;
}
static int pattern_index_cmp (const void *a1, const void *a2) {
int i1 = *(const int *) a1, i2 = *(const int *) a2;
int c1 = (int) patterns[i1].code, c2 = (int) patterns[i2].code;
@ -1322,6 +1360,13 @@ static int pattern_match_p (gen_ctx_t gen_ctx, const struct pattern *pat, MIR_in
|| (op.u.i != 1 && op.u.i != 2 && op.u.i != 4 && op.u.i != 8))
return FALSE;
break;
case 'c': {
uint64_t n;
p++;
n = read_dec (&p);
if ((op.mode != MIR_OP_INT && op.mode != MIR_OP_UINT) || op.u.u != n) return FALSE;
break;
}
case 'm': {
MIR_type_t type, type2, type3 = MIR_T_BOUND;
int u_p, s_p;
@ -2004,6 +2049,9 @@ static void target_init (gen_ctx_t gen_ctx) {
VARR_CREATE (label_ref_t, label_refs, 0);
VARR_CREATE (uint64_t, abs_address_locs, 0);
VARR_CREATE (MIR_code_reloc_t, relocs, 0);
MIR_type_t res = MIR_T_D;
MIR_var_t args[] = {{MIR_T_D, "src"}};
_MIR_register_unspec_insn (gen_ctx->ctx, MOVDQA_CODE, "movdqa", 1, &res, 1, FALSE, args);
patterns_init (gen_ctx);
}

@ -6207,9 +6207,9 @@ void *MIR_gen (MIR_context_t ctx, MIR_item_t func_item) {
print_CFG (gen_ctx, TRUE, TRUE, TRUE, FALSE, output_bb_live_info);
});
}
ccp_clear (gen_ctx);
}
#endif /* #ifndef NO_CCP */
ccp_clear (gen_ctx);
}
make_io_dup_op_insns (gen_ctx);
target_machinize (gen_ctx);

@ -222,13 +222,11 @@ DEF_VARR (htab_ind_t)
\
static inline void HTAB_OP_DEF (T, foreach_elem) (HTAB (T) * htab, \
void (*func) (T el, void *arg), void *arg) { \
htab_ind_t *addr; \
htab_size_t i, size; \
htab_size_t i; \
HTAB_EL (T) * els_addr; \
\
HTAB_ASSERT (htab != NULL, "foreach_elem", T); \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
size = VARR_LENGTH (HTAB_EL (T), htab->els); \
for (i = 0; i < htab->els_bound; i++) \
if (els_addr[i].hash != HTAB_DELETED_HASH) func (els_addr[i].el, arg); \
}

@ -13,6 +13,7 @@ DEF_VARR (MIR_module_t);
DEF_VARR (size_t);
DEF_VARR (char);
DEF_VARR (uint8_t);
DEF_VARR (MIR_proto_t);
struct gen_ctx;
struct c2mir_ctx;
@ -28,6 +29,7 @@ struct MIR_context {
struct gen_ctx *gen_ctx; /* should be the 1st member */
struct c2mir_ctx *c2mir_ctx; /* should be the 2nd member */
MIR_error_func_t error_func;
VARR (MIR_proto_t) * unspec_protos; /* protos of unspec insns */
VARR (MIR_insn_t) * temp_insns, *temp_insns2;
VARR (MIR_op_t) * temp_insn_ops;
VARR (MIR_var_t) * temp_vars;
@ -56,6 +58,7 @@ struct MIR_context {
};
#define error_func ctx->error_func
#define unspec_protos ctx->unspec_protos
#define temp_insns ctx->temp_insns
#define temp_insns2 ctx->temp_insns2
#define temp_insn_ops ctx->temp_insn_ops
@ -312,6 +315,7 @@ static const struct insn_desc insn_descs[] = {
{MIR_VA_START, "va_start", {MIR_OP_INT, MIR_OP_BOUND}},
{MIR_VA_END, "va_end", {MIR_OP_INT, MIR_OP_BOUND}},
{MIR_LABEL, "label", {MIR_OP_BOUND}},
{MIR_UNSPEC, "unspec", {MIR_OP_BOUND}},
{MIR_INVALID_INSN, "invalid-insn", {MIR_OP_BOUND}},
};
@ -604,6 +608,7 @@ MIR_context_t MIR_init (void) {
(*error_func) (MIR_alloc_error, "Not enough memory for ctx");
string_init (&strings, &string_tab);
reg_init (ctx);
VARR_CREATE (MIR_proto_t, unspec_protos, 0);
VARR_CREATE (MIR_insn_t, temp_insns, 0);
VARR_CREATE (MIR_insn_t, temp_insns2, 0);
VARR_CREATE (MIR_op_t, temp_insn_ops, 0);
@ -722,6 +727,12 @@ void MIR_finish (MIR_context_t ctx) {
VARR_DESTROY (uint8_t, temp_data);
VARR_DESTROY (char, temp_string);
VARR_DESTROY (MIR_reg_t, inline_reg_map);
while (VARR_LENGTH (MIR_proto_t, unspec_protos) != 0) {
MIR_proto_t proto = VARR_POP (MIR_proto_t, unspec_protos);
VARR_DESTROY (MIR_var_t, proto->args);
free (proto);
}
VARR_DESTROY (MIR_proto_t, unspec_protos);
reg_finish (ctx);
string_finish (&strings, &string_tab);
vn_finish (ctx);
@ -1062,31 +1073,35 @@ MIR_item_t MIR_new_expr_data (MIR_context_t ctx, const char *name, MIR_item_t ex
return item;
}
static MIR_item_t new_proto_arr (MIR_context_t ctx, const char *name, size_t nres,
static MIR_proto_t create_proto (MIR_context_t ctx, const char *name, size_t nres,
MIR_type_t *res_types, size_t nargs, int vararg_p,
MIR_var_t *args) {
MIR_item_t proto_item, tab_item;
MIR_proto_t proto;
size_t i;
MIR_proto_t proto = malloc (sizeof (struct MIR_proto) + nres * sizeof (MIR_type_t));
if (curr_module == NULL)
(*error_func) (MIR_no_module_error, "Creating proto %s outside module", name);
proto_item = create_item (ctx, MIR_proto_item, "proto");
proto_item->u.proto = proto = malloc (sizeof (struct MIR_proto) + nres * sizeof (MIR_type_t));
if (proto == NULL) {
free (proto_item);
if (proto == NULL)
(*error_func) (MIR_alloc_error, "Not enough memory for creation of proto %s", name);
}
proto->name
= string_store (ctx, &strings, &string_tab, (MIR_str_t){strlen (name) + 1, name}).str.s;
proto->res_types = (MIR_type_t *) ((char *) proto + sizeof (struct MIR_proto));
memcpy (proto->res_types, res_types, nres * sizeof (MIR_type_t));
proto->nres = nres;
proto->vararg_p = vararg_p != 0;
VARR_CREATE (MIR_var_t, proto->args, nargs);
for (size_t i = 0; i < nargs; i++) VARR_PUSH (MIR_var_t, proto->args, args[i]);
return proto;
}
static MIR_item_t new_proto_arr (MIR_context_t ctx, const char *name, size_t nres,
MIR_type_t *res_types, size_t nargs, int vararg_p,
MIR_var_t *args) {
MIR_item_t proto_item, tab_item;
if (curr_module == NULL)
(*error_func) (MIR_no_module_error, "Creating proto %s outside module", name);
proto_item = create_item (ctx, MIR_proto_item, "proto");
proto_item->u.proto = create_proto (ctx, name, nres, res_types, nargs, vararg_p, args);
tab_item = add_item (ctx, proto_item);
mir_assert (tab_item == proto_item);
VARR_CREATE (MIR_var_t, proto->args, nargs);
for (i = 0; i < nargs; i++) VARR_PUSH (MIR_var_t, proto->args, args[i]);
return proto_item;
}
@ -1283,8 +1298,10 @@ void MIR_finish_func (MIR_context_t ctx) {
int expr_p = TRUE;
MIR_insn_t insn;
MIR_insn_code_t code;
const char *func_name;
if (curr_func == NULL) (*error_func) (MIR_no_func_error, "finish of non-existing function");
func_name = curr_func->name;
if (curr_func->vararg_p || curr_func->nargs != 0 || curr_func->nres != 1) expr_p = FALSE;
for (insn = DLIST_HEAD (MIR_insn_t, curr_func->insns); insn != NULL;
insn = DLIST_NEXT (MIR_insn_t, insn)) {
@ -1296,17 +1313,21 @@ void MIR_finish_func (MIR_context_t ctx) {
code = insn->code;
if (!curr_func->vararg_p && code == MIR_VA_START) {
curr_func = NULL;
(*error_func) (MIR_vararg_func_error, "va_start is not in vararg function");
(*error_func) (MIR_vararg_func_error, "func %s: va_start is not in vararg function",
func_name);
} else if (code == MIR_RET && actual_nops != curr_func->nres) {
curr_func = NULL;
(*error_func) (MIR_vararg_func_error,
"in instruction '%s': number of operands in return does not correspond number "
"of function returns. Expected %d, got %d",
insn_descs[code].name, curr_func->nres, actual_nops);
"func %s: in instruction '%s': number of operands in return does not "
"correspond number of function returns. Expected %d, got %d",
func_name, insn_descs[code].name, curr_func->nres, actual_nops);
} else if (MIR_call_code_p (code))
expr_p = FALSE;
for (i = 0; i < actual_nops; i++) {
if (MIR_call_code_p (code)) {
if (code == MIR_UNSPEC && i == 0) {
mir_assert (insn->ops[i].mode == MIR_OP_INT);
continue;
} else if (MIR_call_code_p (code)) {
if (i == 0) {
mir_assert (insn->ops[i].mode == MIR_OP_REF
&& insn->ops[i].u.ref->item_type == MIR_proto_item);
@ -1347,8 +1368,9 @@ void MIR_finish_func (MIR_context_t ctx) {
if (type2mode (rd->type) != MIR_OP_INT) {
curr_func = NULL;
(*error_func) (MIR_reg_type_error,
"in instruction '%s': base reg of non-integer type for operand #%d",
insn_descs[code].name, i + 1);
"func %s: in instruction '%s': base reg of non-integer type for operand "
"#%d",
func_name, insn_descs[code].name, i + 1);
}
}
if (insn->ops[i].u.mem.index != 0) {
@ -1357,8 +1379,9 @@ void MIR_finish_func (MIR_context_t ctx) {
if (type2mode (rd->type) != MIR_OP_INT) {
curr_func = NULL;
(*error_func) (MIR_reg_type_error,
"in instruction '%s': index reg of non-integer type for operand #%d",
insn_descs[code].name, i + 1);
"func %s: in instruction '%s': index reg of non-integer type for "
"operand #%d",
func_name, insn_descs[code].name, i + 1);
}
}
mode = type2mode (insn->ops[i].u.mem.type);
@ -1384,13 +1407,15 @@ void MIR_finish_func (MIR_context_t ctx) {
&& (mode == MIR_OP_UINT ? MIR_OP_INT : mode) != expected_mode) {
curr_func = NULL;
(*error_func) (MIR_op_mode_error,
"in instruction '%s': unexpected operand mode for operand #%d. Got '%s', "
"expected '%s'",
insn_descs[code].name, i + 1, mode_str (mode), mode_str (expected_mode));
"func %s: in instruction '%s': unexpected operand mode for operand #%d. Got "
"'%s', expected '%s'",
func_name, insn_descs[code].name, i + 1, mode_str (mode),
mode_str (expected_mode));
}
if (out_p && !can_be_out_p) {
curr_func = NULL;
(*error_func) (MIR_out_op_error, "in instruction '%s': wrong operand #%d for insn output",
(*error_func) (MIR_out_op_error,
"func %s; in instruction '%s': wrong operand #%d for insn output", func_name,
insn_descs[code].name, i + 1);
}
}
@ -1502,6 +1527,8 @@ void MIR_load_module (MIR_context_t ctx, MIR_module_t m) {
mir_assert (m != NULL);
for (MIR_item_t item = DLIST_HEAD (MIR_item_t, m->items); item != NULL;
item = DLIST_NEXT (MIR_item_t, item)) {
MIR_item_t first_item = item;
if (item->item_type == MIR_bss_item || item->item_type == MIR_data_item
|| item->item_type == MIR_ref_data_item || item->item_type == MIR_expr_data_item) {
item = load_bss_data_section (ctx, item, FALSE);
@ -1514,10 +1541,11 @@ void MIR_load_module (MIR_context_t ctx, MIR_module_t m) {
}
_MIR_redirect_thunk (ctx, item->addr, undefined_interface);
}
if (item->export_p) { /* update global item table */
mir_assert (item->item_type != MIR_export_item && item->item_type != MIR_import_item
&& item->item_type != MIR_forward_item);
setup_global (ctx, MIR_item_name (ctx, item), item->addr, item);
if (first_item->export_p) { /* update global item table */
mir_assert (first_item->item_type != MIR_export_item
&& first_item->item_type != MIR_import_item
&& first_item->item_type != MIR_forward_item);
setup_global (ctx, MIR_item_name (ctx, first_item), first_item->addr, first_item);
}
}
VARR_PUSH (MIR_module_t, modules_to_link, m);
@ -1677,23 +1705,32 @@ MIR_op_mode_t MIR_insn_op_mode (MIR_context_t ctx, MIR_insn_t insn, size_t nop,
*out_p = FALSE;
/* should be already checked in MIR_finish_func */
return nop == 0 && code == MIR_SWITCH ? MIR_OP_INT : insn->ops[nop].mode;
} else if (MIR_call_code_p (code)) {
MIR_op_t proto_op = insn->ops[0];
} else if (MIR_call_code_p (code) || code == MIR_UNSPEC) {
MIR_op_t proto_op;
MIR_proto_t proto;
size_t args_start;
mir_assert (proto_op.mode == MIR_OP_REF && proto_op.u.ref->item_type == MIR_proto_item);
proto = proto_op.u.ref->u.proto;
*out_p = 2 <= nop && nop < proto->nres + 2;
nargs = proto->nres + 2 + (proto->args == NULL ? 0 : VARR_LENGTH (MIR_var_t, proto->args));
if (code == MIR_UNSPEC) {
args_start = 1;
mir_assert (insn->ops[0].mode == MIR_OP_INT);
mir_assert (insn->ops[0].u.u < VARR_LENGTH (MIR_proto_t, unspec_protos));
proto = VARR_GET (MIR_proto_t, unspec_protos, insn->ops[0].u.u);
} else {
args_start = 2;
proto_op = insn->ops[0];
mir_assert (proto_op.mode == MIR_OP_REF && proto_op.u.ref->item_type == MIR_proto_item);
proto = proto_op.u.ref->u.proto;
}
*out_p = args_start <= nop && nop < proto->nres + args_start;
nargs
= proto->nres + args_start + (proto->args == NULL ? 0 : VARR_LENGTH (MIR_var_t, proto->args));
if (proto->vararg_p && nop >= nargs) return MIR_OP_UNDEF; /* unknown */
mir_assert (nops >= nargs && (proto->vararg_p || nops == nargs));
return (nop == 0
? insn->ops[nop].mode
: nop == 1
? MIR_OP_INT
: 2 <= nop && nop < proto->nres + 2
? type2mode (proto->res_types[nop - 2])
: type2mode (VARR_GET (MIR_var_t, proto->args, nop - 2 - proto->nres).type));
if (nop == 0) return insn->ops[nop].mode;
if (nop == 1 && code != MIR_UNSPEC) return MIR_OP_INT; /* call func addr */
if (args_start <= nop && nop < proto->nres + args_start)
return type2mode (proto->res_types[nop - args_start]);
return type2mode (VARR_GET (MIR_var_t, proto->args, nop - args_start - proto->nres).type);
}
mode = insn_descs[code].op_modes[nop];
*out_p = (mode & OUTPUT_FLAG) != 0;
@ -1747,25 +1784,34 @@ static MIR_insn_t new_insn1 (MIR_context_t ctx, MIR_insn_code_t code) {
MIR_insn_t MIR_new_insn_arr (MIR_context_t ctx, MIR_insn_code_t code, size_t nops, MIR_op_t *ops) {
MIR_insn_t insn;
MIR_proto_t proto;
size_t i = 0, expected_nops = insn_code_nops (ctx, code);
size_t args_start, i = 0, expected_nops = insn_code_nops (ctx, code);
mir_assert (ops != NULL);
if (!MIR_call_code_p (code) && code != MIR_RET && code != MIR_SWITCH && nops != expected_nops) {
if (!MIR_call_code_p (code) && code != MIR_UNSPEC && code != MIR_RET && code != MIR_SWITCH
&& nops != expected_nops) {
(*error_func) (MIR_ops_num_error, "wrong number of operands for insn %s",
insn_descs[code].name);
} else if (code == MIR_SWITCH) {
if (nops < 2) (*error_func) (MIR_ops_num_error, "number of MIR_SWITCH operands is less 2");
} else if (MIR_call_code_p (code)) {
if (nops < 2) (*error_func) (MIR_ops_num_error, "wrong number of call operands");
if (ops[0].mode != MIR_OP_REF || ops[0].u.ref->item_type != MIR_proto_item)
(*error_func) (MIR_call_op_error, "the 1st call operand should be a prototype");
proto = ops[0].u.ref->u.proto;
} else if (MIR_call_code_p (code) || code == MIR_UNSPEC) {
args_start = code == MIR_UNSPEC ? 1 : 2;
if (nops < args_start)
(*error_func) (MIR_ops_num_error, "wrong number of call/unspec operands");
if (code == MIR_UNSPEC) {
if (ops[0].mode != MIR_OP_INT || ops[0].u.u >= VARR_LENGTH (MIR_proto_t, unspec_protos))
(*error_func) (MIR_unspec_op_error, "the 1st unspec operand should be valid unspec code");
proto = VARR_GET (MIR_proto_t, unspec_protos, ops[0].u.u);
} else {
if (ops[0].mode != MIR_OP_REF || ops[0].u.ref->item_type != MIR_proto_item)
(*error_func) (MIR_call_op_error, "the 1st call operand should be a prototype");
proto = ops[0].u.ref->u.proto;
}
i = proto->nres;
if (proto->args != NULL) i += VARR_LENGTH (MIR_var_t, proto->args);
if (nops < i + 2 || (nops != i + 2 && !proto->vararg_p))
(*error_func) (MIR_call_op_error,
"number of call operands or results does not correspond to prototype %s",
proto->name);
if (nops < i + args_start || (nops != i + args_start && !proto->vararg_p))
(*error_func) (code == MIR_UNSPEC ? MIR_unspec_op_error : MIR_call_op_error,
"number of %s operands or results does not correspond to prototype %s",
code == MIR_UNSPEC ? "unspec" : "call", proto->name);
} else if (code == MIR_VA_ARG) {
if (ops[2].mode != MIR_OP_MEM)
(*error_func) (MIR_op_mode_error,
@ -1792,10 +1838,10 @@ MIR_insn_t MIR_new_insn (MIR_context_t ctx, MIR_insn_code_t code, ...) {
va_list argp;
size_t nops = insn_code_nops (ctx, code);
if (MIR_call_code_p (code) || code == MIR_RET || code == MIR_SWITCH)
if (MIR_call_code_p (code) || code == MIR_UNSPEC || code == MIR_RET || code == MIR_SWITCH)
(*error_func) (MIR_call_op_error,
"Use only MIR_new_insn_arr or MIR_new_{call,ret}_insn for creating a "
"call/ret/switch insn");
"Use only MIR_new_insn_arr or MIR_new_{call,unspec,ret}_insn for creating a "
"call/unspec/ret/switch insn");
va_start (argp, code);
return new_insn (ctx, code, nops, argp);
}
@ -1814,6 +1860,28 @@ MIR_insn_t MIR_new_ret_insn (MIR_context_t ctx, size_t nops, ...) {
return new_insn (ctx, MIR_RET, nops, argp);
}
/* Create a MIR_UNSPEC insn from nops variadic operands.  The cleanup of the
   va_list follows the same pattern as the sibling MIR_new_insn /
   MIR_new_ret_insn constructors, which also hand the list to new_insn. */
MIR_insn_t _MIR_new_unspec_insn (MIR_context_t ctx, size_t nops, ...) {
  va_list ops;

  va_start (ops, nops);
  return new_insn (ctx, MIR_UNSPEC, nops, ops);
}
/* Register the prototype describing unspec insn `code`: its name, result
   types, and argument list.  The unspec_protos table (a VARR on the context)
   is grown with NULL slots on demand so `code` becomes a valid index.
   Re-registering an already-known code is a no-op, but the name must match
   the first registration (checked only in assert-enabled builds). */
void _MIR_register_unspec_insn (MIR_context_t ctx, uint64_t code, const char *name, size_t nres,
                                MIR_type_t *res_types, size_t nargs, int vararg_p,
                                MIR_var_t *args) {
  MIR_proto_t existing;

  /* Grow the table until index `code` is addressable. */
  while (VARR_LENGTH (MIR_proto_t, unspec_protos) <= code)
    VARR_PUSH (MIR_proto_t, unspec_protos, NULL);
  existing = VARR_GET (MIR_proto_t, unspec_protos, code);
  if (existing != NULL) {
    /* Already registered: names must agree across registrations. */
    assert (strcmp (existing->name, name) == 0);
    return;
  }
  VARR_SET (MIR_proto_t, unspec_protos, code,
            create_proto (ctx, name, nres, res_types, nargs, vararg_p, args));
}
MIR_insn_t MIR_copy_insn (MIR_context_t ctx, MIR_insn_t insn) {
size_t size;
mir_assert (insn != NULL);
@ -2293,6 +2361,8 @@ void MIR_output_insn (MIR_context_t ctx, FILE *f, MIR_insn_t insn, MIR_func_t fu
fprintf (f, i == 0 ? "\t" : ", ");
MIR_output_op (ctx, f, insn->ops[i], func);
}
if (insn->code == MIR_UNSPEC)
fprintf (f, " # %s", VARR_GET (MIR_proto_t, unspec_protos, insn->ops[0].u.u)->name);
if (newline_p) fprintf (f, "\n");
}
@ -2519,6 +2589,7 @@ void MIR_simplify_op (MIR_context_t ctx, MIR_item_t func_item, MIR_insn_t insn,
MIR_op_mode_t value_mode = op->value_mode;
int move_p = code == MIR_MOV || code == MIR_FMOV || code == MIR_DMOV || code == MIR_LDMOV;
if (code == MIR_UNSPEC && nop == 0) return; /* do nothing: it is an unspec code */
if (MIR_call_code_p (code)) {
if (nop == 0) return; /* do nothing: it is a prototype */
if (nop == 1 && op->mode == MIR_OP_REF
@ -3341,7 +3412,7 @@ static code_holder_t *get_last_code_holder (MIR_context_t ctx, size_t size) {
#ifndef __MIRC__
/* Flush the CPU instruction cache for generated code in [start, bound).
   Uses the GCC/Clang builtin: unlike the libgcc function __clear_cache,
   __builtin___clear_cache needs no declaration and is available on both
   compilers.  (The previous text had both calls left in place, flushing
   twice and referencing the undeclared library symbol.) */
void _MIR_flush_code_cache (void *start, void *bound) {
#ifdef __GNUC__
  __builtin___clear_cache (start, bound);
#endif
}
#endif
@ -3773,6 +3844,8 @@ static size_t write_insn (MIR_context_t ctx, writer_func_t writer, MIR_func_t fu
MIR_insn_code_t code = insn->code;
size_t len;
if (code == MIR_UNSPEC)
(*error_func) (MIR_binary_io_error, "MIR_UNSPEC is not portable and can not be output");
if (code == MIR_LABEL) return write_lab (ctx, writer, insn);
nops = MIR_insn_nops (ctx, insn);
len = write_uint (ctx, writer, code);
@ -4583,6 +4656,8 @@ void MIR_read_with_func (MIR_context_t ctx, int (*const reader) (MIR_context_t))
if (insn_code >= MIR_LABEL)
(*error_func) (MIR_binary_io_error, "wrong insn code %d", insn_code);
if (insn_code == MIR_UNSPEC)
(*error_func) (MIR_binary_io_error, "UNSPEC is not portable and can not be read");
for (uint64_t i = 0; i < VARR_LENGTH (uint64_t, insn_label_string_nums); i++) {
lab = to_lab (ctx, VARR_GET (uint64_t, insn_label_string_nums, i));
MIR_append_insn (ctx, func, lab);
@ -5164,6 +5239,8 @@ void MIR_scan_string (MIR_context_t ctx, const char *str) {
if (!HTAB_DO (insn_name_t, insn_name_tab, in, HTAB_FIND, el))
scan_error (ctx, "Unknown insn %s", name);
insn_code = el.code;
if (insn_code == MIR_UNSPEC)
scan_error (ctx, "UNSPEC is not portable and can not be scanned", name);
for (n = 0; n < VARR_LENGTH (label_name_t, label_names); n++) {
label = create_label_desc (ctx, VARR_GET (label_name_t, label_names, n));
if (func != NULL) MIR_append_insn (ctx, func, label);

@ -51,8 +51,8 @@ static inline int mir_assert (int cond) { return 0 && cond; }
/* Error codes reported through the MIR error callback.  Each ROW expands,
   via the REPn/ERR_EL macros, into MIR_<name>_error enumerators.
   NOTE(review): the diff rendering had interleaved the pre-change rows
   (REP4 ending at repeated_decl, REP8 starting at reg_type) with the
   post-change rows, yielding duplicate enumerators; only the post-change
   rows — which add the new `unspec_op` error for the UNSPEC insn — are
   kept here. */
typedef enum MIR_error_type {
  REP8 (ERR_EL, no, syntax, binary_io, alloc, finish, no_module, nested_module, no_func),
  REP4 (ERR_EL, func, vararg_func, nested_func, wrong_param_value),
  REP5 (ERR_EL, reserved_name, import_export, undeclared_func_reg, repeated_decl, reg_type),
  REP8 (ERR_EL, unique_reg, undeclared_op_ref, ops_num, call_op, unspec_op, ret, op_mode, out_op),
  ERR_EL (invalid_insn)
} MIR_error_type_t;
@ -129,6 +129,7 @@ typedef enum {
INSN_EL (VA_START),
INSN_EL (VA_END), /* operand is va_list */
INSN_EL (LABEL), /* One immediate operand is unique label number */
INSN_EL (UNSPEC), /* First operand unspec code and the rest are args */
INSN_EL (INVALID_INSN),
INSN_EL (INSN_BOUND), /* Should be the last */
} MIR_insn_code_t;
@ -547,6 +548,10 @@ extern MIR_reg_t _MIR_new_temp_reg (MIR_context_t ctx, MIR_type_t type,
extern size_t _MIR_type_size (MIR_context_t ctx, MIR_type_t type);
extern MIR_op_mode_t _MIR_insn_code_op_mode (MIR_context_t ctx, MIR_insn_code_t code, size_t nop,
int *out_p);
extern MIR_insn_t _MIR_new_unspec_insn (MIR_context_t ctx, size_t nops, ...);
extern void _MIR_register_unspec_insn (MIR_context_t ctx, uint64_t code, const char *name,
size_t nres, MIR_type_t *res_types, size_t nargs,
int vararg_p, MIR_var_t *args);
extern void _MIR_duplicate_func_insns (MIR_context_t ctx, MIR_item_t func_item);
extern void _MIR_restore_func_insns (MIR_context_t ctx, MIR_item_t func_item);
extern void _MIR_simplify_insn (MIR_context_t ctx, MIR_item_t func_item, MIR_insn_t insn,

@ -4,11 +4,12 @@
# == CHANGE THE SETTINGS BELOW TO SUIT YOUR ENVIRONMENT =======================
# Your platform. See PLATS for possible values.
PLAT= none
PLAT= guess
#CC= gcc -std=gnu99
CFLAGS= -O2 -Wall -DNDEBUG -fomit-frame-pointer $(SYSCFLAGS) $(MYCFLAGS) -I../include
CXXFLAGS=$(CFLAGS) -fno-rtti -Wno-sign-compare -std=c++14 -fno-exceptions -I../include
CC= gcc -std=gnu99
# Note for Ravi we need NDEBUG to disable LTESTS
# Also we enable some GCC specific flags in lvm.c via RAVI_ENABLE_GCC_FLAGS
CFLAGS= -O2 -DNDEBUG -DRAVI_ENABLE_GCC_FLAGS -Wall $(SYSCFLAGS) $(MYCFLAGS) -I../include
LDFLAGS= $(SYSLDFLAGS) $(MYLDFLAGS)
LIBS= -lm $(SYSLIBS) $(MYLIBS)
VPATH=../include
@ -16,6 +17,7 @@ VPATH=../include
AR= ar rcu
RANLIB= ranlib
RM= rm -f
UNAME= uname
SYSCFLAGS=
SYSLDFLAGS=
@ -28,13 +30,13 @@ MYOBJS=
# == END OF USER SETTINGS -- NO NEED TO CHANGE ANYTHING BELOW THIS LINE =======
PLATS= aix bsd c89 freebsd generic linux macosx mingw posix solaris
PLATS= guess aix bsd c89 freebsd generic linux linux-readline macosx mingw posix solaris
LUA_A= libravinojit.a
CORE_O= lapi.o lcode.o lctype.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o \
lmem.o lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltests.o \
ltm.o lundump.o lvm.o lzio.o ravi_profile.o ravi_membuf.o \
ravi_jitshared.o ravi_nojit.o ravi_alloc.o ravi_jit.o $(CORE_CPP_O)
ravi_jitshared.o ravi_nojit.o ravi_alloc.o ravi_jit.o
LIB_O= lauxlib.o lbaselib.o lbitlib.o lcorolib.o ldblib.o liolib.o \
lmathlib.o loslib.o lstrlib.o ltablib.o lutf8lib.o loadlib.o linit.o \
bit.o
@ -78,15 +80,20 @@ echo:
@echo "AR= $(AR)"
@echo "RANLIB= $(RANLIB)"
@echo "RM= $(RM)"
@echo "UNAME= $(UNAME)"
# Convenience targets for popular platforms
ALL= all
none:
@echo "Please do 'make PLATFORM' where PLATFORM is one of these:"
help:
@echo "Do 'make PLATFORM' where PLATFORM is one of these:"
@echo " $(PLATS)"
aix:
guess:
@echo Guessing `$(UNAME)`
@$(MAKE) `$(UNAME)`
AIX aix:
$(MAKE) $(ALL) CC="xlc" CFLAGS="-O2 -DLUA_USE_POSIX -DLUA_USE_DLOPEN" SYSLIBS="-ldl" SYSLDFLAGS="-brtl -bexpall"
bsd:
@ -98,17 +105,21 @@ c89:
@echo '*** C89 does not guarantee 64-bit integers for Lua.'
@echo ''
freebsd:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_LINUX" SYSLIBS="-Wl,-E -lreadline"
FreeBSD NetBSD OpenBSD freebsd:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_LINUX -DLUA_USE_READLINE -I/usr/include/edit" SYSLIBS="-Wl,-E -ledit" CC="cc"
generic: $(ALL)
linux:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_LINUX" SYSLIBS="-Wl,-E -ldl -lreadline"
Linux linux: linux-readline
linux-noreadline:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_LINUX" SYSLIBS="-Wl,-E -ldl"
linux-readline:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_LINUX -DLUA_USE_READLINE" SYSLIBS="-Wl,-E -ldl -lreadline"
macosx:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_MACOSX" SYSLIBS="-lreadline" CC=cc
Darwin macos macosx:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_MACOSX -DLUA_USE_READLINE" SYSLIBS="-lreadline"
mingw:
$(MAKE) "LUA_A=ravi.dll" "LUA_T=ravi.exe" \
@ -118,7 +129,7 @@ mingw:
posix:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_POSIX"
solaris:
SunOS solaris:
$(MAKE) $(ALL) SYSCFLAGS="-DLUA_USE_POSIX -DLUA_USE_DLOPEN -D_REENTRANT" SYSLIBS="-ldl"
# list targets that do not create files (but not all makes understand .PHONY)

@ -1253,16 +1253,14 @@ static void finishgencycle (lua_State *L, global_State *g) {
/*
** Does a young collection. First, mark 'OLD1' objects. Then does the
** atomic step. Then, sweep all lists and advance pointers. Finally,
** finish the collection.
*/
static void youngcollection (lua_State *L, global_State *g) {
GCObject **psurvival; /* to point to first non-dead survival object */
lua_assert(g->gcstate == GCSpropagate);
markold(g, g->survival, g->reallyold);
markold(g, g->allgc, g->reallyold);
markold(g, g->finobj, g->finobjrold);
atomic(L);

@ -580,7 +580,7 @@ static int pmain (lua_State *L) {
}
luaL_openlibs(L); /* open standard libraries */
createargtable(L, argv, argc, script); /* create table 'arg' */
lua_gc(L, LUA_GCGEN, 0, 0); /* GC in generational mode */
//lua_gc(L, LUA_GCGEN, 0, 0); /* GC in generational mode */
if (!(args & has_E)) { /* no option '-E'? */
if (handle_luainit(L) != LUA_OK) /* run LUA_INIT */
return 0; /* error running LUA_INIT */

@ -36,6 +36,15 @@
#include "lvm.h"
#include "ravi_profile.h"
/*
** By default, use jump tables in the main interpreter loop on gcc
** and compatible compilers.
*/
#if !defined(RAVI_USE_COMPUTED_GOTO)
#if defined(__GNUC__) || defined(__clang__)
#define RAVI_USE_COMPUTED_GOTO
#endif
#endif
/* limit for table tag-method chains (to avoid loops) */
#define MAXTAGLOOP 2000
@ -1221,6 +1230,12 @@ int raviV_check_usertype(lua_State *L, TString *name, const TValue *o)
return (!ttisnil(metatab) && ttisLtable(metatab) && hvalue(metatab) == mt) || 0;
}
// When using computed gotos GCC generates worse code if crossjumping and gcse are enabled
// We normally set these flags via CMake but this is to help out when building via the
// supplied Makefile
#if defined(RAVI_ENABLE_GCC_FLAGS) && defined(__GNUC__) && !defined(__clang__)
__attribute((optimize("no-crossjumping,no-gcse")))
#endif
int luaV_execute (lua_State *L) {
#ifdef RAVI_USE_COMPUTED_GOTO

Loading…
Cancel
Save