issue #169 Update to latest MIR code

lua54-randomgen
Dibyendu Majumdar 4 years ago
parent 76f9bbdffb
commit 5f286815ed

@ -85,15 +85,6 @@ check_c_compiler_flag("-march=native" COMPILER_OPT_ARCH_NATIVE_SUPPORTED)
if (COMPILER_OPT_ARCH_NATIVE_SUPPORTED AND NOT CMAKE_C_FLAGS MATCHES "-march=")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
endif()
check_c_compiler_flag("-fno-common" COMPILER_OPT_NO_COMMON_SUPPORTED)
if (COMPILER_OPT_NO_COMMON_SUPPORTED AND NOT CMAKE_C_FLAGS MATCHES "-fno-common")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-common")
endif()
check_c_compiler_flag("-fno-stack-protector" COMPILER_OPT_NO_STACK_PROTECTOR_SUPPORTED)
if (COMPILER_OPT_NO_STACK_PROTECTOR_SUPPORTED AND NOT CMAKE_C_FLAGS MATCHES "-fno-stack-protector")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-stack-protector")
endif()
if (LLVM_JIT)
find_package(LLVM REQUIRED CONFIG)

@ -3,7 +3,7 @@
mkdir build
cd build
cmake3 -DCMAKE_INSTALL_PREFIX=$HOME/Software/llvm10 \
cmake3 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME/Software/llvm10 \
-DLLVM_TARGETS_TO_BUILD="X86" \
-DLLVM_BUILD_TOOLS=OFF \
-DLLVM_INCLUDE_TOOLS=OFF \
@ -11,5 +11,6 @@ cmake3 -DCMAKE_INSTALL_PREFIX=$HOME/Software/llvm10 \
-DLLVM_INCLUDE_EXAMPLES=OFF \
-DLLVM_BUILD_TESTS=OFF \
-DLLVM_INCLUDE_TESTS=OFF \
-DLLVM_OPTIMIZED_TABLEGEN=ON \
..
cmake3 --build . --config Release --target install
make install

@ -189,17 +189,15 @@ static inline int bitmap_empty_p (const_bitmap_t bm) {
return TRUE;
}
/* Return the larger of the two bitmap elements EL1 and EL2. */
static inline bitmap_el_t bitmap_el_max2 (bitmap_el_t el1, bitmap_el_t el2) {
  if (el1 >= el2) return el1;
  return el2;
}
/* Return the maximum of the three bitmap elements EL1, EL2, and EL3. */
static inline bitmap_el_t bitmap_el_max3 (bitmap_el_t el1, bitmap_el_t el2, bitmap_el_t el3) {
  bitmap_el_t m = el1 <= el2 ? el2 : el1;
  return m < el3 ? el3 : m;
}
/* Return the maximum of the four bitmap elements EL1..EL4. */
static inline bitmap_el_t bitmap_el_max4 (bitmap_el_t el1, bitmap_el_t el2, bitmap_el_t el3,
                                          bitmap_el_t el4) {
  bitmap_el_t m12 = el1 <= el2 ? el2 : el1;
  return bitmap_el_max3 (m12, el3, el4);
}
/* Return the number of bits set in BM. */
static inline size_t bitmap_bit_count (const_bitmap_t bm) {
size_t i, len = VARR_LENGTH (bitmap_el_t, bm);
@ -223,7 +221,7 @@ static inline int bitmap_op2 (bitmap_t dst, const_bitmap_t src1, const_bitmap_t
src1_len = VARR_LENGTH (bitmap_el_t, src1);
src2_len = VARR_LENGTH (bitmap_el_t, src2);
len = bitmap_el_max3 (VARR_LENGTH (bitmap_el_t, dst), src1_len, src2_len);
len = bitmap_el_max2 (src1_len, src2_len);
bitmap_expand (dst, len * BITMAP_WORD_BITS);
dst_addr = VARR_ADDR (bitmap_el_t, dst);
src1_addr = VARR_ADDR (bitmap_el_t, src1);
@ -269,7 +267,7 @@ static inline int bitmap_op3 (bitmap_t dst, const_bitmap_t src1, const_bitmap_t
src1_len = VARR_LENGTH (bitmap_el_t, src1);
src2_len = VARR_LENGTH (bitmap_el_t, src2);
src3_len = VARR_LENGTH (bitmap_el_t, src3);
len = bitmap_el_max4 (VARR_LENGTH (bitmap_el_t, dst), src1_len, src2_len, src3_len);
len = bitmap_el_max3 (src1_len, src2_len, src3_len);
bitmap_expand (dst, len * BITMAP_WORD_BITS);
dst_addr = VARR_ADDR (bitmap_el_t, dst);
src1_addr = VARR_ADDR (bitmap_el_t, src1);

@ -702,15 +702,15 @@ static void target_machinize (MIR_context_t ctx) {
}
}
/* Insert before ANCHOR a move storing integer hard register HARD_REG into the
   64-bit stack slot at BASE + DISP.  (The captured diff left both the old
   SP_HARD_REG-based and the new BASE-based lines interleaved, which is not
   valid C; the BASE-parameter form matches the call sites in
   target_make_prolog_epilog, which pass an explicit base register.) */
static void isave (MIR_context_t ctx, MIR_insn_t anchor, int disp, MIR_reg_t base,
                   MIR_reg_t hard_reg) {
  gen_mov (ctx, anchor, MIR_MOV,
           _MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, disp, base, MIR_NON_HARD_REG, 1),
           _MIR_new_hard_reg_op (ctx, hard_reg));
}
/* Insert before ANCHOR a move (MIR_LDMOV, type MIR_T_LD) storing hard register
   HARD_REG into the stack slot at BASE + DISP.  (The captured diff left both
   the old SP_HARD_REG-based and the new BASE-based lines interleaved, which is
   not valid C; the BASE-parameter form matches the call sites in
   target_make_prolog_epilog, which pass an explicit base register.) */
static void fsave (MIR_context_t ctx, MIR_insn_t anchor, int disp, MIR_reg_t base,
                   MIR_reg_t hard_reg) {
  gen_mov (ctx, anchor, MIR_LDMOV,
           _MIR_new_hard_reg_mem_op (ctx, MIR_T_LD, disp, base, MIR_NON_HARD_REG, 1),
           _MIR_new_hard_reg_op (ctx, hard_reg));
}
@ -719,7 +719,7 @@ static void target_make_prolog_epilog (MIR_context_t ctx, bitmap_t used_hard_reg
struct gen_ctx *gen_ctx = *gen_ctx_loc (ctx);
MIR_func_t func;
MIR_insn_t anchor, new_insn;
MIR_op_t sp_reg_op, fp_reg_op, treg_op;
MIR_op_t sp_reg_op, fp_reg_op, treg_op, treg_op2;
int64_t start;
int save_prev_stack_p;
size_t i, offset, frame_size, frame_size_after_saved_regs, saved_iregs_num, saved_fregs_num;
@ -755,14 +755,21 @@ static void target_make_prolog_epilog (MIR_context_t ctx, bitmap_t used_hard_reg
frame_size += stack_slots_num * 8;
if (frame_size % 16 != 0) frame_size = (frame_size + 15) / 16 * 16;
save_prev_stack_p = func->vararg_p || stack_arg_func_p;
treg_op = _MIR_new_hard_reg_op (ctx, R9_HARD_REG);
if (save_prev_stack_p) { /* prev stack pointer */
treg_op = _MIR_new_hard_reg_op (ctx, R9_HARD_REG);
gen_mov (ctx, anchor, MIR_MOV, treg_op, sp_reg_op);
frame_size += 16;
}
frame_size += 16; /* lr/fp */
new_insn = MIR_new_insn (ctx, MIR_SUB, sp_reg_op, sp_reg_op, MIR_new_int_op (ctx, frame_size));
gen_add_insn_before (ctx, anchor, new_insn); /* sp = sp - frame_size */
if (frame_size < (1 << 12)) {
new_insn = MIR_new_insn (ctx, MIR_SUB, sp_reg_op, sp_reg_op, MIR_new_int_op (ctx, frame_size));
} else {
treg_op2 = _MIR_new_hard_reg_op (ctx, R10_HARD_REG);
new_insn = MIR_new_insn (ctx, MIR_MOV, treg_op2, MIR_new_int_op (ctx, frame_size));
gen_add_insn_before (ctx, anchor, new_insn); /* t = frame_size */
new_insn = MIR_new_insn (ctx, MIR_SUB, sp_reg_op, sp_reg_op, treg_op2);
}
gen_add_insn_before (ctx, anchor, new_insn); /* sp = sp - (frame_size|t) */
if (save_prev_stack_p)
gen_mov (ctx, anchor, MIR_MOV,
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, 16, SP_HARD_REG, MIR_NON_HARD_REG, 1),
@ -775,23 +782,31 @@ static void target_make_prolog_epilog (MIR_context_t ctx, bitmap_t used_hard_reg
_MIR_new_hard_reg_op (ctx, FP_HARD_REG)); /* mem[sp] = fp */
gen_mov (ctx, anchor, MIR_MOV, fp_reg_op, sp_reg_op); /* fp = sp */
if (func->vararg_p) {
MIR_reg_t base = SP_HARD_REG;
start = (int64_t) frame_size - reg_save_area_size;
fsave (ctx, anchor, start, V0_HARD_REG);
fsave (ctx, anchor, start + 16, V1_HARD_REG);
fsave (ctx, anchor, start + 32, V2_HARD_REG);
fsave (ctx, anchor, start + 48, V3_HARD_REG);
fsave (ctx, anchor, start + 64, V4_HARD_REG);
fsave (ctx, anchor, start + 80, V5_HARD_REG);
fsave (ctx, anchor, start + 96, V6_HARD_REG);
fsave (ctx, anchor, start + 112, V7_HARD_REG);
isave (ctx, anchor, start + 128, R0_HARD_REG);
isave (ctx, anchor, start + 136, R1_HARD_REG);
isave (ctx, anchor, start + 144, R2_HARD_REG);
isave (ctx, anchor, start + 152, R3_HARD_REG);
isave (ctx, anchor, start + 160, R4_HARD_REG);
isave (ctx, anchor, start + 168, R5_HARD_REG);
isave (ctx, anchor, start + 176, R6_HARD_REG);
isave (ctx, anchor, start + 184, R7_HARD_REG);
if ((start + 184) >= (1 << 12)) {
new_insn = MIR_new_insn (ctx, MIR_MOV, treg_op, MIR_new_int_op (ctx, start));
gen_add_insn_before (ctx, anchor, new_insn); /* t = frame_size - reg_save_area_size */
start = 0;
base = R9_HARD_REG;
}
fsave (ctx, anchor, start, base, V0_HARD_REG);
fsave (ctx, anchor, start + 16, base, V1_HARD_REG);
fsave (ctx, anchor, start + 32, base, V2_HARD_REG);
fsave (ctx, anchor, start + 48, base, V3_HARD_REG);
fsave (ctx, anchor, start + 64, base, V4_HARD_REG);
fsave (ctx, anchor, start + 80, base, V5_HARD_REG);
fsave (ctx, anchor, start + 96, base, V6_HARD_REG);
fsave (ctx, anchor, start + 112, base, V7_HARD_REG);
isave (ctx, anchor, start + 128, base, R0_HARD_REG);
isave (ctx, anchor, start + 136, base, R1_HARD_REG);
isave (ctx, anchor, start + 144, base, R2_HARD_REG);
isave (ctx, anchor, start + 152, base, R3_HARD_REG);
isave (ctx, anchor, start + 160, base, R4_HARD_REG);
isave (ctx, anchor, start + 168, base, R5_HARD_REG);
isave (ctx, anchor, start + 176, base, R6_HARD_REG);
isave (ctx, anchor, start + 184, base, R7_HARD_REG);
}
/* Saving callee saved hard registers: */
offset = frame_size - frame_size_after_saved_regs;
@ -834,8 +849,14 @@ static void target_make_prolog_epilog (MIR_context_t ctx, bitmap_t used_hard_reg
/* Restore lr, sp, fp */
gen_mov (ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, LINK_HARD_REG),
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, 8, FP_HARD_REG, MIR_NON_HARD_REG, 1));
new_insn = MIR_new_insn (ctx, MIR_ADD, sp_reg_op, fp_reg_op, MIR_new_int_op (ctx, frame_size));
gen_add_insn_before (ctx, anchor, new_insn); /* sp = fp + frame_size */
if (frame_size < (1 << 12)) {
new_insn = MIR_new_insn (ctx, MIR_ADD, sp_reg_op, fp_reg_op, MIR_new_int_op (ctx, frame_size));
} else {
new_insn = MIR_new_insn (ctx, MIR_MOV, treg_op, MIR_new_int_op (ctx, frame_size));
gen_add_insn_before (ctx, anchor, new_insn); /* t = frame_size */
new_insn = MIR_new_insn (ctx, MIR_ADD, sp_reg_op, fp_reg_op, treg_op);
}
gen_add_insn_before (ctx, anchor, new_insn); /* sp = fp + (frame_size|t) */
gen_mov (ctx, anchor, MIR_MOV, fp_reg_op,
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, 0, FP_HARD_REG, MIR_NON_HARD_REG, 1));
}
@ -995,8 +1016,7 @@ static const struct pattern patterns[] = {
{MIR_UEXT16, "r r", "53003c00:fffffc00 rd0 rn1"}, /* uxth wd, wn */
{MIR_UEXT32, "r r", "2a0003e0:7fe0ffe0 rd0 rm1"}, /* mov wd, wm */
// ??? add extended reg insns:
{MIR_ADD, "r r r", "8b000000:ff200000 rd0 rn1 rm2"}, /* add Rd,Rn,Rm*/
{MIR_ADD, "r r r", "8b206000:ffe0fc00 rd0 rn1 rm2"}, /* extended add Rd,Rn,Rm*/
{MIR_ADD, "r r I", "91000000:ff000000 rd0 rn1 I"}, /* add Rd,Rn,I,shift */
{MIR_ADDS, "r r r", "0b000000:ff200000 rd0 rn1 rm2"}, /* add Wd,Wn,Wm*/
{MIR_ADDS, "r r I", "11000000:ff000000 rd0 rn1 I"}, /* add Wd,Wn,I,shift */
@ -1004,7 +1024,7 @@ static const struct pattern patterns[] = {
{MIR_DADD, "r r r", "1e602800:ffe0fc00 vd0 vn1 vm2"}, /* fadd Dd,Dn,Dm*/
// ldadd is implemented through builtin
{MIR_SUB, "r r r", "cb000000:ff200000 rd0 rn1 rm2"}, /* sub Rd,Rn,Rm*/
{MIR_SUB, "r r r", "cb206000:ffe0fc00 rd0 rn1 rm2"}, /* extended sub Rd,Rn,Rm*/
{MIR_SUB, "r r I", "d1000000:ff000000 rd0 rn1 I"}, /* sub Rd,Rn,I,shift */
{MIR_SUBS, "r r r", "4b000000:ff200000 rd0 rn1 rm2"}, /* sub Wd,Wn,Wm*/
{MIR_SUBS, "r r I", "51000000:ff000000 rd0 rn1 I"}, /* sub Wd,Wn,I,shift */
@ -2057,7 +2077,7 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
for (insn = DLIST_HEAD (MIR_insn_t, curr_func_item->u.func->insns); insn != NULL;
insn = DLIST_NEXT (MIR_insn_t, insn)) {
if (insn->code == MIR_LABEL) {
set_label_disp (insn, VARR_LENGTH (uint8_t, result_code));
set_label_disp (ctx, insn, VARR_LENGTH (uint8_t, result_code));
} else {
replacement = find_insn_pattern_replacement (ctx, insn);
if (replacement == NULL) {
@ -2076,7 +2096,7 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
if (!lr.abs_addr_p) {
int64_t offset
= (int64_t) get_label_disp (lr.label) - (int64_t) lr.label_val_disp; /* pc offset */
= (int64_t) get_label_disp (ctx, lr.label) - (int64_t) lr.label_val_disp; /* pc offset */
gen_assert ((offset & 0x3) == 0);
if (lr.short_p)
*(uint32_t *) (VARR_ADDR (uint8_t, result_code) + lr.label_val_disp)
@ -2086,7 +2106,7 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
|= (offset / 4) & 0x3ffffff; /* 26-bit */
} else {
set_int64 (&VARR_ADDR (uint8_t, result_code)[lr.label_val_disp],
(int64_t) get_label_disp (lr.label), 8);
(int64_t) get_label_disp (ctx, lr.label), 8);
VARR_PUSH (uint64_t, abs_address_locs, lr.label_val_disp);
}
}

@ -46,8 +46,6 @@ const MIR_reg_t TEMP_LDOUBLE_HARD_REG2 = F12_HARD_REG;
static inline int target_hard_reg_type_ok_p (MIR_reg_t hard_reg, MIR_type_t type) {
assert (hard_reg <= MAX_HARD_REG);
/* For LD we need x87 stack regs and it is too complicated so no
hard register allocation for LD: */
if (type == MIR_T_LD) return FALSE;
return MIR_fp_type_p (type) ? F0_HARD_REG <= hard_reg && hard_reg <= F31_HARD_REG
: hard_reg < F0_HARD_REG;
@ -2248,14 +2246,14 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
}
}
if (insn->code == MIR_LABEL) {
set_label_disp (insn, VARR_LENGTH (uint8_t, result_code));
set_label_disp (ctx, insn, VARR_LENGTH (uint8_t, result_code));
} else {
int use_short_label_p = TRUE;
if (n_iter > 0 && MIR_branch_code_p (code)) {
MIR_label_t label = insn->ops[0].u.label;
int64_t offset
= (int64_t) get_label_disp (label) - (int64_t) VARR_LENGTH (uint8_t, result_code);
= (int64_t) get_label_disp (ctx, label) - (int64_t) VARR_LENGTH (uint8_t, result_code);
use_short_label_p = ((offset < 0 ? -offset : offset) & ~(int64_t) 0x7fff) == 0;
}
@ -2276,10 +2274,10 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
if (lr.abs_addr_p) {
set_int64 (&VARR_ADDR (uint8_t, result_code)[lr.label_val_disp],
(int64_t) get_label_disp (lr.label));
(int64_t) get_label_disp (ctx, lr.label));
VARR_PUSH (uint64_t, abs_address_locs, lr.label_val_disp);
} else if (lr.short_addr_p) { /* 14-bit relative addressing */
int64_t offset = (int64_t) get_label_disp (lr.label) - (int64_t) lr.label_val_disp;
int64_t offset = (int64_t) get_label_disp (ctx, lr.label) - (int64_t) lr.label_val_disp;
gen_assert ((offset & 0x3) == 0);
if (((offset < 0 ? -offset : offset) & ~(int64_t) 0x7fff) != 0) {
@ -2289,7 +2287,7 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
|= ((offset / 4) & 0x3fff) << 2;
}
} else { /* 24-bit relative address */
int64_t offset = (int64_t) get_label_disp (lr.label) - (int64_t) lr.label_val_disp;
int64_t offset = (int64_t) get_label_disp (ctx, lr.label) - (int64_t) lr.label_val_disp;
gen_assert ((offset & 0x3) == 0
&& ((offset < 0 ? -offset : offset) & ~(int64_t) 0x1ffffff) == 0);
*(uint32_t *) (VARR_ADDR (uint8_t, result_code) + lr.label_val_disp)

@ -2,7 +2,7 @@
Copyright (C) 2020 Vladimir Makarov <vmakarov.gcc@gmail.com>.
*/
// ??? More patterns (ult, ugt, ule, uge w/o branches).
// ??? More patterns (ult, ugt, ule, uge w/o branches, multi-insn combining).
static void fancy_abort (int code) {
if (!code) abort ();
@ -2097,7 +2097,7 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
}
}
if (insn->code == MIR_LABEL) {
set_label_disp (insn, VARR_LENGTH (uint8_t, result_code));
set_label_disp (ctx, insn, VARR_LENGTH (uint8_t, result_code));
} else {
replacement = find_insn_pattern_replacement (ctx, insn);
if (replacement == NULL) {
@ -2116,10 +2116,10 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
if (lr.abs_addr_p) {
set_int64 (&VARR_ADDR (uint8_t, result_code)[lr.label_val_disp],
(int64_t) get_label_disp (lr.label));
(int64_t) get_label_disp (ctx, lr.label));
VARR_PUSH (uint64_t, abs_address_locs, lr.label_val_disp);
} else { /* 32-bit relative address */
int64_t offset = (int64_t) get_label_disp (lr.label) - (int64_t) lr.label_val_disp;
int64_t offset = (int64_t) get_label_disp (ctx, lr.label) - (int64_t) lr.label_val_disp;
gen_assert (offset % 2 == 0);
offset /= 2;
gen_assert (((offset < 0 ? -offset : offset) & ~(int64_t) 0x7fffffff) == 0);

@ -26,8 +26,13 @@ static inline MIR_reg_t target_nth_loc (MIR_reg_t loc, MIR_type_t type, int n) {
/* Hard regs not used in machinized code, preferably call used ones. */
const MIR_reg_t TEMP_INT_HARD_REG1 = R10_HARD_REG, TEMP_INT_HARD_REG2 = R11_HARD_REG;
#ifndef _WIN64
const MIR_reg_t TEMP_FLOAT_HARD_REG1 = XMM8_HARD_REG, TEMP_FLOAT_HARD_REG2 = XMM9_HARD_REG;
const MIR_reg_t TEMP_DOUBLE_HARD_REG1 = XMM8_HARD_REG, TEMP_DOUBLE_HARD_REG2 = XMM9_HARD_REG;
#else
const MIR_reg_t TEMP_FLOAT_HARD_REG1 = XMM4_HARD_REG, TEMP_FLOAT_HARD_REG2 = XMM5_HARD_REG;
const MIR_reg_t TEMP_DOUBLE_HARD_REG1 = XMM4_HARD_REG, TEMP_DOUBLE_HARD_REG2 = XMM5_HARD_REG;
#endif
const MIR_reg_t TEMP_LDOUBLE_HARD_REG1 = MIR_NON_HARD_REG;
const MIR_reg_t TEMP_LDOUBLE_HARD_REG2 = MIR_NON_HARD_REG;
@ -50,7 +55,13 @@ static inline int target_fixed_hard_reg_p (MIR_reg_t hard_reg) {
/* Return TRUE iff HARD_REG is clobbered by a call, i.e. it is NOT one of the
   registers this target treats as preserved across calls.  The preserved set
   differs between the two supported x86-64 calling conventions, selected at
   compile time by _WIN64. */
static inline int target_call_used_hard_reg_p (MIR_reg_t hard_reg) {
  assert (hard_reg <= MAX_HARD_REG);
#ifndef _WIN64
  /* Non-Windows (SysV-style): bx and r12..r15 are preserved. */
  return !(hard_reg == BX_HARD_REG || (hard_reg >= R12_HARD_REG && hard_reg <= R15_HARD_REG));
#else
  /* Win64 additionally preserves si, di, and xmm6..xmm15. */
  return !(hard_reg == BX_HARD_REG || hard_reg == SI_HARD_REG || hard_reg == DI_HARD_REG
           || (hard_reg >= R12_HARD_REG && hard_reg <= R15_HARD_REG)
           || (hard_reg >= XMM6_HARD_REG && hard_reg <= XMM15_HARD_REG));
#endif
}
/* Stack layout (sp refers to the last reserved stack slot address)
@ -249,7 +260,7 @@ static void machinize_call (MIR_context_t ctx, MIR_insn_t call_insn) {
if (proto->vararg_p && type == MIR_T_D) {
gen_assert (int_arg_num > 0 && int_arg_num <= 4);
arg_reg = get_int_arg_reg (int_arg_num - 1);
setup_call_hard_reg_args (call_insn, arg_reg);
setup_call_hard_reg_args (ctx, call_insn, arg_reg);
/* mir does not support moving fp to int regs directly, spill and load them instead */
mem_op = _MIR_new_hard_reg_mem_op (ctx, MIR_T_D, 8, SP_HARD_REG, MIR_NON_HARD_REG, 1);
new_insn = MIR_new_insn (ctx, MIR_DMOV, mem_op, arg_op);
@ -278,7 +289,7 @@ static void machinize_call (MIR_context_t ctx, MIR_insn_t call_insn) {
}
#ifndef _WIN64
if (proto->vararg_p) {
setup_call_hard_reg_args (call_insn, AX_HARD_REG);
setup_call_hard_reg_args (ctx, call_insn, AX_HARD_REG);
new_insn = MIR_new_insn (ctx, MIR_MOV, _MIR_new_hard_reg_op (ctx, AX_HARD_REG),
MIR_new_int_op (ctx, xmm_args));
gen_add_insn_before (ctx, call_insn, new_insn);
@ -568,7 +579,8 @@ static void target_machinize (MIR_context_t ctx) {
mem_size = 8 /*ret*/ + start_sp_from_bp_offset;
for (int i = 0; i < 4; i++) {
arg_reg = get_int_arg_reg (i);
mem_op = _MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, mem_size, FP_HARD_REG, MIR_NON_HARD_REG, 1);
mem_op
= _MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, mem_size, FP_HARD_REG, MIR_NON_HARD_REG, 1);
new_insn = MIR_new_insn (ctx, MIR_MOV, mem_op, _MIR_new_hard_reg_op (ctx, arg_reg));
gen_add_insn_before (ctx, insn, new_insn);
mem_size += 8;
@ -617,12 +629,14 @@ static void target_machinize (MIR_context_t ctx) {
&& mem_op.mode == MIR_OP_MEM);
/* load and increment va pointer */
treg_op = MIR_new_reg_op (ctx, gen_new_temp_reg (ctx, MIR_T_I64, curr_func_item->u.func));
gen_mov (ctx, insn, MIR_MOV, treg_op, MIR_new_mem_op (ctx, MIR_T_I64, 0, va_reg_op.u.reg, 0, 1));
gen_mov (ctx, insn, MIR_MOV, treg_op,
MIR_new_mem_op (ctx, MIR_T_I64, 0, va_reg_op.u.reg, 0, 1));
new_insn = MIR_new_insn (ctx, MIR_MOV, res_reg_op, treg_op);
gen_add_insn_before (ctx, insn, new_insn);
new_insn = MIR_new_insn (ctx, MIR_ADD, treg_op, treg_op, MIR_new_int_op (ctx, 8));
gen_add_insn_before (ctx, insn, new_insn);
gen_mov (ctx, insn, MIR_MOV, MIR_new_mem_op (ctx, MIR_T_I64, 0, va_reg_op.u.reg, 0, 1), treg_op);
gen_mov (ctx, insn, MIR_MOV, MIR_new_mem_op (ctx, MIR_T_I64, 0, va_reg_op.u.reg, 0, 1),
treg_op);
#endif
gen_delete_insn (ctx, insn);
} else if (MIR_call_code_p (code)) {
@ -750,12 +764,16 @@ static void target_make_prolog_epilog (MIR_context_t ctx, bitmap_t used_hard_reg
/* Use add for matching LEA: */
new_insn = MIR_new_insn (ctx, MIR_ADD, fp_reg_op, sp_reg_op, MIR_new_int_op (ctx, -8));
gen_add_insn_before (ctx, anchor, new_insn); /* bp = sp - 8 */
if (!func->vararg_p) {
service_area_size = 8;
} else {
service_area_size = reg_save_area_size + 8;
service_area_size = func->vararg_p ? reg_save_area_size + 8 : 8;
stack_slots_size = stack_slots_num * 8;
/* stack slots, and saved regs as multiple of 16 bytes: */
block_size = (stack_slots_size + 8 * saved_hard_regs_num + 15) / 16 * 16;
new_insn = MIR_new_insn (ctx, MIR_SUB, sp_reg_op, sp_reg_op,
MIR_new_int_op (ctx, block_size + service_area_size));
gen_add_insn_before (ctx, anchor, new_insn); /* sp -= block size + service_area_size */
if (func->vararg_p) {
#ifndef _WIN64
start = -(int64_t) service_area_size;
start = block_size;
isave (ctx, anchor, start, DI_HARD_REG);
isave (ctx, anchor, start + 8, SI_HARD_REG);
isave (ctx, anchor, start + 16, DX_HARD_REG);
@ -772,12 +790,6 @@ static void target_make_prolog_epilog (MIR_context_t ctx, bitmap_t used_hard_reg
dsave (ctx, anchor, start + 160, XMM7_HARD_REG);
#endif
}
stack_slots_size = stack_slots_num * 8;
/* stack slots, and saved regs as multiple of 16 bytes: */
block_size = (stack_slots_size + 8 * saved_hard_regs_num + 15) / 16 * 16;
new_insn = MIR_new_insn (ctx, MIR_SUB, sp_reg_op, sp_reg_op,
MIR_new_int_op (ctx, block_size + service_area_size));
gen_add_insn_before (ctx, anchor, new_insn); /* sp -= block size + service_area_size */
bp_saved_reg_offset = block_size + (func->vararg_p ? reg_save_area_size : 0);
/* Saving callee saved hard registers: */
for (i = n = 0; i <= MAX_HARD_REG; i++)
@ -1044,7 +1056,7 @@ static const struct pattern patterns[] = {
{MIR_D2I, "r r", "F2 X 0F 2C r0 R1"}, /* cvttsd2si r0,r1 */
{MIR_D2I, "r md", "F2 X 0F 2C r0 m1"}, /* cvttsd2si r0,m1 */
{MIR_F2D, "r r", "F3 0F 5A r0 R1"}, /* cvtss2sd r0,r1 */
{MIR_F2D, "r r", "F3 Y 0F 5A r0 R1"}, /* cvtss2sd r0,r1 */
{MIR_F2D, "r mf", "F3 Y 0F 5A r0 m1"}, /* cvtss2sd r0,m1 */
/* fld m1;fstpl -16(sp);movsd r0,-16(sp): */
{MIR_LD2D, "r mld", "DB /5 m1; DD /3 mt; F2 Y 0F 10 r0 mt"},
@ -1913,7 +1925,7 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
for (insn = DLIST_HEAD (MIR_insn_t, curr_func_item->u.func->insns); insn != NULL;
insn = DLIST_NEXT (MIR_insn_t, insn)) {
if (insn->code == MIR_LABEL) {
set_label_disp (insn, VARR_LENGTH (uint8_t, result_code));
set_label_disp (ctx, insn, VARR_LENGTH (uint8_t, result_code));
} else {
replacement = find_insn_pattern_replacement (ctx, insn);
if (replacement == NULL) {
@ -1932,10 +1944,10 @@ static uint8_t *target_translate (MIR_context_t ctx, size_t *len) {
if (!lr.abs_addr_p) {
set_int64 (&VARR_ADDR (uint8_t, result_code)[lr.label_val_disp],
(int64_t) get_label_disp (lr.label) - (int64_t) lr.next_insn_disp, 4);
(int64_t) get_label_disp (ctx, lr.label) - (int64_t) lr.next_insn_disp, 4);
} else {
set_int64 (&VARR_ADDR (uint8_t, result_code)[lr.label_val_disp],
(int64_t) get_label_disp (lr.label), 8);
(int64_t) get_label_disp (ctx, lr.label), 8);
VARR_PUSH (uint64_t, abs_address_locs, lr.label_val_disp);
}
}

File diff suppressed because it is too large Load Diff

@ -80,144 +80,157 @@ DEF_VARR (htab_ind_t)
VARR (htab_ind_t) * entries; \
} HTAB (T);
#define DEF_HTAB(T) \
HTAB_T (T) \
\
static inline void HTAB_OP_DEF (T, create) (HTAB (T) * *htab, htab_size_t min_size, \
htab_hash_t (*hash_func) (T el, void *arg), \
int (*eq_func) (T el1, T el2, void *arg), \
void (*free_func) (T el, void *arg), void *arg) { \
HTAB (T) * ht; \
htab_size_t i, size; \
\
for (size = 2; min_size > size; size *= 2) \
; \
ht = malloc (sizeof (*ht)); \
if (ht == NULL) mir_htab_error ("htab: no memory"); \
VARR_CREATE (HTAB_EL (T), ht->els, size); \
VARR_TAILOR (HTAB_EL (T), ht->els, size); \
VARR_CREATE (htab_ind_t, ht->entries, 2 * size); \
ht->arg = arg; \
ht->hash_func = hash_func; \
ht->eq_func = eq_func; \
ht->free_func = free_func; \
ht->els_num = ht->els_start = ht->els_bound = ht->collisions = 0; \
for (i = 0; i < 2 * size; i++) VARR_PUSH (htab_ind_t, ht->entries, HTAB_EMPTY_IND); \
*htab = ht; \
} \
\
static inline void HTAB_OP_DEF (T, clear) (HTAB (T) * htab) { \
htab_ind_t *addr; \
htab_size_t i, size; \
HTAB_EL (T) * els_addr; \
void *arg; \
\
HTAB_ASSERT (htab != NULL, "clear", T); \
arg = htab->arg; \
if (htab->free_func != NULL) { \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
size = VARR_LENGTH (HTAB_EL (T), htab->els); \
for (i = 0; i < htab->els_bound; i++) \
if (els_addr[i].hash != HTAB_DELETED_HASH) htab->free_func (els_addr[i].el, arg); \
} \
htab->els_num = htab->els_start = htab->els_bound = 0; \
addr = VARR_ADDR (htab_ind_t, htab->entries); \
size = VARR_LENGTH (htab_ind_t, htab->entries); \
for (i = 0; i < size; i++) addr[i] = HTAB_EMPTY_IND; \
} \
\
static inline void HTAB_OP_DEF (T, destroy) (HTAB (T) * *htab) { \
HTAB_ASSERT (*htab != NULL, "destroy", T); \
if ((*htab)->free_func != NULL) HTAB_OP (T, clear) (*htab); \
VARR_DESTROY (HTAB_EL (T), (*htab)->els); \
VARR_DESTROY (htab_ind_t, (*htab)->entries); \
free (*htab); \
*htab = NULL; \
} \
\
static inline int HTAB_OP_DEF (T, do) (HTAB (T) * htab, T el, enum htab_action action, \
T * res) { \
htab_ind_t ind, el_ind, *entry, *first_deleted_entry = NULL; \
htab_hash_t hash, peterb; \
htab_size_t els_size, size, mask, start, bound, i; \
htab_ind_t *addr; \
HTAB_EL (T) * els_addr; \
void *arg; \
\
HTAB_ASSERT (htab != NULL, "do htab", T); \
size = VARR_LENGTH (htab_ind_t, htab->entries); \
els_size = VARR_LENGTH (HTAB_EL (T), htab->els); \
arg = htab->arg; \
HTAB_ASSERT (els_size * 2 == size, "do size", T); \
if ((action == HTAB_INSERT || action == HTAB_REPLACE) && htab->els_bound == els_size) { \
size *= 2; \
VARR_TAILOR (htab_ind_t, htab->entries, size); \
addr = VARR_ADDR (htab_ind_t, htab->entries); \
for (i = 0; i < size; i++) addr[i] = HTAB_EMPTY_IND; \
VARR_TAILOR (HTAB_EL (T), htab->els, els_size * 2); \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
start = htab->els_start; \
bound = htab->els_bound; \
htab->els_start = htab->els_bound = htab->els_num = 0; \
for (i = start; i < bound; i++) \
if (els_addr[i].hash != HTAB_DELETED_HASH) { \
HTAB_OP (T, do) (htab, els_addr[i].el, HTAB_INSERT, res); \
HTAB_ASSERT ((*htab->eq_func) (*res, els_addr[i].el, arg), "do expand", T); \
} \
HTAB_ASSERT (bound - start >= htab->els_bound, "do bound", T); \
} \
mask = size - 1; \
hash = (*htab->hash_func) (el, arg); \
if (hash == HTAB_DELETED_HASH) hash += 1; \
peterb = hash; \
ind = hash & mask; \
addr = VARR_ADDR (htab_ind_t, htab->entries); \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
for (;; htab->collisions++) { \
entry = addr + ind; \
el_ind = *entry; \
if (el_ind != HTAB_EMPTY_IND) { \
if (el_ind == HTAB_DELETED_IND) { \
first_deleted_entry = entry; \
} else if (els_addr[el_ind].hash == hash \
&& (*htab->eq_func) (els_addr[el_ind].el, el, arg)) { \
if (action == HTAB_REPLACE) { \
if (htab->free_func != NULL) htab->free_func (els_addr[el_ind].el, arg); \
els_addr[el_ind].el = el; \
} \
if (action != HTAB_DELETE) { \
*res = els_addr[el_ind].el; \
} else { \
htab->els_num--; \
*entry = HTAB_DELETED_IND; \
if (htab->free_func != NULL) htab->free_func (els_addr[el_ind].el, arg); \
els_addr[el_ind].hash = HTAB_DELETED_HASH; \
} \
return TRUE; \
} \
} else { \
if (action == HTAB_INSERT || action == HTAB_REPLACE) { \
htab->els_num++; \
if (first_deleted_entry != NULL) entry = first_deleted_entry; \
els_addr[htab->els_bound].hash = hash; \
els_addr[htab->els_bound].el = el; \
*entry = htab->els_bound++; \
*res = el; \
} \
return FALSE; \
} \
peterb >>= 11; \
ind = (5 * ind + peterb + 1) & mask; \
} \
} \
\
static inline htab_size_t HTAB_OP_DEF (T, els_num) (HTAB (T) * htab) { \
HTAB_ASSERT (htab != NULL, "els_num", T); \
return htab->els_num; \
} \
static inline htab_size_t HTAB_OP_DEF (T, collisions) (HTAB (T) * htab) { \
HTAB_ASSERT (htab != NULL, "collisions", T); \
return htab->collisions; \
#define DEF_HTAB(T) \
HTAB_T (T) \
\
static inline void HTAB_OP_DEF (T, create) (HTAB (T) * *htab, htab_size_t min_size, \
htab_hash_t (*hash_func) (T el, void *arg), \
int (*eq_func) (T el1, T el2, void *arg), \
void (*free_func) (T el, void *arg), void *arg) { \
HTAB (T) * ht; \
htab_size_t i, size; \
\
for (size = 2; min_size > size; size *= 2) \
; \
ht = malloc (sizeof (*ht)); \
if (ht == NULL) mir_htab_error ("htab: no memory"); \
VARR_CREATE (HTAB_EL (T), ht->els, size); \
VARR_TAILOR (HTAB_EL (T), ht->els, size); \
VARR_CREATE (htab_ind_t, ht->entries, 2 * size); \
ht->arg = arg; \
ht->hash_func = hash_func; \
ht->eq_func = eq_func; \
ht->free_func = free_func; \
ht->els_num = ht->els_start = ht->els_bound = ht->collisions = 0; \
for (i = 0; i < 2 * size; i++) VARR_PUSH (htab_ind_t, ht->entries, HTAB_EMPTY_IND); \
*htab = ht; \
} \
\
static inline void HTAB_OP_DEF (T, clear) (HTAB (T) * htab) { \
htab_ind_t *addr; \
htab_size_t i, size; \
HTAB_EL (T) * els_addr; \
void *arg; \
\
HTAB_ASSERT (htab != NULL, "clear", T); \
arg = htab->arg; \
if (htab->free_func != NULL) { \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
size = VARR_LENGTH (HTAB_EL (T), htab->els); \
for (i = 0; i < htab->els_bound; i++) \
if (els_addr[i].hash != HTAB_DELETED_HASH) htab->free_func (els_addr[i].el, arg); \
} \
htab->els_num = htab->els_start = htab->els_bound = 0; \
addr = VARR_ADDR (htab_ind_t, htab->entries); \
size = VARR_LENGTH (htab_ind_t, htab->entries); \
for (i = 0; i < size; i++) addr[i] = HTAB_EMPTY_IND; \
} \
\
static inline void HTAB_OP_DEF (T, destroy) (HTAB (T) * *htab) { \
HTAB_ASSERT (*htab != NULL, "destroy", T); \
if ((*htab)->free_func != NULL) HTAB_OP (T, clear) (*htab); \
VARR_DESTROY (HTAB_EL (T), (*htab)->els); \
VARR_DESTROY (htab_ind_t, (*htab)->entries); \
free (*htab); \
*htab = NULL; \
} \
\
static inline int HTAB_OP_DEF (T, do) (HTAB (T) * htab, T el, enum htab_action action, \
T * res) { \
htab_ind_t ind, el_ind, *entry, *first_deleted_entry = NULL; \
htab_hash_t hash, peterb; \
htab_size_t els_size, size, mask, start, bound, i; \
htab_ind_t *addr; \
HTAB_EL (T) * els_addr; \
void *arg; \
\
HTAB_ASSERT (htab != NULL, "do htab", T); \
size = VARR_LENGTH (htab_ind_t, htab->entries); \
els_size = VARR_LENGTH (HTAB_EL (T), htab->els); \
arg = htab->arg; \
HTAB_ASSERT (els_size * 2 == size, "do size", T); \
if ((action == HTAB_INSERT || action == HTAB_REPLACE) && htab->els_bound == els_size) { \
size *= 2; \
VARR_TAILOR (htab_ind_t, htab->entries, size); \
addr = VARR_ADDR (htab_ind_t, htab->entries); \
for (i = 0; i < size; i++) addr[i] = HTAB_EMPTY_IND; \
VARR_TAILOR (HTAB_EL (T), htab->els, els_size * 2); \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
start = htab->els_start; \
bound = htab->els_bound; \
htab->els_start = htab->els_bound = htab->els_num = 0; \
for (i = start; i < bound; i++) \
if (els_addr[i].hash != HTAB_DELETED_HASH) { \
HTAB_OP (T, do) (htab, els_addr[i].el, HTAB_INSERT, res); \
HTAB_ASSERT ((*htab->eq_func) (*res, els_addr[i].el, arg), "do expand", T); \
} \
HTAB_ASSERT (bound - start >= htab->els_bound, "do bound", T); \
} \
mask = size - 1; \
hash = (*htab->hash_func) (el, arg); \
if (hash == HTAB_DELETED_HASH) hash += 1; \
peterb = hash; \
ind = hash & mask; \
addr = VARR_ADDR (htab_ind_t, htab->entries); \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
for (;; htab->collisions++) { \
entry = addr + ind; \
el_ind = *entry; \
if (el_ind != HTAB_EMPTY_IND) { \
if (el_ind == HTAB_DELETED_IND) { \
first_deleted_entry = entry; \
} else if (els_addr[el_ind].hash == hash \
&& (*htab->eq_func) (els_addr[el_ind].el, el, arg)) { \
if (action == HTAB_REPLACE) { \
if (htab->free_func != NULL) htab->free_func (els_addr[el_ind].el, arg); \
els_addr[el_ind].el = el; \
} \
if (action != HTAB_DELETE) { \
*res = els_addr[el_ind].el; \
} else { \
htab->els_num--; \
*entry = HTAB_DELETED_IND; \
if (htab->free_func != NULL) htab->free_func (els_addr[el_ind].el, arg); \
els_addr[el_ind].hash = HTAB_DELETED_HASH; \
} \
return TRUE; \
} \
} else { \
if (action == HTAB_INSERT || action == HTAB_REPLACE) { \
htab->els_num++; \
if (first_deleted_entry != NULL) entry = first_deleted_entry; \
els_addr[htab->els_bound].hash = hash; \
els_addr[htab->els_bound].el = el; \
*entry = htab->els_bound++; \
*res = el; \
} \
return FALSE; \
} \
peterb >>= 11; \
ind = (5 * ind + peterb + 1) & mask; \
} \
} \
\
static inline htab_size_t HTAB_OP_DEF (T, els_num) (HTAB (T) * htab) { \
HTAB_ASSERT (htab != NULL, "els_num", T); \
return htab->els_num; \
} \
static inline htab_size_t HTAB_OP_DEF (T, collisions) (HTAB (T) * htab) { \
HTAB_ASSERT (htab != NULL, "collisions", T); \
return htab->collisions; \
} \
\
static inline void HTAB_OP_DEF (T, foreach_elem) (HTAB (T) * htab, \
void (*func) (T el, void *arg), void *arg) { \
htab_ind_t *addr; \
htab_size_t i, size; \
HTAB_EL (T) * els_addr; \
\
HTAB_ASSERT (htab != NULL, "foreach_elem", T); \
els_addr = VARR_ADDR (HTAB_EL (T), htab->els); \
size = VARR_LENGTH (HTAB_EL (T), htab->els); \
for (i = 0; i < htab->els_bound; i++) \
if (els_addr[i].hash != HTAB_DELETED_HASH) func (els_addr[i].el, arg); \
}
#define HTAB_CREATE(T, V, S, H, EQ, A) (HTAB_OP (T, create) (&(V), S, H, EQ, NULL, A))
@ -229,5 +242,6 @@ DEF_VARR (htab_ind_t)
#define HTAB_DO(T, V, EL, A, TAB_EL) (HTAB_OP (T, do) (V, EL, A, &(TAB_EL)))
#define HTAB_ELS_NUM(T, V) (HTAB_OP (T, els_num) (V))
#define HTAB_COLLISIONS(T, V) (HTAB_OP (T, collisions) (V))
#define HTAB_FOREACH_ELEM(T, V, F, A) (HTAB_OP (T, foreach_elem) (V, F, A))
#endif /* #ifndef MIR_HTAB_H */

@ -169,5 +169,7 @@ static inline void MIR_VARR_NO_RETURN mir_varr_error (const char *message) {
#define VARR_PUSH(T, V, O) (VARR_OP (T, push) (V, O))
#define VARR_PUSH_ARR(T, V, A, L) (VARR_OP (T, push_arr) (V, A, L))
#define VARR_POP(T, V) (VARR_OP (T, pop) (V))
/* Iterate over varr V of element type T: index I runs from 0 up to
   VARR_LENGTH (T, V), and on each iteration EL is assigned the current
   element via VARR_GET before the loop body runs. */
#define VARR_FOREACH_ELEM(T, V, I, EL) \
  for ((I) = 0; (I) >= VARR_LENGTH (T, V) ? 0 : (EL = VARR_GET (T, V, I), 1); (I)++)
#endif /* #ifndef MIR_VARR_H */

@ -415,15 +415,22 @@ DEF_VARR (reg_desc_t);
DEF_HTAB (size_t);
/* Memoization of register -> type lookups, valid for one function at a time:
   FUNC identifies which function the TYPES entries (indexed by register
   number) currently belong to.  Used by MIR_reg_type. */
struct reg_type_cache {
  MIR_func_t func;           /* function the cached entries were computed for */
  VARR (MIR_type_t) * types; /* types[reg] or MIR_T_UNDEF when not cached yet */
};
/* Per-context register bookkeeping: the register descriptor array plus the
   hash tables that map a (name-number) key and a register number,
   respectively, to a reg-descriptor index ("rdn"). */
struct reg_ctx {
  VARR (reg_desc_t) * reg_descs;      /* descriptor for each register, indexed by rdn */
  HTAB (size_t) * namenum2rdn_tab;    /* name-number -> rdn */
  HTAB (size_t) * reg2rdn_tab;        /* register number -> rdn */
  struct reg_type_cache reg_type_cache; /* fast path for MIR_reg_type */
};
#define reg_descs ctx->reg_ctx->reg_descs
#define namenum2rdn_tab ctx->reg_ctx->namenum2rdn_tab
#define reg2rdn_tab ctx->reg_ctx->reg2rdn_tab
#define reg_type_cache ctx->reg_ctx->reg_type_cache
static int namenum2rdn_eq (size_t rdn1, size_t rdn2, void *arg) {
MIR_context_t ctx = arg;
@ -465,6 +472,8 @@ static void reg_init (MIR_context_t ctx) {
VARR_PUSH (reg_desc_t, reg_descs, rd); /* for 0 reg */
HTAB_CREATE (size_t, namenum2rdn_tab, 300, namenum2rdn_hash, namenum2rdn_eq, ctx);
HTAB_CREATE (size_t, reg2rdn_tab, 300, reg2rdn_hash, reg2rdn_eq, ctx);
reg_type_cache.func = NULL;
VARR_CREATE (MIR_type_t, reg_type_cache.types, 1000);
}
static MIR_reg_t create_func_reg (MIR_context_t ctx, MIR_func_t func, const char *name,
@ -496,6 +505,7 @@ static void reg_finish (MIR_context_t ctx) {
VARR_DESTROY (reg_desc_t, reg_descs);
HTAB_DESTROY (size_t, namenum2rdn_tab);
HTAB_DESTROY (size_t, reg2rdn_tab);
VARR_DESTROY (MIR_type_t, reg_type_cache.types);
free (ctx->reg_ctx);
ctx->reg_ctx = NULL;
}
@ -1870,7 +1880,20 @@ MIR_reg_t MIR_reg (MIR_context_t ctx, const char *reg_name, MIR_func_t func) {
}
/* Return the type of register REG in function FUNC.  Lookups are memoized in
   reg_type_cache, which holds entries for a single function at a time and is
   flushed when a query for a different function arrives.  (The captured diff
   left the old one-line body in front of the new cached implementation,
   making everything after the first return unreachable; that stray line is
   removed here.) */
MIR_type_t MIR_reg_type (MIR_context_t ctx, MIR_reg_t reg, MIR_func_t func) {
  MIR_type_t type;

  if (reg_type_cache.func != func) { /* cache belongs to another func: reset it */
    reg_type_cache.func = func;
    VARR_TRUNC (MIR_type_t, reg_type_cache.types, 0);
  }
  if (VARR_LENGTH (MIR_type_t, reg_type_cache.types) > reg
      && (type = VARR_GET (MIR_type_t, reg_type_cache.types, reg)) != MIR_T_UNDEF)
    return type; /* cache hit */
  type = get_func_rd_by_reg (ctx, reg, func)->type;
  /* Grow the cache up to REG, using MIR_T_UNDEF as the "not cached" marker: */
  while (VARR_LENGTH (MIR_type_t, reg_type_cache.types) <= reg)
    VARR_PUSH (MIR_type_t, reg_type_cache.types, MIR_T_UNDEF);
  VARR_SET (MIR_type_t, reg_type_cache.types, reg, type);
  return type;
}
const char *MIR_reg_name (MIR_context_t ctx, MIR_reg_t reg, MIR_func_t func) {

@ -34,9 +34,6 @@
*/
static const char Lua_header[] =
//"#ifndef __SIZE_TYPE__\n"
//"#define __SIZE_TYPE__ long long\n"
//"#endif\n"
//"typedef __SIZE_TYPE__ size_t;\n"
"typedef long long size_t;\n"
"typedef long long ptrdiff_t;\n"

Loading…
Cancel
Save