issue #169 update MIR sources from upstream

mir-nonssa
Dibyendu Majumdar 4 years ago
parent 174cfa0168
commit 7f94078e8e

@ -43,11 +43,14 @@
their value
* `MIR_T_F` and `MIR_T_D` -- IEEE single and double precision floating point values
* `MIR_T_LD` - long double values. It is machine-dependent and can be IEEE double, x86 80-bit FP,
or IEEE quad precision FP values
or IEEE quad precision FP values. If it is the same as double, the double type will be used instead.
So please don't expect machine-independence of MIR code working with long double values
* `MIR_T_P` -- pointer values. Depending on the target pointer value is actually 32-bit or 64-bit integer value
* `MIR_T_BLK` -- block data. This type can be used only for a function argument
* `MIR_T_RBLK` -- return block data. This type can be used only for a function argument
* MIR textual representation of the types are correspondingly `i8`,
`u8`, `i16`, `u16`, `i32`, `u32`, `i64`, `u64`, `f`, `d`, `p`,
and `v`
`u8`, `i16`, `u16`, `i32`, `u32`, `i64`, `u64`, `f`, `d`, `ld`, `p`,
and `blk`
* Function `int MIR_int_type_p (MIR_type_t t)` returns TRUE if given type is an integer one (it includes pointer type too)
* Function `int MIR_fp_type_p (MIR_type_t t)` returns TRUE if given type is a floating point type
@ -78,6 +81,7 @@
only one result, have no arguments, not use any call or any instruction with memory
* The expression function is called during linking and its result is used to initialize the data
* **Memory segment**: `MIR_bss_item` with optional name (`MIR_item_t MIR_new_bss (MIR_context_t ctx, const char *name, size_t len)`)
* Long double data item is changed to double one, if long double coincides with double for given target or ABI
* Names of MIR functions, imports, and prototypes should be unique in a module
* API functions `MIR_output_item (MIR_context_t ctx, FILE *f, MIR_item_t item)`
and `MIR_output_module (MIR_context_t ctx, FILE *f, MIR_module_t module)` output item or module
@ -96,6 +100,7 @@
* A variable should have a unique name in the function
* A variable is represented by a structure of type `MIR_var_t`
* The structure contains variable name and its type
* The structure contains also type size for variable of `MIR_T_BLK` or `MIR_T_RBLK` type
* MIR function with its arguments is created through API function `MIR_item_t MIR_new_func (MIR_context_t ctx, const
char *name, size_t nres, MIR_type_t *res_types, size_t nargs, ...)`
or function `MIR_item_t MIR_new_func_arr (MIR_context_t ctx, const char *name, size_t nres, MIR_type_t *res_types, size_t nargs, MIR_var_t *arg_vars)`
@ -119,6 +124,12 @@
{<insn>}
endfun
```
* Textual presentation of block type argument in `func` has form `blk:<size>(<var_name>)`.
The corresponding argument in `call` insn should have analogous form
`blk:<the same size>(<local var name containing address of passed block data>)`
* Block data are passed by value. How they are exactly passed is machine-defined:
* they are always passed on stack for x86-64, aarch64, and s390x
* they can be (partially) passed through registers and on stack for ppc64
* Non-argument function variables are created through API function
`MIR_reg_t MIR_new_func_reg (MIR_context_t ctx, MIR_func_t func, MIR_type_t type, const char *name)`
* The only permitted integer type for the variable is `MIR_T_I64` (or MIR_T_U64???)
@ -139,7 +150,9 @@
`MIR_op_t MIR_new_int_op (MIR_context_t ctx, int64_t v)` and `MIR_op_t MIR_new_uint_op (MIR_context_t ctx, uint64_t v)`
* In MIR text they are represented the same way as C integer numbers (e.g. octal, decimal, hexadecimal ones)
* **Float, double or long double value operands** created through API functions `MIR_op_t MIR_new_float_op (MIR_context_t ctx, float v)`,
`MIR_op_t MIR_new_double_op (MIR_context_t ctx, double v)`, and `MIR_op_t MIR_new_ldouble_op (MIR_context_t ctx, long double v)`
`MIR_op_t MIR_new_double_op (MIR_context_t ctx, double v)`,
and `MIR_op_t MIR_new_ldouble_op (MIR_context_t ctx, long double v)`.
Long double operand is changed to double one when long double coincides with double for given target or ABI
* In MIR text they are represented the same way as C floating point numbers
* **String operands** created through API functions `MIR_op_t MIR_new_str_op (MIR_context_t ctx, MIR_str_t str)`
* In MIR text they are represented by `typedef struct MIR_str {size_t len; const char *s;} MIR_str_t`
@ -184,6 +197,7 @@
* You can not use `MIR_new_insn` for the creation of call and ret insns as these insns have a variable number of operands.
To create such insns you should use `MIR_new_insn_arr` or special functions
`MIR_insn_t MIR_new_call_insn (MIR_context_t ctx, size_t nops, ...)` and `MIR_insn_t MIR_new_ret_insn (MIR_context_t ctx, size_t nops, ...)`
* Long double insns are changed by double ones if long double coincides with double for given target or ABI
* You can get insn name and number of insn operands through API functions
`const char *MIR_insn_name (MIR_context_t ctx, MIR_insn_code_t code)` and `size_t MIR_insn_nops (MIR_context_t ctx, MIR_insn_t insn)`
* You can add a created insn at the beginning or end of function insn list through API functions
@ -386,7 +400,7 @@
* The first insn saves the stack pointer in the operand
* The second insn restores stack pointer from the operand
### MIR_VA_START, MIR_VA_ARG, and MIR_VA_END insns
### MIR_VA_START, MIR_VA_ARG, MIR_VA_STACK_ARG, and MIR_VA_END insns
* These insns are only for variable number arguments functions
* `MIR_VA_START` and `MIR_VA_END` have one input operand, an address
of va_list structure (see C stdarg.h for more details). Unlike C
@ -394,6 +408,9 @@
* `MIR_VA_ARG` takes va_list and any memory operand and returns
address of the next argument in the 1st insn operand. The memory
operand type defines the type of the argument
* `MIR_VA_STACK_ARG` takes va_list and integer operand and returns
address of the next argument passed as a block argument
of the size given by the integer operand
* va_list operand can be memory with undefined type. In this case
address of the va_list is not in the memory but is the
memory address
@ -401,7 +418,7 @@
## MIR API example
* The following code on C creates MIR analog of C code
`int64_t loop (int64_t arg1) {int64_t count = 0; while (count < arg1) count++; return count;}`
```
```c
MIR_module_t m = MIR_new_module (ctx, "m");
MIR_item_t func = MIR_new_func (ctx, "loop", MIR_T_I64, 1, MIR_T_I64, "arg1");
MIR_reg_t COUNT = MIR_new_func_reg (ctx, func->u.func, MIR_T_I64, "count");
@ -423,9 +440,11 @@
MIR_finish_module (ctx);
```
## MIR text example
## MIR text examples
```
* Sieve of eratosthenes:
```mir
m_sieve: module
export sieve
sieve: func i32, i32:N
@ -466,6 +485,29 @@ ex100: func v
endmodule
```
* Example of block arguments and `va_stack_arg`
```mir
m0: module
f_p: proto i64, 16:blk(a), ...
f: func i64, 16:blk(a), ...
local i64:r, i64:va, i64:a2
alloca va, 32 # allocate enough space for va_list
va_start va
va_stack_arg a2, va, 16 # get address of the 2nd blk arg
add r, i64:0(a), i64:8(a2)
ret r
main: func
local i64:a, i64:r
alloca a, 16
mov i64:0(a), 42
mov i64:8(a), 24
call f_p, f, r, blk:16(a), blk:16(a)
ret r
endfunc
endmodule
```
## Other MIR API functions
* MIR API can find a lot of errors. They are reported through an
error function of type `void (*MIR_error_func_t) (MIR_context ctx, MIR_error_type_t
@ -564,9 +606,9 @@ ex100: func v
works only on the same targets as MIR generator
# MIR generator (file mir-gen.h)
* Before use of MIR generator you should initialize it by API function `MIR_gen_init (MIR_context ctx)`
* API function `MIR_gen_finish (MIR_context ctx)` should be called last after any generator usage.
It frees all internal generator data
* Before use of MIR generator for given context you should initialize it by API function `MIR_gen_init (MIR_context ctx)`
* API function `MIR_gen_finish (MIR_context ctx)` frees all internal generator data for the context.
If you want to generate code for the context again after the `MIR_gen_finish` call, you should call `MIR_gen_init` again first
* API function `void *MIR_gen (MIR_context ctx, MIR_item_t func_item)` generates machine code of given MIR function
and returns an address to call it. You can call the code as usual C function by using this address
as the called function address

@ -0,0 +1,91 @@
/* This file is a part of MIR project.
Copyright (C) 2018-2020 Vladimir Makarov <vmakarov.gcc@gmail.com>.
aarch64 call ABI target specific code.
*/
/* aarch64 needs no per-argument classification state: argument handling here depends
   only on the type itself, so the "arg info" is a dummy int. */
typedef int target_arg_info_t;

/* Nothing to initialize for the dummy arg info. */
static void target_init_arg_vars (c2m_ctx_t c2m_ctx, target_arg_info_t *arg_info) {}
/* TRUE iff RET_TYPE must be returned through memory on aarch64: only struct/union
   values wider than two 8-byte registers are returned by address. */
static int target_return_by_addr_p (c2m_ctx_t c2m_ctx, struct type *ret_type) {
  if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) return FALSE;
  return type_size (c2m_ctx, ret_type) > 2 * 8;
}
/* Size in bytes of TYPE when it is a struct/union small enough (<= 16 bytes) to be
   returned in registers; -1 for non-aggregates or bigger aggregates. */
static int reg_aggregate_size (c2m_ctx_t c2m_ctx, struct type *type) {
  if (type->mode != TM_STRUCT && type->mode != TM_UNION) return -1;
  int aggr_size = type_size (c2m_ctx, type);
  return aggr_size > 2 * 8 ? -1 : aggr_size;
}
/* Add the MIR result types for returning RET_TYPE under the aarch64 ABI.
   Aggregates of up to 16 bytes yield one I64 result per started 8 bytes;
   everything else is delegated to the machine-independent handler.
   (Removed an unused local `MIR_var_t var`.) */
static void target_add_res_proto (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                  target_arg_info_t *arg_info, VARR (MIR_type_t) * res_types,
                                  VARR (MIR_var_t) * arg_vars) {
  int size;

  if ((size = reg_aggregate_size (c2m_ctx, ret_type)) < 0) {
    simple_add_res_proto (c2m_ctx, ret_type, arg_info, res_types, arg_vars);
    return;
  }
  if (size == 0) return; /* empty aggregate: no result value at all */
  VARR_PUSH (MIR_type_t, res_types, MIR_T_I64);
  if (size > 8) VARR_PUSH (MIR_type_t, res_types, MIR_T_I64); /* second reg for 9..16 bytes */
}
/* Push temporary register operands receiving the call result of RET_TYPE (aarch64).
   Returns the number of result operands (1 or 2), -1 when there is no result value,
   or delegates to the machine-independent handler for by-address returns. */
static int target_add_call_res_op (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                   target_arg_info_t *arg_info, size_t call_arg_area_offset) {
  MIR_context_t ctx = c2m_ctx->ctx;
  int size = reg_aggregate_size (c2m_ctx, ret_type);

  if (size < 0) return simple_add_call_res_op (c2m_ctx, ret_type, arg_info, call_arg_area_offset);
  if (size == 0) return -1; /* empty aggregate */
  int nops = size <= 8 ? 1 : 2; /* one I64 temp per started 8 bytes */
  for (int op_num = 0; op_num < nops; op_num++)
    VARR_PUSH (MIR_op_t, call_ops,
               MIR_new_reg_op (ctx, get_new_temp (c2m_ctx, MIR_T_I64).mir_op.u.reg));
  return nops;
}
/* After CALL, store the register-returned aggregate of RET_TYPE into RES (aarch64).
   Non-register cases are delegated to the machine-independent handler.
   CALL_OPS_START indexes the call's first operand in call_ops; the +2 below skips
   two leading call operands (NOTE(review): presumably prototype and callee — confirm). */
static op_t target_gen_post_call_res_code (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res,
                                           MIR_insn_t call, size_t call_ops_start) {
  int size = reg_aggregate_size (c2m_ctx, ret_type);

  if (size < 0)
    return simple_gen_post_call_res_code (c2m_ctx, ret_type, res, call, call_ops_start);
  if (size == 0) return res; /* empty aggregate: nothing to store */
  gen_multiple_load_store (c2m_ctx, ret_type, &VARR_ADDR (MIR_op_t, call_ops)[call_ops_start + 2],
                           res.mir_op, FALSE);
  return res;
}
/* Build the ret-insn operands for returning RES of RET_TYPE (aarch64): small
   aggregates are loaded into one I64 temp per started 8 bytes; anything else is
   delegated to the machine-independent handler. */
static void target_add_ret_ops (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res) {
  int size = reg_aggregate_size (c2m_ctx, ret_type);

  if (size < 0) {
    simple_add_ret_ops (c2m_ctx, ret_type, res);
    return;
  }
  assert (res.mir_op.mode == MIR_OP_MEM && VARR_LENGTH (MIR_op_t, ret_ops) == 0 && size <= 2 * 8);
  for (int rest = size; rest > 0; rest -= 8)
    VARR_PUSH (MIR_op_t, ret_ops, get_new_temp (c2m_ctx, MIR_T_I64).mir_op);
  gen_multiple_load_store (c2m_ctx, ret_type, VARR_ADDR (MIR_op_t, ret_ops), res.mir_op, TRUE);
}
/* aarch64 argument passing follows the machine-independent scheme: delegate
   prototype construction and call-operand construction to the simple_* handlers. */
static void target_add_arg_proto (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
                                  target_arg_info_t *arg_info, VARR (MIR_var_t) * arg_vars) {
  simple_add_arg_proto (c2m_ctx, name, arg_type, arg_info, arg_vars);
}
static void target_add_call_arg_op (c2m_ctx_t c2m_ctx, struct type *arg_type,
                                    target_arg_info_t *arg_info, op_t arg) {
  simple_add_call_arg_op (c2m_ctx, arg_type, arg_info, arg);
}
/* No incoming argument needs target-specific gathering code on aarch64. */
static int target_gen_gather_arg (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
                                  decl_t param_decl, target_arg_info_t *arg_info) {
  return FALSE;
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,300 @@
/* This file is a part of MIR project.
Copyright (C) 2018-2020 Vladimir Makarov <vmakarov.gcc@gmail.com>.
ppc64 call ABI target specific code.
*/
/* ppc64 needs no per-argument classification state: the "arg info" is a dummy int. */
typedef int target_arg_info_t;

/* Nothing to initialize for the dummy arg info. */
static void target_init_arg_vars (c2m_ctx_t c2m_ctx, target_arg_info_t *arg_info) {}
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
/* Big endian build: homogeneous FP aggregate classification is never used. */
static MIR_type_t fp_homogeneous_type (c2m_ctx_t c2m_ctx, struct type *param_type, int *num) {
  return MIR_T_UNDEF;
}
#else
/* Recursive worker for fp_homogeneous_type.  CURR_TYPE is the common FP element type
   seen so far (MIR_T_UNDEF before any element).  On success returns the element type
   (MIR_T_F or MIR_T_D) and sets *NUM to the element count; otherwise MIR_T_UNDEF. */
static MIR_type_t fp_homogeneous_type_1 (c2m_ctx_t c2m_ctx, MIR_type_t curr_type, struct type *type,
                                         int *num) {
  int n;
  MIR_type_t t;
  if (type->mode == TM_STRUCT || type->mode == TM_UNION || type->mode == TM_ARR) {
    switch (type->mode) {
    case TM_ARR: { /* Arrays are handled as small records. */
      struct arr_type *arr_type = type->u.arr_type;
      struct expr *cexpr = arr_type->size->attr;
      if ((t = fp_homogeneous_type_1 (c2m_ctx, curr_type, type->u.arr_type->el_type, &n))
          == MIR_T_UNDEF)
        return MIR_T_UNDEF;
      /* Element count: 1 when the array size is absent or non-constant, else its value. */
      *num = arr_type->size->code == N_IGNORE || !cexpr->const_p ? 1 : cexpr->u.i_val;
      return t;
    }
    case TM_STRUCT:
    case TM_UNION:
      t = curr_type;
      *num = 0;
      for (node_t el = NL_HEAD (NL_EL (type->u.tag_type->ops, 1)->ops); el != NULL;
           el = NL_NEXT (el))
        if (el->code == N_MEMBER) {
          decl_t decl = el->attr;
          if ((t = fp_homogeneous_type_1 (c2m_ctx, t, decl->decl_spec.type, &n)) == MIR_T_UNDEF)
            return MIR_T_UNDEF;
          /* Struct members accumulate element counts; union members overlap, so keep the max. */
          if (type->mode == TM_STRUCT)
            *num += n;
          else if (*num < n)
            *num = n;
        }
      return t;
    default: assert (FALSE);
    }
  }
  assert (scalar_type_p (type));
  /* A scalar qualifies only if it is float/double and agrees with what was seen so far. */
  if ((t = get_mir_type (c2m_ctx, type)) != MIR_T_F && t != MIR_T_D) return MIR_T_UNDEF;
  if (curr_type != t && curr_type != MIR_T_UNDEF) return MIR_T_UNDEF;
  *num = 1;
  return t;
}
/* If PARAM_TYPE is a struct/union consisting entirely of floats or entirely of doubles,
   return that element type and set *NUM to the element count; else MIR_T_UNDEF. */
static MIR_type_t fp_homogeneous_type (c2m_ctx_t c2m_ctx, struct type *param_type, int *num) {
  if (param_type->mode != TM_STRUCT && param_type->mode != TM_UNION) return MIR_T_UNDEF;
  return fp_homogeneous_type_1 (c2m_ctx, MIR_T_UNDEF, param_type, num);
}
#endif
/* TRUE iff an aggregate of RET_TYPE can be returned in integer registers (<= 16 bytes).
   On big-endian builds register aggregate return is never used. */
static int reg_aggregate_p (c2m_ctx_t c2m_ctx, struct type *ret_type) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  return FALSE;
#else
  return type_size (c2m_ctx, ret_type) <= 2 * 8;
#endif
}
/* TRUE iff RET_TYPE must be returned through memory on ppc64: an aggregate that is
   neither a homogeneous FP aggregate of <= 8 elements nor small enough for registers. */
static int target_return_by_addr_p (c2m_ctx_t c2m_ctx, struct type *ret_type) {
  MIR_type_t el_type;
  int nel;

  if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) return FALSE;
  el_type = fp_homogeneous_type (c2m_ctx, ret_type, &nel);
  if ((el_type == MIR_T_F || el_type == MIR_T_D) && nel <= 8) return FALSE;
  return !reg_aggregate_p (c2m_ctx, ret_type);
}
/* Add the MIR result types (or a hidden return-address argument) for returning
   RET_TYPE under the ppc64 ABI: homogeneous FP aggregates of <= 8 elements get one
   FP result per element; scalars get one result; small aggregates one I64 per
   started 8 bytes; anything else a caller-supplied RBLK address argument. */
static void target_add_res_proto (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                  target_arg_info_t *arg_info, VARR (MIR_type_t) * res_types,
                                  VARR (MIR_var_t) * arg_vars) {
  MIR_var_t var;
  MIR_type_t type;
  int i, n, size;
  if (void_type_p (ret_type)) return; /* no result value */
  if (((type = fp_homogeneous_type (c2m_ctx, ret_type, &n)) == MIR_T_F || type == MIR_T_D)
      && n <= 8) {
    /* Homogeneous FP aggregate: one result per element. */
    for (i = 0; i < n; i++) VARR_PUSH (MIR_type_t, res_types, type);
  } else if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) {
    /* Scalar result. */
    VARR_PUSH (MIR_type_t, res_types, get_mir_type (c2m_ctx, ret_type));
  } else if (reg_aggregate_p (c2m_ctx, ret_type)) {
    /* Small aggregate: one I64 result per started 8 bytes. */
    size = type_size (c2m_ctx, ret_type);
    for (; size > 0; size -= 8) VARR_PUSH (MIR_type_t, res_types, MIR_T_I64);
  } else {
    /* Large aggregate: the caller passes the address to store the result into. */
    var.name = RET_ADDR_NAME;
    var.type = MIR_T_RBLK;
    var.size = type_size (c2m_ctx, ret_type);
    VARR_PUSH (MIR_var_t, arg_vars, var);
  }
}
/* Push call operands receiving the result of RET_TYPE (ppc64).  Returns the number
   of register results pushed, -1 when the call produces no result operand, or 0 when
   the result goes through a hidden RBLK address operand (pushed here too). */
static int target_add_call_res_op (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                   target_arg_info_t *arg_info, size_t call_arg_area_offset) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_type_t type;
  op_t temp;
  int i, n, size;
  if (void_type_p (ret_type)) return -1; /* no result */
  if (((type = fp_homogeneous_type (c2m_ctx, ret_type, &n)) == MIR_T_F || type == MIR_T_D)
      && n <= 8) {
    /* Homogeneous FP aggregate: one FP temp per element. */
    for (i = 0; i < n; i++) {
      temp = get_new_temp (c2m_ctx, type);
      VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    }
    return n;
  } else if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) {
    /* Scalar: integer results are widened to their promoted MIR type. */
    type = get_mir_type (c2m_ctx, ret_type);
    type = promote_mir_int_type (type);
    temp = get_new_temp (c2m_ctx, type);
    VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    return 1;
  } else if (reg_aggregate_p (c2m_ctx, ret_type)) {
    /* Small aggregate: one I64 temp per started 8 bytes. */
    size = type_size (c2m_ctx, ret_type);
    if (size == 0) return -1; /* empty aggregate */
    for (int s = size; s > 0; s -= 8) {
      temp = get_new_temp (c2m_ctx, MIR_T_I64);
      VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    }
    return (size + 7) / 8;
  } else {
    /* By-address return: compute the result buffer address inside the call arg
       area (frame pointer + call_arg_area_offset) and pass it as an RBLK operand. */
    temp = get_new_temp (c2m_ctx, MIR_T_I64);
    emit3 (c2m_ctx, MIR_ADD, temp.mir_op,
           MIR_new_reg_op (ctx, MIR_reg (ctx, FP_NAME, curr_func->u.func)),
           MIR_new_int_op (ctx, call_arg_area_offset));
    temp.mir_op
      = MIR_new_mem_op (ctx, MIR_T_RBLK, type_size (c2m_ctx, ret_type), temp.mir_op.u.reg, 0, 1);
    VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    return 0;
  }
}
/* Emit insns after CALL storing register-returned results into RES (a memory operand)
   under the ppc64 ABI.  CALL_OPS_START indexes the call's first operand in call_ops;
   the +2 offsets below skip two leading call operands
   (NOTE(review): presumably prototype and callee address — confirm). */
static op_t target_gen_post_call_res_code (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res,
                                           MIR_insn_t call, size_t call_ops_start) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_type_t type;
  MIR_insn_t insn;
  int i, n;
  if (void_type_p (ret_type)) return res; /* nothing returned */
  if (((type = fp_homogeneous_type (c2m_ctx, ret_type, &n)) == MIR_T_F || type == MIR_T_D)
      && n <= 8) {
    /* Homogeneous FP aggregate: store each element result at its offset in RES. */
    assert (res.mir_op.mode == MIR_OP_MEM);
    for (i = 0; i < n; i++) {
      insn = MIR_new_insn (ctx, tp_mov (type),
                           MIR_new_mem_op (ctx, type,
                                           res.mir_op.u.mem.disp + (type == MIR_T_F ? 4 : 8) * i,
                                           res.mir_op.u.mem.base, res.mir_op.u.mem.index,
                                           res.mir_op.u.mem.scale),
                           VARR_GET (MIR_op_t, call_ops, i + call_ops_start + 2));
      MIR_append_insn (ctx, curr_func, insn);
    }
  } else if ((ret_type->mode == TM_STRUCT || ret_type->mode == TM_UNION)
             && reg_aggregate_p (c2m_ctx, ret_type)) {
    /* Small aggregate returned in integer registers: bulk-store into RES. */
    assert (res.mir_op.mode == MIR_OP_MEM); /* addr */
    gen_multiple_load_store (c2m_ctx, ret_type, &VARR_ADDR (MIR_op_t, call_ops)[call_ops_start + 2],
                             res.mir_op, FALSE);
  }
  return res;
}
/* Build ret-insn operands for returning RES of RET_TYPE under the ppc64 ABI:
   homogeneous FP aggregates element-by-element in FP temps; scalars directly; small
   aggregates in I64 temps; large aggregates copied to the caller's RET_ADDR buffer.
   (Removed an unused outer `i` that was shadowed by the loop-local counters, and the
   pointless counter in the aggregate loop.) */
static void target_add_ret_ops (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_type_t type;
  MIR_insn_t insn;
  MIR_reg_t ret_addr_reg;
  op_t temp, var;
  int n, size;

  if (void_type_p (ret_type)) return; /* nothing to return */
  if (((type = fp_homogeneous_type (c2m_ctx, ret_type, &n)) == MIR_T_F || type == MIR_T_D)
      && n <= 8) {
    /* Homogeneous FP aggregate: load each element into its own FP temp. */
    assert (res.mir_op.mode == MIR_OP_MEM);
    for (int i = 0; i < n; i++) {
      temp = get_new_temp (c2m_ctx, type);
      insn = MIR_new_insn (ctx, tp_mov (type), temp.mir_op,
                           MIR_new_mem_op (ctx, type,
                                           res.mir_op.u.mem.disp + (type == MIR_T_F ? 4 : 8) * i,
                                           res.mir_op.u.mem.base, res.mir_op.u.mem.index,
                                           res.mir_op.u.mem.scale));
      MIR_append_insn (ctx, curr_func, insn);
      VARR_PUSH (MIR_op_t, ret_ops, temp.mir_op);
    }
  } else if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) {
    VARR_PUSH (MIR_op_t, ret_ops, res.mir_op); /* scalar: return as is */
  } else if (reg_aggregate_p (c2m_ctx, ret_type)) {
    /* Small aggregate: one I64 temp per started 8 bytes, then bulk-load from RES. */
    size = type_size (c2m_ctx, ret_type);
    assert (res.mir_op.mode == MIR_OP_MEM && VARR_LENGTH (MIR_op_t, ret_ops) == 0);
    for (; size > 0; size -= 8)
      VARR_PUSH (MIR_op_t, ret_ops, get_new_temp (c2m_ctx, MIR_T_I64).mir_op);
    gen_multiple_load_store (c2m_ctx, ret_type, VARR_ADDR (MIR_op_t, ret_ops), res.mir_op, TRUE);
  } else {
    /* By-address return: copy the value to the address the caller passed. */
    ret_addr_reg = MIR_reg (ctx, RET_ADDR_NAME, curr_func->u.func);
    var = new_op (NULL, MIR_new_mem_op (ctx, MIR_T_I8, 0, ret_addr_reg, 0, 1));
    size = type_size (c2m_ctx, ret_type);
    block_move (c2m_ctx, var, res, size);
  }
}
/* Add prototype variable(s) for an argument NAME of ARG_TYPE under the ppc64 ABI:
   homogeneous FP aggregates become one indexed FP variable per element; other
   aggregates become a sized MIR_T_BLK variable; scalars keep their MIR type. */
static void target_add_arg_proto (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
                                  target_arg_info_t *arg_info, VARR (MIR_var_t) * arg_vars) {
  MIR_var_t var;
  MIR_type_t type;
  int n;
  if (((type = fp_homogeneous_type (c2m_ctx, arg_type, &n)) == MIR_T_F || type == MIR_T_D)
      && n <= 8) {
    /* One variable per FP element, named "Q<i>_<name>"-style by gen_get_indexed_name. */
    for (int i = 0; i < n; i++) {
      var.name = gen_get_indexed_name (c2m_ctx, name, i);
      var.type = type;
      VARR_PUSH (MIR_var_t, arg_vars, var);
    }
    return;
  }
  type = (arg_type->mode == TM_STRUCT || arg_type->mode == TM_UNION
            ? MIR_T_BLK
            : get_mir_type (c2m_ctx, arg_type));
  var.name = name;
  var.type = type;
  if (type == MIR_T_BLK) var.size = type_size (c2m_ctx, arg_type); /* BLK vars carry their size */
  VARR_PUSH (MIR_var_t, arg_vars, var);
}
/* Push the MIR call operand(s) passing ARG of ARG_TYPE under the ppc64 ABI:
   homogeneous FP aggregates element-by-element in FP temps, scalars as is, other
   aggregates as a MIR_T_BLK operand holding the data address.
   (Removed an unused local `MIR_var_t var`.) */
static void target_add_call_arg_op (c2m_ctx_t c2m_ctx, struct type *arg_type,
                                    target_arg_info_t *arg_info, op_t arg) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_type_t type;
  op_t temp;
  int n;

  if (((type = fp_homogeneous_type (c2m_ctx, arg_type, &n)) == MIR_T_F || type == MIR_T_D)
      && n <= 8) {
    /* Homogeneous FP aggregate: load each element from the aggregate's address. */
    assert (arg.mir_op.mode == MIR_OP_MEM);
    arg = mem_to_address (c2m_ctx, arg, TRUE);
    for (int i = 0; i < n; i++) {
      temp = get_new_temp (c2m_ctx, type);
      MIR_append_insn (ctx, curr_func,
                       MIR_new_insn (ctx, tp_mov (type), temp.mir_op,
                                     MIR_new_mem_op (ctx, type, (type == MIR_T_F ? 4 : 8) * i,
                                                     arg.mir_op.u.reg, 0, 1)));
      VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    }
    return;
  }
  if (arg_type->mode != TM_STRUCT && arg_type->mode != TM_UNION) {
    VARR_PUSH (MIR_op_t, call_ops, arg.mir_op); /* scalar: pass as is */
  } else {
    /* Other aggregates are passed by value as a sized block through their address. */
    assert (arg.mir_op.mode == MIR_OP_MEM);
    arg = mem_to_address (c2m_ctx, arg, TRUE);
    VARR_PUSH (MIR_op_t, call_ops,
               MIR_new_mem_op (ctx, MIR_T_BLK, type_size (c2m_ctx, arg_type), arg.mir_op.u.reg, 0,
                               1));
  }
}
/* Generate the prologue code gathering a homogeneous-FP-aggregate argument: store each
   per-element register variable into the argument's stack slot at PARAM_DECL->offset.
   Returns TRUE when handled, FALSE to let the caller use the generic path.
   (Removed an unused local `MIR_var_t var`.) */
static int target_gen_gather_arg (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
                                  decl_t param_decl, target_arg_info_t *arg_info) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_type_t type;
  reg_var_t reg_var;
  int i, n;

  if (((type = fp_homogeneous_type (c2m_ctx, arg_type, &n)) == MIR_T_F || type == MIR_T_D)
      && n <= 8) {
    for (i = 0; i < n; i++) {
      assert (!param_decl->reg_p);
      /* Each element arrives in its own indexed register variable. */
      reg_var = get_reg_var (c2m_ctx, type, gen_get_indexed_name (c2m_ctx, name, i));
      MIR_append_insn (ctx, curr_func,
                       MIR_new_insn (ctx, tp_mov (type),
                                     MIR_new_mem_op (ctx, type,
                                                     param_decl->offset
                                                       + (type == MIR_T_F ? 4 : 8) * i,
                                                     MIR_reg (ctx, FP_NAME, curr_func->u.func), 0,
                                                     1),
                                     MIR_new_reg_op (ctx, reg_var.reg)));
    }
    return TRUE;
  }
  return FALSE;
}

@ -0,0 +1,94 @@
/* This file is a part of MIR project.
Copyright (C) 2018-2020 Vladimir Makarov <vmakarov.gcc@gmail.com>.
s390x call ABI target specific code.
*/
/* s390x needs no per-argument classification state: the "arg info" is a dummy int. */
typedef int target_arg_info_t;

/* Nothing to initialize for the dummy arg info. */
static void target_init_arg_vars (c2m_ctx_t c2m_ctx, target_arg_info_t *arg_info) {}

/* Result passing on s390x follows the machine-independent scheme: all result-related
   hooks simply delegate to the simple_* implementations. */
static int target_return_by_addr_p (c2m_ctx_t c2m_ctx, struct type *ret_type) {
  return simple_return_by_addr_p (c2m_ctx, ret_type);
}
static void target_add_res_proto (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                  target_arg_info_t *arg_info, VARR (MIR_type_t) * res_types,
                                  VARR (MIR_var_t) * arg_vars) {
  simple_add_res_proto (c2m_ctx, ret_type, arg_info, res_types, arg_vars);
}
static int target_add_call_res_op (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                   target_arg_info_t *arg_info, size_t call_arg_area_offset) {
  return simple_add_call_res_op (c2m_ctx, ret_type, arg_info, call_arg_area_offset);
}
static op_t target_gen_post_call_res_code (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res,
                                           MIR_insn_t call, size_t call_ops_start) {
  return simple_gen_post_call_res_code (c2m_ctx, ret_type, res, call, call_ops_start);
}
static void target_add_ret_ops (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res) {
  simple_add_ret_ops (c2m_ctx, ret_type, res);
}
/* TRUE iff an aggregate of ARG_TYPE is passed in a register on s390x: only
   aggregates of exactly 1, 2, 4, or 8 bytes qualify. */
static int reg_aggregate_p (c2m_ctx_t c2m_ctx, struct type *arg_type) {
  switch (type_size (c2m_ctx, arg_type)) {
  case 1:
  case 2:
  case 4:
  case 8: return TRUE;
  default: return FALSE;
  }
}
/* Add the prototype variable for an argument NAME of ARG_TYPE under the s390x ABI:
   scalars keep their MIR type, register-sized aggregates become I64, and all other
   aggregates become a sized MIR_T_BLK variable. */
static void target_add_arg_proto (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
                                  target_arg_info_t *arg_info, VARR (MIR_var_t) * arg_vars) {
  MIR_var_t var;

  var.name = name;
  if (arg_type->mode != TM_STRUCT && arg_type->mode != TM_UNION) {
    var.type = get_mir_type (c2m_ctx, arg_type);
  } else if (reg_aggregate_p (c2m_ctx, arg_type)) {
    var.type = MIR_T_I64;
  } else {
    var.type = MIR_T_BLK;
    var.size = type_size (c2m_ctx, arg_type); /* BLK vars carry their size */
  }
  VARR_PUSH (MIR_var_t, arg_vars, var);
}
/* Push the MIR call operand passing ARG of ARG_TYPE under the s390x ABI: scalars as
   is; register-sized aggregates loaded by value into an I64 temp; other aggregates
   as a sized MIR_T_BLK operand holding the data address. */
static void target_add_call_arg_op (c2m_ctx_t c2m_ctx, struct type *arg_type,
                                    target_arg_info_t *arg_info, op_t arg) {
  op_t temp;
  if (arg_type->mode != TM_STRUCT && arg_type->mode != TM_UNION) {
    VARR_PUSH (MIR_op_t, call_ops, arg.mir_op);
  } else if (reg_aggregate_p (c2m_ctx, arg_type)) {
    assert (arg.mir_op.mode == MIR_OP_MEM);
    /* Load the 1/2/4/8-byte aggregate into a temporary register. */
    temp = get_new_temp (c2m_ctx, MIR_T_I64);
    gen_multiple_load_store (c2m_ctx, arg_type, &temp.mir_op, arg.mir_op, TRUE);
    VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
  } else {
    assert (arg.mir_op.mode == MIR_OP_MEM);
    arg = mem_to_address (c2m_ctx, arg, TRUE);
    VARR_PUSH (MIR_op_t, call_ops,
               MIR_new_mem_op (c2m_ctx->ctx, MIR_T_BLK, type_size (c2m_ctx, arg_type),
                               arg.mir_op.u.reg, 0, 1));
  }
}
/* Generate prologue code storing a register-passed small aggregate argument into its
   stack slot at PARAM_DECL->offset.  Returns TRUE when handled, FALSE to let the
   caller use the generic path.  (Removed an unused local `MIR_type_t type`.) */
static int target_gen_gather_arg (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
                                  decl_t param_decl, target_arg_info_t *arg_info) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_op_t param_op;
  reg_var_t reg_var;

  if ((arg_type->mode != TM_STRUCT && arg_type->mode != TM_UNION)
      || !reg_aggregate_p (c2m_ctx, arg_type))
    return FALSE;
  assert (!param_decl->reg_p);
  /* The aggregate arrives in a single I64 register variable named after the param. */
  reg_var = get_reg_var (c2m_ctx, MIR_T_I64, name);
  param_op = MIR_new_reg_op (ctx, reg_var.reg);
  gen_multiple_load_store (c2m_ctx, arg_type, &param_op,
                           MIR_new_mem_op (ctx, MIR_T_UNDEF, param_decl->offset,
                                           MIR_reg (ctx, FP_NAME, curr_func->u.func), 0, 1),
                           FALSE);
  return TRUE;
}

@ -3,59 +3,57 @@
x86_64 ABI target specific code.
*/
#define ATYPICAL_CALL_ABI
/* See https://github.com/hjl-tools/x86-psABI/wiki/x86-64-psABI-1.0.pdf. We use MIR_T_UNDEF for
MEMORY. */
enum { NO_CLASS = MIR_T_BOUND + 1, X87UP_CLASS };
enum add_arg_class { NO_CLASS = MIR_T_BOUND + 1, X87UP_CLASS };
#define MAX_QWORDS 8
#ifndef _WIN64
#define MAX_QWORDS 2
#else
#define MAX_QWORDS 1
#endif
static MIR_type_t get_result_type (MIR_type_t arg_type1, MIR_type_t arg_type2) {
if (arg_type1 == arg_type2) return arg_type1;
if (arg_type1 == NO_CLASS) return arg_type2;
if (arg_type2 == NO_CLASS) return arg_type1;
if ((enum add_arg_class) arg_type1 == NO_CLASS) return arg_type2;
if ((enum add_arg_class) arg_type2 == NO_CLASS) return arg_type1;
if (arg_type1 == MIR_T_UNDEF || arg_type2 == MIR_T_UNDEF) return MIR_T_UNDEF;
if ((arg_type1 == MIR_T_I32 && arg_type2 == MIR_T_F)
|| (arg_type2 == MIR_T_I32 && arg_type1 == MIR_T_F))
return MIR_T_I32;
if (arg_type1 == MIR_T_I64 || arg_type1 == MIR_T_I32 || arg_type2 == MIR_T_I64
|| arg_type2 == MIR_T_I32)
return MIR_T_I64;
if (arg_type1 == MIR_T_LD || arg_type2 == MIR_T_LD || arg_type1 == X87UP_CLASS
|| arg_type2 == X87UP_CLASS)
if (arg_type1 == MIR_T_LD || arg_type2 == MIR_T_LD
|| (enum add_arg_class) arg_type1 == X87UP_CLASS
|| (enum add_arg_class) arg_type2 == X87UP_CLASS)
return MIR_T_UNDEF;
return MIR_T_D;
}
static int classify_arg (MIR_context_t ctx, struct type *type, MIR_type_t types[MAX_QWORDS],
mir_size_t offset, int bit_field_p) {
c2m_ctx_t c2m_ctx = *c2m_ctx_loc (ctx);
static int classify_arg (c2m_ctx_t c2m_ctx, struct type *type, MIR_type_t types[MAX_QWORDS],
int bit_field_p) {
size_t size = type_size (c2m_ctx, type);
int i, n_el_qwords, n_qwords = (size + 7) / 8;
MIR_type_t mir_type;
if (type->mode == TM_STRUCT || type->mode == TM_UNION || type->mode == TM_STRUCT) {
if (type->mode == TM_STRUCT || type->mode == TM_UNION || type->mode == TM_ARR) {
MIR_type_t subtypes[MAX_QWORDS];
if (n_qwords > 8) return 0; /* too big aggregate */
if (n_qwords > MAX_QWORDS) return 0; /* too big aggregate */
#ifndef _WIN64
for (i = 0; i < n_qwords; i++) types[i] = NO_CLASS;
switch (type->mode) {
case TM_ARR: { /* Arrays are handled as small records. */
n_el_qwords = classify_arg (ctx, type->u.arr_type->el_type, subtypes, 0, FALSE);
n_el_qwords = classify_arg (c2m_ctx, type->u.arr_type->el_type, subtypes, FALSE);
if (n_el_qwords == 0) return 0;
/* make full types: */
if (subtypes[0] == MIR_T_F && size != 4) subtypes[0] = MIR_T_D;
if (subtypes[0] == MIR_T_I32 && (bit_field_p || size != 4)) subtypes[0] = MIR_T_I64;
for (i = 0; i < n_qwords; i++) types[i] = subtypes[i % n_el_qwords];
for (i = 0; i < n_qwords; i++)
types[i] = get_result_type (types[i], subtypes[i % n_el_qwords]);
break;
}
case TM_STRUCT:
@ -64,14 +62,13 @@ static int classify_arg (MIR_context_t ctx, struct type *type, MIR_type_t types[
el = NL_NEXT (el))
if (el->code == N_MEMBER) {
decl_t decl = el->attr;
int start_qword = (offset + decl->offset) / 8;
int start_qword = decl->offset / 8;
if (decl->bit_offset >= 0) {
types[start_qword] = get_result_type (MIR_T_I64, types[start_qword]);
} else {
n_el_qwords = classify_arg (ctx, decl->decl_spec.type, subtypes,
offset + (type->mode == TM_STRUCT ? decl->offset : 0),
decl->bit_offset >= 0);
n_el_qwords
= classify_arg (c2m_ctx, decl->decl_spec.type, subtypes, decl->bit_offset >= 0);
if (n_el_qwords == 0) return 0;
for (i = 0; i < n_el_qwords && (i + start_qword) < n_qwords; i++)
types[i + start_qword] = get_result_type (subtypes[i], types[i + start_qword]);
@ -85,27 +82,25 @@ static int classify_arg (MIR_context_t ctx, struct type *type, MIR_type_t types[
for (i = 0; i < n_qwords; i++) {
if (types[i] == MIR_T_UNDEF) return 0; /* pass in memory if a word class is memory. */
if (types[i] == X87UP_CLASS && (i == 0 || types[i - 1] != MIR_T_LD)) return 0;
if ((enum add_arg_class) types[i] == X87UP_CLASS && (i == 0 || types[i - 1] != MIR_T_LD))
return 0;
}
return n_qwords;
#else
types[0] = MIR_T_I64;
return 1;
#endif
}
assert (scalar_type_p (type));
switch (mir_type = get_mir_type (ctx, type)) {
case MIR_T_F: types[0] = offset % 8 != 0 ? MIR_T_D : MIR_T_F; return 1;
switch (mir_type = get_mir_type (c2m_ctx, type)) {
case MIR_T_F:
case MIR_T_D: types[0] = MIR_T_D; return 1;
case MIR_T_LD:
types[0] = MIR_T_LD;
types[1] = X87UP_CLASS;
return 2;
default:
if (!bit_field_p && offset % 8 + size <= 4) {
types[0] = MIR_T_I32;
} else {
assert (size <= 8);
types[0] = MIR_T_I64;
}
return 1;
default: types[0] = MIR_T_I64; return 1;
}
}
@ -113,12 +108,11 @@ typedef struct target_arg_info {
int n_iregs, n_fregs;
} target_arg_info_t;
static void target_init_arg_vars (MIR_context_t ctx, target_arg_info_t *arg_info) {
static void target_init_arg_vars (c2m_ctx_t c2m_ctx, target_arg_info_t *arg_info) {
arg_info->n_iregs = arg_info->n_fregs = 0;
}
static const char *qword_name (MIR_context_t ctx, const char *name, int num) {
c2m_ctx_t c2m_ctx = *c2m_ctx_loc (ctx);
static const char *qword_name (c2m_ctx_t c2m_ctx, const char *name, int num) {
char prefix[50];
sprintf (prefix, "Q%u_", num);
@ -128,22 +122,35 @@ static const char *qword_name (MIR_context_t ctx, const char *name, int num) {
return uniq_cstr (c2m_ctx, VARR_ADDR (char, temp_string)).s;
}
static void target_add_res (MIR_context_t ctx, struct func_type *func_type,
target_arg_info_t *arg_info) {
MIR_var_t var;
static void update_last_qword_type (c2m_ctx_t c2m_ctx, struct type *type,
MIR_type_t qword_types[MAX_QWORDS], int n) {
size_t last_size, size = type_size (c2m_ctx, type);
MIR_type_t mir_type;
assert (n != 0);
if ((last_size = size % 8) == 0 || n > 1) return;
mir_type = qword_types[n - 1];
if (last_size <= 4 && mir_type == MIR_T_D) qword_types[n - 1] = MIR_T_F;
if (last_size <= 4 && mir_type == MIR_T_I64)
qword_types[n - 1] = last_size <= 1 ? MIR_T_I8 : last_size <= 2 ? MIR_T_I16 : MIR_T_I32;
}
static int process_ret_type (c2m_ctx_t c2m_ctx, struct type *ret_type,
MIR_type_t qword_types[MAX_QWORDS]) {
MIR_type_t type;
c2m_ctx_t c2m_ctx = *c2m_ctx_loc (ctx);
MIR_type_t qword_types[MAX_QWORDS];
int n_iregs, n_fregs, n_stregs, n, n_qwords, curr;
int n, n_iregs, n_fregs, n_stregs, curr;
int n_qwords = classify_arg (c2m_ctx, ret_type, qword_types, FALSE);
if (void_type_p (func_type->ret_type)) return;
n_qwords = classify_arg (ctx, func_type->ret_type, qword_types, 0, FALSE);
if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) return 0;
if (n_qwords != 0) {
update_last_qword_type (c2m_ctx, ret_type, qword_types, n_qwords);
n_iregs = n_fregs = n_stregs = curr = 0;
for (n = 0; n < n_qwords; n++) { /* start from the last qword */
type = qword_types[n];
qword_types[curr++] = type;
switch (type) {
switch ((int) type) {
case MIR_T_I8:
case MIR_T_I16:
case MIR_T_I32:
case MIR_T_I64: n_iregs++; break;
case MIR_T_F:
@ -153,78 +160,265 @@ static void target_add_res (MIR_context_t ctx, struct func_type *func_type,
n_qwords--;
curr--;
break;
case NO_CLASS:
case MIR_T_UNDEF: assert (FALSE);
default: assert (FALSE);
}
}
if (n_iregs > 2 || n_fregs > 2 || n_stregs > 1) {
n_qwords = 0;
}
if (n_iregs > 2 || n_fregs > 2 || n_stregs > 1) n_qwords = 0;
}
return n_qwords;
}
static int target_return_by_addr_p (c2m_ctx_t c2m_ctx, struct type *ret_type) {
MIR_type_t qword_types[MAX_QWORDS];
int n_qwords;
if (void_type_p (ret_type)) return FALSE;
n_qwords = process_ret_type (c2m_ctx, ret_type, qword_types);
return n_qwords == 0 && (ret_type->mode == TM_STRUCT || ret_type->mode == TM_UNION);
}
/* Add result description for RET_TYPE to the prototype being built:
   - aggregate returned in registers: one promoted MIR result type per qword;
   - scalar: its MIR type as the single result;
   - aggregate returned by reference: no result type; instead a hidden RBLK
     argument (RET_ADDR_NAME) carrying the return address is appended to
     ARG_VARS and one integer register is accounted in ARG_INFO.
   Fix: removed leftover pre-merge lines (proto_info/MIR_POINTER_TYPE refs)
   that left the function with unbalanced braces. */
static void target_add_res_proto (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                  target_arg_info_t *arg_info, VARR (MIR_type_t) * res_types,
                                  VARR (MIR_var_t) * arg_vars) {
  MIR_var_t var;
  MIR_type_t type;
  MIR_type_t qword_types[MAX_QWORDS];
  int n, n_qwords;

  if (void_type_p (ret_type)) return;
  n_qwords = process_ret_type (c2m_ctx, ret_type, qword_types);
  if (n_qwords != 0) { /* aggregate returned in registers */
    for (n = 0; n < n_qwords; n++)
      VARR_PUSH (MIR_type_t, res_types, promote_mir_int_type (qword_types[n]));
  } else if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) {
    type = get_mir_type (c2m_ctx, ret_type);
    VARR_PUSH (MIR_type_t, res_types, type);
  } else { /* return by reference through a hidden RBLK argument */
    var.name = RET_ADDR_NAME;
    var.type = MIR_T_RBLK;
    var.size = type_size (c2m_ctx, ret_type);
    VARR_PUSH (MIR_var_t, arg_vars, var);
    arg_info->n_iregs++;
  }
}
/* Push call result operands for RET_TYPE onto call_ops.  Returns the number
   of result operands pushed, or -1 for void:
   - aggregate in registers: one promoted temp per qword (returns n_qwords);
   - aggregate by reference: an RBLK memory operand addressing the call arg
     area at CALL_ARG_AREA_OFFSET off FP (returns 0, consumes an int reg);
   - scalar: one promoted temp (returns 1).
   Fix: removed a stray pre-merge line (undeclared `n`, `proto_info`) that
   made the final branch uncompilable. */
static int target_add_call_res_op (c2m_ctx_t c2m_ctx, struct type *ret_type,
                                   target_arg_info_t *arg_info, size_t call_arg_area_offset) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_type_t type;
  MIR_type_t qword_types[MAX_QWORDS];
  op_t temp;
  int i, n_qwords;

  if (void_type_p (ret_type)) return -1;
  n_qwords = process_ret_type (c2m_ctx, ret_type, qword_types);
  if (n_qwords != 0) { /* aggregate returned in registers */
    for (i = 0; i < n_qwords; i++) {
      temp = get_new_temp (c2m_ctx, promote_mir_int_type (qword_types[i]));
      VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    }
    return n_qwords;
  } else if (ret_type->mode == TM_STRUCT || ret_type->mode == TM_UNION) { /* return by reference */
    arg_info->n_iregs++;
    temp = get_new_temp (c2m_ctx, MIR_T_I64);
    /* temp = FP + call_arg_area_offset -- address where the callee stores the value: */
    emit3 (c2m_ctx, MIR_ADD, temp.mir_op,
           MIR_new_reg_op (ctx, MIR_reg (ctx, FP_NAME, curr_func->u.func)),
           MIR_new_int_op (ctx, call_arg_area_offset));
    temp.mir_op
      = MIR_new_mem_op (ctx, MIR_T_RBLK, type_size (c2m_ctx, ret_type), temp.mir_op.u.reg, 0, 1);
    VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    return 0;
  } else { /* scalar result */
    type = get_mir_type (c2m_ctx, ret_type);
    type = promote_mir_int_type (type);
    temp = get_new_temp (c2m_ctx, type);
    VARR_PUSH (MIR_op_t, call_ops, temp.mir_op);
    return 1;
  }
}
static void target_add_param (MIR_context_t ctx, const char *name, struct type *param_type,
decl_t param_decl, target_arg_info_t *arg_info) {
MIR_var_t var;
static op_t target_gen_post_call_res_code (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res,
MIR_insn_t call, size_t call_ops_start) {
MIR_context_t ctx = c2m_ctx->ctx;
MIR_type_t type;
c2m_ctx_t c2m_ctx = *c2m_ctx_loc (ctx);
MIR_insn_t insn;
MIR_type_t qword_types[MAX_QWORDS];
int n_iregs, n_fregs, n;
int n_qwords = classify_arg (ctx, param_type, qword_types, 0, FALSE);
int i, n_qwords;
if (void_type_p (ret_type)) return res;
n_qwords = process_ret_type (c2m_ctx, ret_type, qword_types);
if (n_qwords != 0) {
n_iregs = n_fregs = 0;
for (n = 0; n < n_qwords; n++) { /* start from the last qword */
switch ((type = qword_types[n])) {
case MIR_T_I32:
case MIR_T_I64: n_iregs++; break;
case MIR_T_F:
case MIR_T_D: n_fregs++; break;
case X87UP_CLASS:
case MIR_T_LD: n_qwords = 0; goto pass_by_ref;
case NO_CLASS:
case MIR_T_UNDEF: assert (FALSE);
}
assert (res.mir_op.mode == MIR_OP_MEM);
for (i = 0; i < n_qwords; i++) {
type = qword_types[i];
insn = MIR_new_insn (ctx, tp_mov (type),
MIR_new_mem_op (ctx, type, res.mir_op.u.mem.disp + 8 * i,
res.mir_op.u.mem.base, res.mir_op.u.mem.index,
res.mir_op.u.mem.scale),
VARR_GET (MIR_op_t, call_ops, i + call_ops_start + 2));
MIR_append_insn (ctx, curr_func, insn);
}
if (arg_info->n_iregs + n_iregs > 6 || arg_info->n_fregs + n_fregs > 8) {
n_qwords = 0;
} else { /* aggregate passed by value: */
arg_info->n_iregs += n_iregs;
arg_info->n_fregs += n_fregs;
if (param_decl != NULL) {
param_decl->param_args_num = n_iregs + n_fregs;
param_decl->param_args_start = VARR_LENGTH (MIR_var_t, proto_info.arg_vars);
}
for (n = 0; n < n_qwords; n++) {
var.name = qword_name (ctx, name, n);
var.type = qword_types[n];
VARR_PUSH (MIR_var_t, proto_info.arg_vars, var);
}
}
return res;
}
/* Build the operands of the function's return from the result value RES:
   - aggregate returned in registers: load each classified qword from RES
     memory into a promoted temp and push it onto ret_ops;
   - scalar: push RES itself;
   - aggregate returned by reference: block-copy RES to the hidden return
     address passed in as RET_ADDR_NAME (no ret_ops entry). */
static void target_add_ret_ops (c2m_ctx_t c2m_ctx, struct type *ret_type, op_t res) {
MIR_context_t ctx = c2m_ctx->ctx;
MIR_type_t type;
MIR_type_t qword_types[MAX_QWORDS];
MIR_insn_t insn;
MIR_reg_t ret_addr_reg;
op_t temp, var;
int i, size, n_qwords;
if (void_type_p (ret_type)) return;
n_qwords = process_ret_type (c2m_ctx, ret_type, qword_types);
if (n_qwords != 0) { /* aggregate returned in registers */
for (i = 0; i < n_qwords; i++) {
type = qword_types[i];
temp = get_new_temp (c2m_ctx, promote_mir_int_type (type));
insn = MIR_new_insn (ctx, tp_mov (type), temp.mir_op,
MIR_new_mem_op (ctx, type, res.mir_op.u.mem.disp + 8 * i,
res.mir_op.u.mem.base, res.mir_op.u.mem.index,
res.mir_op.u.mem.scale));
MIR_append_insn (ctx, curr_func, insn);
VARR_PUSH (MIR_op_t, ret_ops, temp.mir_op);
}
} else if (ret_type->mode != TM_STRUCT && ret_type->mode != TM_UNION) {
/* scalar result */
VARR_PUSH (MIR_op_t, ret_ops, res.mir_op);
} else { /* return by reference: copy value to the caller-provided address */
ret_addr_reg = MIR_reg (ctx, RET_ADDR_NAME, curr_func->u.func);
var = new_op (NULL, MIR_new_mem_op (ctx, MIR_T_I8, 0, ret_addr_reg, 0, 1));
size = type_size (c2m_ctx, ret_type);
block_move (c2m_ctx, var, res, size);
}
}
/* Classify a struct/union argument ARG_TYPE into qword types and decide
   whether it can be passed in registers.  On success updates ARG_INFO's
   register counters and returns the number of qwords; returns 0 when the
   argument is not an aggregate, contains x87 parts, or would exceed the
   available registers (6 integer, 8 SSE). */
static int process_aggregate_arg (c2m_ctx_t c2m_ctx, struct type *arg_type,
                                  target_arg_info_t *arg_info, MIR_type_t qword_types[MAX_QWORDS]) {
  int k, int_regs = 0, fp_regs = 0;
  int qwords = classify_arg (c2m_ctx, arg_type, qword_types, FALSE);

  if (qwords == 0) return 0;
  if (arg_type->mode != TM_STRUCT && arg_type->mode != TM_UNION) return 0;
  update_last_qword_type (c2m_ctx, arg_type, qword_types, qwords);
  for (k = 0; k < qwords; k++) { /* count registers needed per qword class */
    MIR_type_t qt = qword_types[k];

    if (qt == MIR_T_I8 || qt == MIR_T_I16 || qt == MIR_T_I32 || qt == MIR_T_I64) {
      int_regs++;
    } else if (qt == MIR_T_F || qt == MIR_T_D) {
      fp_regs++;
    } else if ((int) qt == X87UP_CLASS || qt == MIR_T_LD) {
      return 0; /* x87 parts force memory passing */
    } else {
      assert (FALSE);
    }
  }
  if (arg_info->n_iregs + int_regs > 6 || arg_info->n_fregs + fp_regs > 8) return 0;
  /* aggregate passed by value: update arg_info */
  arg_info->n_iregs += int_regs;
  arg_info->n_fregs += fp_regs;
  return qwords;
}
/* Append prototype variables for one parameter NAME of ARG_TYPE:
   - aggregate passed in registers: one promoted qword-named var per qword;
   - scalar: a single var of its MIR type (register counters updated);
   - other aggregates: a BLK var carrying the value size.
   Fix: removed interleaved pre-merge lines (stray `pass_by_ref:` label,
   MIR_POINTER_TYPE branch, duplicate var.name assignment, old n_fregs/n_iregs
   accumulations) that made the function syntactically invalid. */
static void target_add_arg_proto (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
                                  target_arg_info_t *arg_info, VARR (MIR_var_t) * arg_vars) {
  MIR_var_t var;
  MIR_type_t type;
  MIR_type_t qword_types[MAX_QWORDS];
  int n, n_qwords = process_aggregate_arg (c2m_ctx, arg_type, arg_info, qword_types);

  if (n_qwords != 0) { /* aggregate passed in registers, one var per qword */
    for (n = 0; n < n_qwords; n++) {
      var.name = qword_name (c2m_ctx, name, n);
      var.type = promote_mir_int_type (qword_types[n]);
      VARR_PUSH (MIR_var_t, arg_vars, var);
    }
    return;
  }
  /* pass aggregates on the stack and pass by value for others: */
  var.name = name;
  if (arg_type->mode != TM_STRUCT && arg_type->mode != TM_UNION) {
    type = get_mir_type (c2m_ctx, arg_type);
    var.type = type;
    if (type == MIR_T_F || type == MIR_T_D)
      arg_info->n_fregs++;
    else if (type != MIR_T_LD)
      arg_info->n_iregs++;
  } else {
    var.type = MIR_T_BLK;
    var.size = type_size (c2m_ctx, arg_type);
  }
  VARR_PUSH (MIR_var_t, arg_vars, var);
}
/* Push call operands for one actual argument ARG of ARG_TYPE:
   register-classified aggregates are split into per-qword temporaries
   loaded from the argument memory; other aggregates become a BLK memory
   operand holding the argument address; scalars are passed directly with
   ARG_INFO's register counters updated. */
static void target_add_call_arg_op (c2m_ctx_t c2m_ctx, struct type *arg_type,
                                    target_arg_info_t *arg_info, op_t arg) {
  MIR_context_t ctx = c2m_ctx->ctx;
  MIR_type_t qt, scalar_type;
  MIR_type_t qword_types[MAX_QWORDS];
  op_t qword_temp;
  int k, reg_qwords = process_aggregate_arg (c2m_ctx, arg_type, arg_info, qword_types);

  if (reg_qwords != 0) { /* aggregate passed in registers, qword by qword */
    assert (arg.mir_op.mode == MIR_OP_MEM);
    arg = mem_to_address (c2m_ctx, arg, TRUE);
    for (k = 0; k < reg_qwords; k++) {
      qt = qword_types[k];
      qword_temp = get_new_temp (c2m_ctx, promote_mir_int_type (qt));
      MIR_append_insn (ctx, curr_func,
                       MIR_new_insn (ctx, tp_mov (qt), qword_temp.mir_op,
                                     MIR_new_mem_op (ctx, qt, 8 * k, arg.mir_op.u.reg, 0, 1)));
      VARR_PUSH (MIR_op_t, call_ops, qword_temp.mir_op);
    }
    return;
  }
  /* pass aggregates on the stack and pass by value for others: */
  if (arg_type->mode == TM_STRUCT || arg_type->mode == TM_UNION) {
    assert (arg.mir_op.mode == MIR_OP_MEM);
    arg = mem_to_address (c2m_ctx, arg, TRUE);
    VARR_PUSH (MIR_op_t, call_ops,
               MIR_new_mem_op (ctx, MIR_T_BLK, type_size (c2m_ctx, arg_type), arg.mir_op.u.reg, 0,
                               1));
  } else {
    scalar_type = get_mir_type (c2m_ctx, arg_type);
    VARR_PUSH (MIR_op_t, call_ops, arg.mir_op);
    if (scalar_type == MIR_T_F || scalar_type == MIR_T_D)
      arg_info->n_fregs++;
    else if (scalar_type != MIR_T_LD)
      arg_info->n_iregs++;
  }
}
/* On function entry, store a register-passed aggregate parameter NAME into its
   stack home (param_decl->offset off FP), one classified qword at a time.
   Returns TRUE when such gathering code was emitted; FALSE for scalars (only
   register accounting is done) and for aggregates not passed in registers. */
static int target_gen_gather_arg (c2m_ctx_t c2m_ctx, const char *name, struct type *arg_type,
decl_t param_decl, target_arg_info_t *arg_info) {
MIR_context_t ctx = c2m_ctx->ctx;
MIR_type_t type;
reg_var_t reg_var;
MIR_type_t qword_types[MAX_QWORDS];
int i, n_qwords = process_aggregate_arg (c2m_ctx, arg_type, arg_info, qword_types);
if (arg_type->mode != TM_STRUCT && arg_type->mode != TM_UNION) {
assert (n_qwords == 0);
/* scalar parameter: just account for the register it consumes */
type = get_mir_type (c2m_ctx, arg_type);
if (type == MIR_T_F || type == MIR_T_D)
arg_info->n_fregs++;
else if (type != MIR_T_LD)
arg_info->n_iregs++;
return FALSE;
}
if (n_qwords == 0) return FALSE;
/* store each qword register (named by qword_name) to the parameter's frame slot: */
for (i = 0; i < n_qwords; i++) {
assert (!param_decl->reg_p);
type = qword_types[i];
reg_var = get_reg_var (c2m_ctx, type, qword_name (c2m_ctx, name, i));
MIR_append_insn (ctx, curr_func,
MIR_new_insn (ctx, tp_mov (type),
MIR_new_mem_op (ctx, type, param_decl->offset + 8 * i,
MIR_reg (ctx, FP_NAME, curr_func->u.func), 0, 1),
MIR_new_reg_op (ctx, reg_var.reg)));
}
return TRUE;
}

@ -3,7 +3,13 @@
*/
#include "../mirc.h"
#ifndef _WIN32
#include "mirc-x86_64-linux.h"
#else
#include "mirc-x86_64-win.h"
#endif
/* Built-in source texts: the common mirc definitions plus the x86_64-specific
   macro text above.  NOTE(review): presumably prepended to every compiled
   translation unit -- confirm against the consumers of standard_includes. */
static const char *standard_includes[] = {mirc, x86_64_mirc};

@ -0,0 +1,101 @@
/* This file is a part of MIR project.
Copyright (C) 2019-2020 Vladimir Makarov <vmakarov.gcc@gmail.com>.
*/
/* Predefined-macro C text injected for the x86_64 target.
   NOTE(review): the WIN32/_WIN64/__MSVCRT__ macros and the LLP64 sizes
   (__SIZEOF_LONG__ 4, __SIZE_TYPE__ = long long unsigned) indicate this is
   the Windows variant (mirc-x86_64-win.h content) -- confirm file identity.
   The host compiler's __SIZEOF_LONG_DOUBLE__ selects, at build time via the
   #if below, which long double size is advertised to compiled code. */
static char x86_64_mirc[]
= "#define __amd64 1\n"
"#define __amd64__ 1\n"
"#define __x86_64 1\n"
"#define __x86_64__ 1\n"
"\n"
"#define __SIZEOF_DOUBLE__ 8\n"
"#define __SIZEOF_FLOAT__ 4\n"
"#define __SIZEOF_INT__ 4\n"
#if __SIZEOF_LONG_DOUBLE__ == 16
"#define __SIZEOF_LONG_DOUBLE__ 16\n"
#else
"#define __SIZEOF_LONG_DOUBLE__ 8\n"
#endif
"#define __SIZEOF_LONG_LONG__ 8\n"
"#define __SIZEOF_LONG__ 4\n"
"#define __SIZEOF_POINTER__ 8\n"
"#define __SIZEOF_PTRDIFF_T__ 8\n"
"#define __SIZEOF_SHORT__ 2\n"
"#define __SIZEOF_SIZE_T__ 8\n"
"\n"
"#define __BYTE_ORDER__ 1234\n"
"#define __ORDER_LITTLE_ENDIAN__ 1234\n"
"#define __ORDER_BIG_ENDIAN__ 4321\n"
"\n"
"/* Some type macros: */\n"
"#define __SIZE_TYPE__ long long unsigned int\n"
"#define __PTRDIFF_TYPE__ long long int\n"
"#define __INTMAX_TYPE__ long long int\n"
"#define __UINTMAX_TYPE__ long long unsigned int\n"
"#define __INT8_TYPE__ signed char\n"
"#define __INT16_TYPE__ short\n"
"#define __INT32_TYPE__ int\n"
"#define __INT64_TYPE__ long long int\n"
"#define __UINT8_TYPE__ unsigned char\n"
"#define __UINT16_TYPE__ unsigned short\n"
"#define __UINT32_TYPE__ unsigned int\n"
"#define __UINT64_TYPE__ long long unsigned int\n"
"#define __INTPTR_TYPE__ long long int\n"
"#define __UINTPTR_TYPE__ long long unsigned int\n"
"\n"
"#define __CHAR_BIT__ 8\n"
"#define __INT8_MAX__ 127\n"
"#define __INT16_MAX__ 32767\n"
"#define __INT32_MAX__ 2147483647\n"
"#define __INT64_MAX__ 9223372036854775807LL\n"
"#define __UINT8_MAX__ (__INT8_MAX__ * 2u + 1u)\n"
"#define __UINT16_MAX__ (__INT16_MAX__ * 2u + 1u)\n"
"#define __UINT32_MAX__ (__INT32_MAX__ * 2u + 1u)\n"
"#define __UINT64_MAX__ (__INT64_MAX__ * 2u + 1u)\n"
"#define __SCHAR_MAX__ __INT8_MAX__\n"
"#define __SHRT_MAX__ __INT16_MAX__\n"
"#define __INT_MAX__ __INT32_MAX__\n"
"#define __LONG_MAX__ __INT32_MAX__\n"
"#define __LONG_LONG_MAX__ __INT64_MAX__\n"
"#define __SIZE_MAX__ __UINT64_MAX__\n"
"#define __PTRDIFF_MAX__ __INT64_MAX__\n"
"#define __INTMAX_MAX__ __INT64_MAX__\n"
"#define __UINTMAX_MAX__ __UINT64_MAX__\n"
"#define __INTPTR_MAX__ __INT64_MAX__\n"
"#define __UINTPTR_MAX__ __UINT64_MAX__\n"
"\n"
"#define __FLT_MIN_EXP__ (-125)\n"
"#define __FLT_MAX_EXP__ 128\n"
"#define __FLT_DIG__ 6\n"
"#define __FLT_DECIMAL_DIG__ 9\n"
"#define __FLT_MANT_DIG__ 24\n"
"#define __FLT_MIN__ 1.17549435082228750796873653722224568e-38F\n"
"#define __FLT_MAX__ 3.40282346638528859811704183484516925e+38F\n"
"#define __FLT_EPSILON__ 1.19209289550781250000000000000000000e-7F\n"
"\n"
"#define __DBL_MIN_EXP__ (-1021)\n"
"#define __DBL_MAX_EXP__ 1024\n"
"#define __DBL_DIG__ 15\n"
"#define __DBL_DECIMAL_DIG__ 17\n"
"#define __DBL_MANT_DIG__ 53\n"
"#define __DBL_MAX__ ((double) 1.79769313486231570814527423731704357e+308L)\n"
"#define __DBL_MIN__ ((double) 2.22507385850720138309023271733240406e-308L)\n"
"#define __DBL_EPSILON__ ((double) 2.22044604925031308084726333618164062e-16L)\n"
"\n"
"typedef unsigned short char16_t;\n"
"typedef unsigned int char32_t;\n"
"\n"
"#define WIN32 1\n"
"#define _WIN32 1\n"
"#define __WIN32 1\n"
"#define __WIN32__ 1\n"
"#define WIN64 1\n"
"#define _WIN64 1\n"
"#define __WIN64 1\n"
"#define __WIN64__ 1\n"
"#define WINNT 1\n"
"#define __WINNT 1\n"
"#define __WINNT__ 1\n"
"#define __MSVCRT__ 1\n"
"\n"
"void *alloca (long long unsigned);\n";

@ -4,7 +4,11 @@
#define VA_LIST_IS_ARRAY_P 0
// _MIR_get_thunk, _MIR_redirect_thunk, _MIR_get_interp_shim, _MIR_get_ff_call, _MIR_get_wrapper
/* Small BLK (less than or equal to two quadwords) args are passed *fully*
in regs or on the stack (w/o address); otherwise the value is put
somewhere on the stack and its address is passed instead. The first RBLK
arg is passed in r8. Any other RBLK, independently of size, is always
passed by address as a usual argument. */
void *_MIR_get_bstart_builtin (MIR_context_t ctx) {
static const uint32_t bstart_code[] = {
@ -60,7 +64,28 @@ void *va_arg_builtin (void *p, uint64_t t) {
return a;
}
/* aarch64 va_arg for block (BLK) arguments of byte size S.  Small blocks
   (<= 2 qwords) live by value in the GP register save area or on the stack;
   larger blocks are represented by a stored pointer which is dereferenced.
   Returns the address of the argument value.
   Fix: removed a leftover pre-merge one-line definition of this function
   that preceded this body and caused a redefinition error. */
void *va_stack_arg_builtin (void *p, size_t s) {
  struct aarch64_va_list *va = p;
  void *a;
  long size = (s + 7) / 8 * 8;

  if (size <= 2 * 8 && va->__gr_offs + size > 0) { /* not enough regs to pass: */
    a = va->__stack;
    va->__stack = (char *) va->__stack + size;
    va->__gr_offs += size;
    return a;
  }
  if (size > 2 * 8) size = 8; /* by-address block: its slot is one qword */
  if (va->__gr_offs < 0) { /* still inside the register save area */
    a = (char *) va->__gr_top + va->__gr_offs;
    va->__gr_offs += size;
  } else {
    a = va->__stack;
    va->__stack = (char *) va->__stack + size;
  }
  if (s > 2 * 8) return *(void **) a; /* address */
  return a;
}
void va_start_interp_builtin (MIR_context_t ctx, void *p, void *a) {
struct aarch64_va_list *va = p;
@ -154,18 +179,18 @@ void _MIR_redirect_thunk (MIR_context_t ctx, void *thunk, void *to) {
}
static void gen_blk_mov (MIR_context_t ctx, uint32_t offset, uint32_t addr_offset, uint32_t qwords,
uint32_t addr_reg) {
uint32_t addr_reg) {
static const uint32_t blk_mov_pat[] = {
/* 0:*/ 0xf940026c, /* ldr x12, [x19,<addr_offset>]*/
/* 4:*/ 0x910003e0, /* add <addr_reg>, sp, <offset>*/
/* 8:*/ 0xd280000b, /* mov x11, 0*/
/* c:*/ 0xd280000e, /* mov x14, <qwords>*/
/* 10:*/ 0xf86c696a, /* ldr x10, [x11,x12]*/
/* 14:*/ 0xd10005ce, /* sub x14, x14, #0x1*/
/* 18:*/ 0xf820696a, /* str x10, [x11,<addr_reg>x13]*/
/* 1c:*/ 0xf10001df, /* cmp x14, 0*/
/* 20:*/ 0x9100216b, /* add x11, x11, 8*/
/* 24:*/ 0x54ffff61, /* b.ne 10 */
/* 0:*/ 0xf940026c, /* ldr x12, [x19,<addr_offset>]*/
/* 4:*/ 0x910003e0, /* add <addr_reg>, sp, <offset>*/
/* 8:*/ 0xd280000b, /* mov x11, 0*/
/* c:*/ 0xd280000e, /* mov x14, <qwords>*/
/* 10:*/ 0xf86c696a, /* ldr x10, [x11,x12]*/
/* 14:*/ 0xd10005ce, /* sub x14, x14, #0x1*/
/* 18:*/ 0xf820696a, /* str x10, [x11,<addr_reg>x13]*/
/* 1c:*/ 0xf10001df, /* cmp x14, 0*/
/* 20:*/ 0x9100216b, /* add x11, x11, 8*/
/* 24:*/ 0x54ffff61, /* b.ne 10 */
};
if (qwords == 0) {
uint32_t pat = 0x910003e0 | addr_reg | (offset << 10); /* add <add_reg>, sp, <offset>*/
@ -181,23 +206,27 @@ static void gen_blk_mov (MIR_context_t ctx, uint32_t offset, uint32_t addr_offse
}
}
/* save r0-r7, v0-v7 */
/* aarch64 machine code fragment: push the argument registers x0-x7, the
   indirect-result register x8, and v0-v7 (as q0-q7) onto the stack, per the
   per-instruction comments below. */
static const uint32_t save_insns[] = {
/* save r0-r8,v0-v7 */
0xa9bf1fe6, /* stp R6, R7, [SP, #-16]! */
0xa9bf17e4, /* stp R4, R5, [SP, #-16]! */
0xa9bf0fe2, /* stp R2, R3, [SP, #-16]! */
0xa9bf07e0, /* stp R0, R1, [SP, #-16]! */
0xd10043ff, /* sub SP, SP, #16 */
0xf90007e8, /* str x8, [SP, #8] */
0xadbf1fe6, /* stp Q6, Q7, [SP, #-32]! */
0xadbf17e4, /* stp Q4, Q5, [SP, #-32]! */
0xadbf0fe2, /* stp Q2, Q3, [SP, #-32]! */
0xadbf07e0, /* stp Q0, Q1, [SP, #-32]! */
};
static const uint32_t restore_insns[] = {
/* restore r0-r8,v0-v7 */
0xacc107e0, /* ldp Q0, Q1, SP, #32 */
0xacc10fe2, /* ldp Q2, Q3, SP, #32 */
0xacc117e4, /* ldp Q4, Q5, SP, #32 */
0xacc11fe6, /* ldp Q6, Q7, SP, #32 */
0xf94007e8, /* ldr x8, [SP, #8] */
0x910043ff, /* add SP, SP, #16 */
0xa8c107e0, /* ldp R0, R1, SP, #16 */
0xa8c10fe2, /* ldp R2, R3, SP, #16 */
0xa8c117e4, /* ldp R4, R5, SP, #16 */
@ -211,11 +240,11 @@ static const uint32_t ldld_pat = 0x3dc00260; /* ldr q, [x19], offset */
/* Generation: fun (fun_addr, res_arg_addresses):
push x19, x30; sp-=sp_offset; x9=fun_addr; x19=res/arg_addrs
x8=mem[x19,<offset>]; (arg_reg=mem[x8](or addr of blk copy on the stack)
or x8=mem[x8] or x13=addr of blk copy on the stack;
mem[sp,sp_offset]=x8|x13) ...
x10=mem[x19,<offset>]; (arg_reg=mem[x10](or addr of blk copy on the stack)
or x10=mem[x10] or x13=addr of blk copy on the stack;
mem[sp,sp_offset]=x10|x13) ...
call fun_addr; sp+=offset
x8=mem[x19,<offset>]; res_reg=mem[x8]; ...
x10=mem[x19,<offset>]; res_reg=mem[x10]; ...
pop x19, x30; ret x30. */
void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, size_t nargs,
_MIR_arg_desc_t *arg_descs, int vararg_p) {
@ -233,21 +262,27 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
0xa8c17bf3, /* ldp x19,x30,[sp],16 */
0xd65f03c0, /* ret x30 */
};
static const uint32_t st_pat = 0xf9000000; /* str x, [xn|sp], offset */
static const uint32_t sts_pat = 0xbd000000; /* str s, [xn|sp], offset */
static const uint32_t std_pat = 0xfd000000; /* str d, [xn|sp], offset */
static const uint32_t stld_pat = 0x3d800000; /* str q, [xn|sp], offset */
static const uint32_t gen_ld_pat = 0xf9400000; /* ldr x, [xn|sp], offset */
static const uint32_t st_pat = 0xf9000000; /* str x, [xn|sp], offset */
static const uint32_t sts_pat = 0xbd000000; /* str s, [xn|sp], offset */
static const uint32_t std_pat = 0xfd000000; /* str d, [xn|sp], offset */
static const uint32_t stld_pat = 0x3d800000; /* str q, [xn|sp], offset */
MIR_type_t type;
uint32_t n_xregs = 0, n_vregs = 0, sp_offset = 0, blk_offset = 0, pat, offset_imm, scale;
uint32_t sp = 31, addr_reg, qwords;
uint32_t *addr;
const uint32_t temp_reg = 8; /* x8 or v9 */
const uint32_t temp_reg = 10; /* x10 */
mir_assert (sizeof (long double) == 16);
for (size_t i = 0; i < nargs; i++) { /* caclulate offset for blk params */
type = arg_descs[i].type;
if ((MIR_T_I8 <= type && type <= MIR_T_U64) || type == MIR_T_P || type == MIR_T_BLK) {
if (n_xregs++ >= 8) blk_offset += 8;
if ((MIR_T_I8 <= type && type <= MIR_T_U64) || type == MIR_T_P || MIR_blk_type_p (type)) {
if (type == MIR_T_BLK && (qwords = (arg_descs[i].size + 7) / 8) <= 2) {
if (n_xregs + qwords > 8) blk_offset += qwords * 8;
n_xregs += qwords;
} else {
if (n_xregs++ >= 8) blk_offset += 8;
}
} else if (type == MIR_T_F || type == MIR_T_D || type == MIR_T_LD) {
if (n_vregs++ >= 8) blk_offset += type == MIR_T_LD ? 16 : 8;
} else {
@ -264,16 +299,39 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
offset_imm = (((i + nres) * sizeof (long double) << 10)) >> scale;
if (type == MIR_T_BLK) {
qwords = (arg_descs[i].size + 7) / 8;
addr_reg = n_xregs < 8 ? n_xregs : 13;
gen_blk_mov (ctx, blk_offset, (i + nres) * sizeof (long double), qwords, addr_reg);
blk_offset += qwords * 8;
if (n_xregs++ >= 8) {
pat = st_pat | ((sp_offset >> scale) << 10) | addr_reg | (sp << 5);
push_insns (ctx, &pat, sizeof (pat));
sp_offset += 8;
if (qwords <= 2) {
addr_reg = 13;
pat = ld_pat | offset_imm | addr_reg;
push_insns (ctx, &pat, sizeof (pat));
if (n_xregs + qwords <= 8) {
for (int n = 0; n < qwords; n++) {
pat = gen_ld_pat | (((n * 8) >> scale) << 10) | (n_xregs + n) | (addr_reg << 5);
push_insns (ctx, &pat, sizeof (pat));
}
} else {
for (int n = 0; n < qwords; n++) {
pat = gen_ld_pat | (((n * 8) >> scale) << 10) | temp_reg | (addr_reg << 5);
push_insns (ctx, &pat, sizeof (pat));
pat = st_pat | ((sp_offset >> scale) << 10) | temp_reg | (sp << 5);
push_insns (ctx, &pat, sizeof (pat));
sp_offset += 8;
}
}
n_xregs += qwords;
} else {
addr_reg = n_xregs < 8 ? n_xregs : 13;
gen_blk_mov (ctx, blk_offset, (i + nres) * sizeof (long double), qwords, addr_reg);
blk_offset += qwords * 8;
if (n_xregs++ >= 8) {
pat = st_pat | ((sp_offset >> scale) << 10) | addr_reg | (sp << 5);
push_insns (ctx, &pat, sizeof (pat));
sp_offset += 8;
}
}
} else if ((MIR_T_I8 <= type && type <= MIR_T_U64) || type == MIR_T_P) {
if (n_xregs < 8) {
} else if ((MIR_T_I8 <= type && type <= MIR_T_U64) || type == MIR_T_P || type == MIR_T_RBLK) {
if (type == MIR_T_RBLK && i == 0) {
pat = ld_pat | offset_imm | 8; /* x8 - hidden result address */
} else if (n_xregs < 8) {
pat = ld_pat | offset_imm | n_xregs++;
} else {
pat = ld_pat | offset_imm | temp_reg;
@ -308,18 +366,19 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
n_xregs = n_vregs = 0;
for (size_t i = 0; i < nres; i++) { /* results */
offset_imm = i * sizeof (long double) << 10;
offset_imm >>= res_types[i] == MIR_T_F ? 2 : res_types[i] == MIR_T_D ? 3 : 4;
if (((MIR_T_I8 <= res_types[i] && res_types[i] <= MIR_T_U64) || res_types[i] == MIR_T_P)
&& n_xregs < 8) {
offset_imm >>= 3;
pat = st_pat | offset_imm | n_xregs++ | (19 << 5);
push_insns (ctx, &pat, sizeof (pat));
} else if ((res_types[i] == MIR_T_F || res_types[i] == MIR_T_D || res_types[i] == MIR_T_LD)
&& n_vregs < 8) {
offset_imm >>= res_types[i] == MIR_T_F ? 2 : res_types[i] == MIR_T_D ? 3 : 4;
pat = res_types[i] == MIR_T_F ? sts_pat : res_types[i] == MIR_T_D ? std_pat : stld_pat;
pat |= offset_imm | n_vregs++ | (19 << 5);
push_insns (ctx, &pat, sizeof (pat));
} else {
(*error_func) (MIR_ret_error, "x86-64 can not handle this combination of return values");
(*error_func) (MIR_ret_error, "aarch64 can not handle this combination of return values");
}
}
push_insns (ctx, epilog, sizeof (epilog));
@ -330,21 +389,22 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
/* Transform C call to call of void handler (MIR_context_t ctx, MIR_item_t func_item,
va_list va, MIR_val_t *results) */
void *_MIR_get_interp_shim (MIR_context_t ctx, MIR_item_t func_item, void *handler) {
static const uint32_t save_x19_pat = 0xf81f0ff3; /* str x19, [sp,-16]! */
static const uint32_t save_x19_pat = 0xf81f0ff3; /* str x19, [sp,-16]! */
static const uint32_t set_gr_offs = 0x128007e9; /* mov w9, #-64 # gr_offs */
static const uint32_t set_x8_gr_offs = 0x128008e9; /* mov w9, #-72 # gr_offs */
static const uint32_t prepare_pat[] = {
0xd10083ff, /* sub sp, sp, 32 # allocate va_list */
0x910003e8, /* mov x8, sp # va_list addr */
0x128007e9, /* mov w9, #-64 # gr_offs */
0xb9001909, /* str w9,[x8, 24] # va_list.gr_offs */
0x910003ea, /* mov x10, sp # va_list addr */
0xb9001949, /* str w9,[x10, 24] # va_list.gr_offs */
0x12800fe9, /* mov w9, #-128 # vr_offs */
0xb9001d09, /* str w9,[x8, 28] #va_list.vr_offs */
0x910383e9, /* add x9, sp, #224 # gr_top */
0xf9000509, /* str x9,[x8, 8] # va_list.gr_top */
0xb9001d49, /* str w9,[x10, 28] #va_list.vr_offs */
0x9103c3e9, /* add x9, sp, #240 # gr_top */
0xf9000549, /* str x9,[x10, 8] # va_list.gr_top */
0x91004129, /* add x9, x9, #16 # stack */
0xf9000109, /* str x9,[x8] # valist.stack */
0xf9000149, /* str x9,[x10] # valist.stack */
0x910283e9, /* add x9, sp, #160 # vr_top*/
0xf9000909, /* str x9,[x8, 16] # va_list.vr_top */
0xaa0803e2, /* mov x2, x8 # va arg */
0xf9000949, /* str x9,[x10, 16] # va_list.vr_top */
0xaa0a03e2, /* mov x2, x10 # va arg */
0xd2800009, /* mov x9, <(nres+1)*16> */
0xcb2963ff, /* sub sp, sp, x9 */
0x910043e3, /* add x3, sp, 16 # results arg */
@ -353,18 +413,24 @@ void *_MIR_get_interp_shim (MIR_context_t ctx, MIR_item_t func_item, void *handl
};
static const uint32_t shim_end[] = {
0xf94003fe, /* ldr x30, [sp] */
0xd2800009, /* mov x9, 224+(nres+1)*16 */
0xd2800009, /* mov x9, 240+(nres+1)*16 */
0x8b2963ff, /* add sp, sp, x9 */
0xf84107f3, /* ldr x19, sp, 16 */
0xd65f03c0, /* ret x30 */
};
uint32_t pat, imm, n_xregs, n_vregs, offset, offset_imm;
uint32_t nres = func_item->u.func->nres;
MIR_type_t *results = func_item->u.func->res_types;
MIR_func_t func = func_item->u.func;
uint32_t nres = func->nres;
int x8_res_p = func->nargs != 0 && VARR_GET (MIR_var_t, func->vars, 0).type == MIR_T_RBLK;
MIR_type_t *results = func->res_types;
VARR_TRUNC (uint8_t, machine_insns, 0);
push_insns (ctx, &save_x19_pat, sizeof (save_x19_pat));
push_insns (ctx, save_insns, sizeof (save_insns));
if (x8_res_p)
push_insns (ctx, &set_x8_gr_offs, sizeof (set_x8_gr_offs));
else
push_insns (ctx, &set_gr_offs, sizeof (set_gr_offs));
push_insns (ctx, prepare_pat, sizeof (prepare_pat));
imm = (nres + 1) * 16;
mir_assert (imm < (1 << 16));
@ -394,7 +460,7 @@ void *_MIR_get_interp_shim (MIR_context_t ctx, MIR_item_t func_item, void *handl
offset += 16;
}
push_insns (ctx, shim_end, sizeof (shim_end));
imm = 224 + (nres + 1) * 16;
imm = 240 + (nres + 1) * 16;
mir_assert (imm < (1 << 16));
((uint32_t *) (VARR_ADDR (uint8_t, machine_insns) + VARR_LENGTH (uint8_t, machine_insns)))[-4]
|= imm << 5;
@ -402,14 +468,14 @@ void *_MIR_get_interp_shim (MIR_context_t ctx, MIR_item_t func_item, void *handl
VARR_LENGTH (uint8_t, machine_insns));
}
/* Save regs x0-x7, q0-q7; x9 = call hook_address (ctx, called_func); restore regs; br x9 */
/* Save regs x8, x0-x7, q0-q7; x9 = call hook_address (ctx, called_func); restore regs; br x9 */
void *_MIR_get_wrapper (MIR_context_t ctx, MIR_item_t called_func, void *hook_address) {
static const uint32_t jmp_insn = 0xd61f0120; /* br x9 */
static const uint32_t move_insn = 0xaa0003e9; /* mov x9, x0 */
static const uint32_t save_fplr = 0xa9bf7bfd; /* stp R29, R30, [SP, #-16]! */
static const uint32_t restore_fplr = 0xa8c17bfd; /* ldp R29, R30, SP, #16 */
uint8_t *base_addr, *curr_addr, *code;
size_t len = sizeof (save_insns) + sizeof (restore_insns); /* initial code length */
size_t len = sizeof (save_insns) + sizeof (restore_insns);
for (;;) {
curr_addr = base_addr = _MIR_get_new_code_addr (ctx, len);

@ -84,6 +84,9 @@ static inline int target_call_used_hard_reg_p (MIR_reg_t hard_reg, MIR_type_t ty
| old FP | frame pointer for previous func stack frame; new FP refers for here
| | it has lowest address as 12-bit offsets are only positive
|---------------|
| small aggr |
| save area | optional
|---------------|
| alloca areas | optional
|---------------|
| slots for | dynamically allocated/deallocated by caller
@ -128,7 +131,7 @@ static MIR_reg_t get_arg_reg (MIR_type_t arg_type, size_t *int_arg_num, size_t *
}
(*fp_arg_num)++;
*mov_code = arg_type == MIR_T_F ? MIR_FMOV : arg_type == MIR_T_D ? MIR_DMOV : MIR_LDMOV;
} else { /* including BLK: */
} else { /* including BLK, RBLK: */
switch (*int_arg_num) {
case 0:
case 1:
@ -182,9 +185,12 @@ static void gen_blk_mov (gen_ctx_t gen_ctx, MIR_insn_t anchor, size_t to_disp,
treg_op2 = MIR_new_reg_op (ctx, gen_new_temp_reg (gen_ctx, MIR_T_I64, func));
treg_op3 = MIR_new_reg_op (ctx, gen_new_temp_reg (gen_ctx, MIR_T_I64, func));
/* Save arg regs: */
if (save_regs > 0) gen_mov (gen_ctx, anchor, MIR_MOV, treg_op, _MIR_new_hard_reg_op (ctx, R0_HARD_REG));
if (save_regs > 1) gen_mov (gen_ctx, anchor, MIR_MOV, treg_op2, _MIR_new_hard_reg_op (ctx, R1_HARD_REG));
if (save_regs > 2) gen_mov (gen_ctx, anchor, MIR_MOV, treg_op3, _MIR_new_hard_reg_op (ctx, R2_HARD_REG));
if (save_regs > 0)
gen_mov (gen_ctx, anchor, MIR_MOV, treg_op, _MIR_new_hard_reg_op (ctx, R0_HARD_REG));
if (save_regs > 1)
gen_mov (gen_ctx, anchor, MIR_MOV, treg_op2, _MIR_new_hard_reg_op (ctx, R1_HARD_REG));
if (save_regs > 2)
gen_mov (gen_ctx, anchor, MIR_MOV, treg_op3, _MIR_new_hard_reg_op (ctx, R2_HARD_REG));
/* call blk move: */
proto_item = _MIR_builtin_proto (ctx, curr_func_item->module, BLK_MOV_P, 0, NULL, 3, MIR_T_I64,
"to", MIR_T_I64, "from", MIR_T_I64, "nwords");
@ -200,7 +206,8 @@ static void gen_blk_mov (gen_ctx_t gen_ctx, MIR_insn_t anchor, size_t to_disp,
MIR_new_insn (gen_ctx->ctx, MIR_ADD, _MIR_new_hard_reg_op (ctx, R1_HARD_REG),
MIR_new_reg_op (ctx, from_base_reg),
MIR_new_int_op (ctx, from_disp)));
gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R2_HARD_REG), MIR_new_int_op (ctx, qwords));
gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R2_HARD_REG),
MIR_new_int_op (ctx, qwords));
ops[0] = MIR_new_ref_op (ctx, proto_item);
ops[1] = freg_op;
ops[2] = _MIR_new_hard_reg_op (ctx, R0_HARD_REG);
@ -209,9 +216,12 @@ static void gen_blk_mov (gen_ctx_t gen_ctx, MIR_insn_t anchor, size_t to_disp,
new_insn = MIR_new_insn_arr (ctx, MIR_CALL, 5, ops);
gen_add_insn_before (gen_ctx, anchor, new_insn);
/* Restore arg regs: */
if (save_regs > 0) gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R0_HARD_REG), treg_op);
if (save_regs > 1) gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R1_HARD_REG), treg_op2);
if (save_regs > 2) gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R2_HARD_REG), treg_op3);
if (save_regs > 0)
gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R0_HARD_REG), treg_op);
if (save_regs > 1)
gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R1_HARD_REG), treg_op2);
if (save_regs > 2)
gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R2_HARD_REG), treg_op3);
}
static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
@ -250,8 +260,8 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
if (i - start < nargs) {
type = arg_vars[i - start].type;
} else if (call_insn->ops[i].mode == MIR_OP_MEM) {
type = MIR_T_BLK;
gen_assert (call_insn->ops[i].u.mem.type == type);
type = call_insn->ops[i].u.mem.type;
gen_assert (type == MIR_T_BLK || type == MIR_T_RBLK);
} else {
mode = call_insn->ops[i].value_mode; // ??? smaller ints
gen_assert (mode == MIR_OP_INT || mode == MIR_OP_UINT || mode == MIR_OP_FLOAT
@ -261,7 +271,12 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
"passing float variadic arg (should be passed as double)");
type = mode == MIR_OP_DOUBLE ? MIR_T_D : mode == MIR_OP_LDOUBLE ? MIR_T_LD : MIR_T_I64;
}
if (get_arg_reg (type, &int_arg_num, &fp_arg_num, &new_insn_code) == MIR_NON_HARD_REG) {
gen_assert (!MIR_blk_type_p (type) || call_insn->ops[i].mode == MIR_OP_MEM);
if (type == MIR_T_RBLK && i == start) continue; /* hidden arg */
if (type == MIR_T_BLK && (qwords = (call_insn->ops[i].u.mem.disp + 7) / 8) <= 2) {
if (int_arg_num + qwords > 8) blk_offset += qwords * 8;
int_arg_num += qwords;
} else if (get_arg_reg (type, &int_arg_num, &fp_arg_num, &new_insn_code) == MIR_NON_HARD_REG) {
if (type == MIR_T_LD && blk_offset % 16 != 0) blk_offset = (blk_offset + 15) / 16 * 16;
blk_offset += type == MIR_T_LD ? 16 : 8;
}
@ -271,12 +286,12 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
for (size_t i = start; i < nops; i++) {
arg_op = call_insn->ops[i];
gen_assert (arg_op.mode == MIR_OP_REG || arg_op.mode == MIR_OP_HARD_REG
|| (arg_op.mode == MIR_OP_MEM && arg_op.u.mem.type == MIR_T_BLK));
|| (arg_op.mode == MIR_OP_MEM && MIR_blk_type_p (arg_op.u.mem.type)));
if (i - start < nargs) {
type = arg_vars[i - start].type;
} else if (call_insn->ops[i].mode == MIR_OP_MEM) {
type = MIR_T_BLK;
gen_assert (call_insn->ops[i].u.mem.type == type);
type = call_insn->ops[i].u.mem.type;
gen_assert (type == MIR_T_BLK || type == MIR_T_RBLK);
} else {
mode = call_insn->ops[i].value_mode; // ??? smaller ints
type = mode == MIR_OP_DOUBLE ? MIR_T_D : mode == MIR_OP_LDOUBLE ? MIR_T_LD : MIR_T_I64;
@ -287,23 +302,67 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
ext_insn = MIR_new_insn (ctx, ext_code, temp_op, arg_op);
call_insn->ops[i] = arg_op = temp_op;
}
if (type == MIR_T_BLK) {
gen_assert (arg_op.mode == MIR_OP_MEM && arg_op.u.mem.disp >= 0 && arg_op.u.mem.index == 0);
gen_assert (
!MIR_blk_type_p (type)
|| (arg_op.mode == MIR_OP_MEM && arg_op.u.mem.disp >= 0 && arg_op.u.mem.index == 0));
if (type == MIR_T_RBLK && i == start) { /* hidden arg */
arg_reg_op = _MIR_new_hard_reg_op (ctx, R8_HARD_REG);
gen_mov (gen_ctx, call_insn, MIR_MOV, arg_reg_op, MIR_new_reg_op (ctx, arg_op.u.mem.base));
call_insn->ops[i] = arg_reg_op;
continue;
} else if (type == MIR_T_BLK) {
qwords = (arg_op.u.mem.disp + 7) / 8;
gen_blk_mov (gen_ctx, call_insn, blk_offset, SP_HARD_REG, 0, arg_op.u.mem.base, qwords, int_arg_num);
if (qwords <= 2) {
arg_reg = R0_HARD_REG + int_arg_num;
if (int_arg_num + qwords <= 8) {
/* A trick to keep arg regs live: */
call_insn->ops[i]
= _MIR_new_hard_reg_mem_op (ctx, MIR_T_UNDEF, 0, int_arg_num,
qwords < 2 ? MIR_NON_HARD_REG : int_arg_num + 1, 1);
if (qwords == 0) continue;
new_insn
= MIR_new_insn (ctx, MIR_MOV, _MIR_new_hard_reg_op (ctx, R0_HARD_REG + int_arg_num++),
MIR_new_mem_op (ctx, MIR_T_I64, 0, arg_op.u.mem.base, 0, 1));
gen_add_insn_before (gen_ctx, call_insn, new_insn);
if (qwords == 2) {
new_insn
= MIR_new_insn (ctx, MIR_MOV, _MIR_new_hard_reg_op (ctx, R0_HARD_REG + int_arg_num++),
MIR_new_mem_op (ctx, MIR_T_I64, 8, arg_op.u.mem.base, 0, 1));
gen_add_insn_before (gen_ctx, call_insn, new_insn);
}
} else { /* pass on stack w/o address: */
gen_blk_mov (gen_ctx, call_insn, mem_size, SP_HARD_REG, 0, arg_op.u.mem.base, qwords,
int_arg_num);
call_insn->ops[i] = _MIR_new_hard_reg_mem_op (ctx, MIR_T_UNDEF, mem_size, SP_HARD_REG,
MIR_NON_HARD_REG, 1);
mem_size += qwords * 8;
blk_offset += qwords * 8;
int_arg_num += qwords;
}
continue;
}
gen_blk_mov (gen_ctx, call_insn, blk_offset, SP_HARD_REG, 0, arg_op.u.mem.base, qwords,
int_arg_num);
arg_op = MIR_new_reg_op (ctx, gen_new_temp_reg (gen_ctx, MIR_T_I64, func));
gen_add_insn_before (gen_ctx, call_insn,
MIR_new_insn (gen_ctx->ctx, MIR_ADD, arg_op,
_MIR_new_hard_reg_op (ctx, SP_HARD_REG),
MIR_new_int_op (ctx, blk_offset)));
MIR_new_insn (gen_ctx->ctx, MIR_ADD, arg_op,
_MIR_new_hard_reg_op (ctx, SP_HARD_REG),
MIR_new_int_op (ctx, blk_offset)));
blk_offset += qwords * 8;
}
if ((arg_reg = get_arg_reg (type, &int_arg_num, &fp_arg_num, &new_insn_code))
!= MIR_NON_HARD_REG) {
!= MIR_NON_HARD_REG) {
/* put arguments to argument hard regs */
if (ext_insn != NULL) gen_add_insn_before (gen_ctx, call_insn, ext_insn);
arg_reg_op = _MIR_new_hard_reg_op (ctx, arg_reg);
new_insn = MIR_new_insn (ctx, new_insn_code, arg_reg_op, arg_op);
if (type != MIR_T_RBLK) {
new_insn = MIR_new_insn (ctx, new_insn_code, arg_reg_op, arg_op);
} else {
assert (arg_op.mode == MIR_OP_MEM);
new_insn = MIR_new_insn (ctx, MIR_MOV, arg_reg_op, MIR_new_reg_op (ctx, arg_op.u.mem.base));
arg_reg_op = _MIR_new_hard_reg_mem_op (ctx, MIR_T_RBLK, arg_op.u.mem.disp, arg_reg,
MIR_NON_HARD_REG, 1);
}
gen_add_insn_before (gen_ctx, call_insn, new_insn);
call_insn->ops[i] = arg_reg_op;
} else { /* put arguments on the stack */
@ -313,7 +372,13 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
= (type == MIR_T_F ? MIR_FMOV
: type == MIR_T_D ? MIR_DMOV : type == MIR_T_LD ? MIR_LDMOV : MIR_MOV);
mem_op = _MIR_new_hard_reg_mem_op (ctx, mem_type, mem_size, SP_HARD_REG, MIR_NON_HARD_REG, 1);
new_insn = MIR_new_insn (ctx, new_insn_code, mem_op, arg_op);
if (type != MIR_T_RBLK) {
new_insn = MIR_new_insn (ctx, new_insn_code, mem_op, arg_op);
} else {
assert (arg_op.mode == MIR_OP_MEM);
new_insn
= MIR_new_insn (ctx, new_insn_code, mem_op, MIR_new_reg_op (ctx, arg_op.u.mem.base));
}
gen_assert (prev_call_insn != NULL); /* call_insn should not be 1st after simplification */
MIR_insert_insn_after (ctx, curr_func_item, prev_call_insn, new_insn);
prev_insn = DLIST_PREV (MIR_insn_t, new_insn);
@ -606,6 +671,7 @@ DEF_VARR (MIR_code_reloc_t);
struct target_ctx {
unsigned char alloca_p, stack_arg_func_p, leaf_p;
size_t small_aggregate_save_area;
VARR (int) * pattern_indexes;
VARR (insn_pattern_info_t) * insn_pattern_info;
VARR (uint8_t) * result_code;
@ -617,6 +683,7 @@ struct target_ctx {
#define alloca_p gen_ctx->target_ctx->alloca_p
#define stack_arg_func_p gen_ctx->target_ctx->stack_arg_func_p
#define leaf_p gen_ctx->target_ctx->leaf_p
#define small_aggregate_save_area gen_ctx->target_ctx->small_aggregate_save_area
#define pattern_indexes gen_ctx->target_ctx->pattern_indexes
#define insn_pattern_info gen_ctx->target_ctx->insn_pattern_info
#define result_code gen_ctx->target_ctx->result_code
@ -638,18 +705,53 @@ static void target_machinize (gen_ctx_t gen_ctx) {
MIR_type_t type, mem_type, res_type;
MIR_insn_code_t code, new_insn_code;
MIR_insn_t insn, next_insn, new_insn, anchor;
MIR_var_t var;
MIR_reg_t ret_reg, arg_reg;
MIR_op_t ret_reg_op, arg_reg_op, mem_op, prev_sp_op, temp_op;
size_t i, int_arg_num, fp_arg_num, mem_size;
MIR_op_t ret_reg_op, arg_reg_op, mem_op, temp_op;
size_t i, int_arg_num, fp_arg_num, mem_size, qwords;
assert (curr_func_item->item_type == MIR_func_item);
func = curr_func_item->u.func;
stack_arg_func_p = FALSE;
anchor = DLIST_HEAD (MIR_insn_t, func->insns);
small_aggregate_save_area = 0;
for (i = int_arg_num = fp_arg_num = mem_size = 0; i < func->nargs; i++) {
/* Argument extensions is already done in simplify */
/* Prologue: generate arg_var = hard_reg|stack mem ... */
type = VARR_GET (MIR_var_t, func->vars, i).type;
/* Prologue: generate arg_var = hard_reg|stack mem|stack addr ... */
var = VARR_GET (MIR_var_t, func->vars, i);
type = var.type;
if (type == MIR_T_RBLK && i == 0) { /* hidden arg */
arg_reg_op = _MIR_new_hard_reg_op (ctx, R8_HARD_REG);
gen_mov (gen_ctx, anchor, MIR_MOV, MIR_new_reg_op (ctx, i + 1), arg_reg_op);
continue;
} else if (type == MIR_T_BLK && (qwords = (var.size + 7) / 8) <= 2) {
if (int_arg_num + qwords <= 8) {
small_aggregate_save_area += qwords * 8;
new_insn = MIR_new_insn (ctx, MIR_SUB, MIR_new_reg_op (ctx, i + 1),
_MIR_new_hard_reg_op (ctx, FP_HARD_REG),
MIR_new_int_op (ctx, small_aggregate_save_area));
gen_add_insn_before (gen_ctx, anchor, new_insn);
if (qwords == 0) continue;
gen_mov (gen_ctx, anchor, MIR_MOV, MIR_new_mem_op (ctx, MIR_T_I64, 0, i + 1, 0, 1),
_MIR_new_hard_reg_op (ctx, int_arg_num));
if (qwords == 2)
gen_mov (gen_ctx, anchor, MIR_MOV, MIR_new_mem_op (ctx, MIR_T_I64, 8, i + 1, 0, 1),
_MIR_new_hard_reg_op (ctx, int_arg_num + 1));
} else { /* pass on stack w/o address: */
if (!stack_arg_func_p) {
stack_arg_func_p = TRUE;
gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R8_HARD_REG),
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, 16, FP_HARD_REG, MIR_NON_HARD_REG, 1));
}
gen_add_insn_before (gen_ctx, anchor,
MIR_new_insn (ctx, MIR_ADD, MIR_new_reg_op (ctx, i + 1),
_MIR_new_hard_reg_op (ctx, R8_HARD_REG),
MIR_new_int_op (ctx, mem_size)));
mem_size += qwords * 8;
}
int_arg_num += qwords;
continue;
}
arg_reg = get_arg_reg (type, &int_arg_num, &fp_arg_num, &new_insn_code);
if (arg_reg != MIR_NON_HARD_REG) {
arg_reg_op = _MIR_new_hard_reg_op (ctx, arg_reg);
@ -658,8 +760,7 @@ static void target_machinize (gen_ctx_t gen_ctx) {
/* arg is on the stack */
if (!stack_arg_func_p) {
stack_arg_func_p = TRUE;
prev_sp_op = _MIR_new_hard_reg_op (ctx, R8_HARD_REG);
gen_mov (gen_ctx, anchor, MIR_MOV, prev_sp_op,
gen_mov (gen_ctx, anchor, MIR_MOV, _MIR_new_hard_reg_op (ctx, R8_HARD_REG),
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, 16, FP_HARD_REG, MIR_NON_HARD_REG, 1));
}
mem_type = type == MIR_T_F || type == MIR_T_D || type == MIR_T_LD ? type : MIR_T_I64;
@ -841,7 +942,7 @@ static void target_make_prolog_epilog (gen_ctx_t gen_ctx, bitmap_t used_hard_reg
saved_fregs_num++;
}
if (leaf_p && !alloca_p && saved_iregs_num == 0 && saved_fregs_num == 0 && !func->vararg_p
&& stack_slots_num == 0)
&& stack_slots_num == 0 && !stack_arg_func_p && small_aggregate_save_area == 0)
return;
sp_reg_op = _MIR_new_hard_reg_op (ctx, SP_HARD_REG);
fp_reg_op = _MIR_new_hard_reg_op (ctx, FP_HARD_REG);
@ -888,7 +989,7 @@ static void target_make_prolog_epilog (gen_ctx_t gen_ctx, bitmap_t used_hard_reg
_MIR_new_hard_reg_mem_op (ctx, MIR_T_I64, 0, SP_HARD_REG, MIR_NON_HARD_REG, 1),
_MIR_new_hard_reg_op (ctx, FP_HARD_REG)); /* mem[sp] = fp */
gen_mov (gen_ctx, anchor, MIR_MOV, fp_reg_op, sp_reg_op); /* fp = sp */
if (func->vararg_p) {
if (func->vararg_p) { // ??? saving only regs corresponding to ...
MIR_reg_t base = SP_HARD_REG;
start = (int64_t) frame_size - reg_save_area_size;
@ -933,6 +1034,13 @@ static void target_make_prolog_epilog (gen_ctx_t gen_ctx, bitmap_t used_hard_reg
offset += 16;
}
}
if (small_aggregate_save_area != 0) { // ??? duplication with vararg saved regs
if (small_aggregate_save_area % 16 != 0)
small_aggregate_save_area = (small_aggregate_save_area + 15) / 16 * 16;
new_insn = MIR_new_insn (ctx, MIR_SUB, sp_reg_op, sp_reg_op,
MIR_new_int_op (ctx, small_aggregate_save_area));
gen_add_insn_before (gen_ctx, anchor, new_insn); /* sp -= <small aggr save area size> */
}
/* Epilogue: */
anchor = DLIST_TAIL (MIR_insn_t, func->insns);
assert (anchor->code == MIR_RET);

@ -262,12 +262,12 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
for (size_t i = start; i < nops; i++) {
arg_op = call_insn->ops[i];
gen_assert (arg_op.mode == MIR_OP_REG || arg_op.mode == MIR_OP_HARD_REG
|| (arg_op.mode == MIR_OP_MEM && arg_op.u.mem.type == MIR_T_BLK));
|| (arg_op.mode == MIR_OP_MEM && MIR_blk_type_p (arg_op.u.mem.type)));
if (i - start < nargs) {
type = arg_vars[i - start].type;
} else if (call_insn->ops[i].mode == MIR_OP_MEM) {
type = MIR_T_BLK;
gen_assert (call_insn->ops[i].u.mem.type == type);
type = arg_op.u.mem.type;
gen_assert (type == MIR_T_BLK || type == MIR_T_RBLK);
} else {
mode = call_insn->ops[i].value_mode; // ??? smaller ints
gen_assert (mode == MIR_OP_INT || mode == MIR_OP_UINT || mode == MIR_OP_FLOAT
@ -333,7 +333,14 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
} else if (type != MIR_T_F && type != MIR_T_D && type != MIR_T_LD && n_iregs < 8) {
if (ext_insn != NULL) gen_add_insn_before (gen_ctx, call_insn, ext_insn);
arg_reg_op = _MIR_new_hard_reg_op (ctx, R3_HARD_REG + n_iregs);
gen_mov (gen_ctx, call_insn, MIR_MOV, arg_reg_op, arg_op);
if (type != MIR_T_RBLK) {
gen_mov (gen_ctx, call_insn, MIR_MOV, arg_reg_op, arg_op);
} else {
assert (arg_op.mode == MIR_OP_MEM);
gen_mov (gen_ctx, call_insn, MIR_MOV, arg_reg_op, MIR_new_reg_op (ctx, arg_op.u.mem.base));
arg_reg_op = _MIR_new_hard_reg_mem_op (ctx, MIR_T_RBLK, arg_op.u.mem.disp,
R3_HARD_REG + n_iregs, MIR_NON_HARD_REG, 1);
}
call_insn->ops[i] = arg_reg_op;
} else { /* put arguments on the stack */
if (ext_insn != NULL) gen_add_insn_before (gen_ctx, call_insn, ext_insn);
@ -342,7 +349,12 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
: type == MIR_T_D ? MIR_DMOV : type == MIR_T_LD ? MIR_LDMOV : MIR_MOV);
mem_op = _MIR_new_hard_reg_mem_op (ctx, mem_type, mem_size + PPC64_STACK_HEADER_SIZE,
SP_HARD_REG, MIR_NON_HARD_REG, 1);
gen_mov (gen_ctx, call_insn, new_insn_code, mem_op, arg_op);
if (type != MIR_T_RBLK) {
gen_mov (gen_ctx, call_insn, new_insn_code, mem_op, arg_op);
} else {
assert (arg_op.mode == MIR_OP_MEM);
gen_mov (gen_ctx, call_insn, new_insn_code, mem_op, MIR_new_reg_op (ctx, arg_op.u.mem.base));
}
call_insn->ops[i] = mem_op;
}
mem_size += type == MIR_T_LD ? 16 : 8;
@ -649,7 +661,7 @@ static void target_machinize (gen_ctx_t gen_ctx) {
gen_mov (gen_ctx, anchor, type == MIR_T_F ? MIR_FMOV : type == MIR_T_D ? MIR_DMOV : MIR_LDMOV,
arg_var_op,
_MIR_new_hard_reg_mem_op (ctx, type, disp, R12_HARD_REG, MIR_NON_HARD_REG, 1));
} else if (type == MIR_T_BLK) { // ??? FBLK
} else if (type == MIR_T_BLK) {
qwords = (VARR_GET (MIR_var_t, func->vars, i).size + 7) / 8;
offset = int_arg_num < 8 ? PPC64_STACK_HEADER_SIZE + int_arg_num * 8 : disp;
set_prev_sp_op (gen_ctx, anchor, &prev_sp_op);
@ -2329,7 +2341,7 @@ static uint8_t *target_translate (gen_ctx_t gen_ctx, size_t *len) {
if (insn->ops[2].u.i == 0) {
gen_mov (gen_ctx, insn, MIR_MOV, insn->ops[0], insn->ops[1]);
old_insn = insn;
insn = DLIST_NEXT (MIR_insn_t, insn);
insn = DLIST_PREV (MIR_insn_t, insn);
gen_delete_insn (gen_ctx, old_insn);
} else {
if (insn->ops[2].mode == MIR_OP_INT && insn->ops[2].u.i < 0) {

@ -286,8 +286,8 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
} else if (i - start < nargs) {
type = arg_vars[i - start].type;
} else if (arg_op.mode == MIR_OP_MEM) {
type = MIR_T_BLK;
gen_assert (arg_op.u.mem.type == type);
type = arg_op.u.mem.type;
gen_assert (type == MIR_T_BLK || type == MIR_T_RBLK);
} else {
mode = arg_op.value_mode; // ??? smaller ints
gen_assert (mode == MIR_OP_INT || mode == MIR_OP_UINT || mode == MIR_OP_FLOAT
@ -307,7 +307,7 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
if ((type == MIR_T_F || type == MIR_T_D) && n_fregs < 4) {
/* put arguments to argument hard regs: */
n_fregs++;
} else if (type != MIR_T_F && type != MIR_T_D && n_iregs < 5) {
} else if (type != MIR_T_F && type != MIR_T_D && n_iregs < 5) { /* RBLK too */
n_iregs++;
} else { /* put arguments on the stack */
param_mem_size += 8;
@ -321,14 +321,14 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
for (size_t i = 2; i < nops; i++) { /* process args and ???long double results: */
arg_op = call_insn->ops[i];
gen_assert (arg_op.mode == MIR_OP_REG || arg_op.mode == MIR_OP_HARD_REG
|| (arg_op.mode == MIR_OP_MEM && arg_op.u.mem.type == MIR_T_BLK));
|| (arg_op.mode == MIR_OP_MEM && MIR_blk_type_p (arg_op.u.mem.type)));
if (i < start) {
type = proto->res_types[i - 2];
} else if (i - start < nargs) {
type = arg_vars[i - start].type;
} else if (call_insn->ops[i].mode == MIR_OP_MEM) {
type = MIR_T_BLK;
gen_assert (call_insn->ops[i].u.mem.type == type);
type = call_insn->ops[i].u.mem.type;
gen_assert (type == MIR_T_BLK || type == MIR_T_RBLK);
} else {
mode = call_insn->ops[i].value_mode; // ??? smaller ints
gen_assert (mode == MIR_OP_INT || mode == MIR_OP_UINT || mode == MIR_OP_DOUBLE
@ -372,7 +372,14 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
} else if (type != MIR_T_F && type != MIR_T_D && n_iregs < 5) {
if (ext_insn != NULL) gen_add_insn_before (gen_ctx, call_insn, ext_insn);
arg_reg_op = _MIR_new_hard_reg_op (ctx, R2_HARD_REG + n_iregs);
gen_mov (gen_ctx, call_insn, MIR_MOV, arg_reg_op, arg_op);
if (type != MIR_T_RBLK) {
gen_mov (gen_ctx, call_insn, MIR_MOV, arg_reg_op, arg_op);
} else {
assert (arg_op.mode == MIR_OP_MEM);
gen_mov (gen_ctx, call_insn, MIR_MOV, arg_reg_op, MIR_new_reg_op (ctx, arg_op.u.mem.base));
arg_reg_op = _MIR_new_hard_reg_mem_op (ctx, MIR_T_RBLK, arg_op.u.mem.disp,
R2_HARD_REG + n_iregs, MIR_NON_HARD_REG, 1);
}
if (i >= start) call_insn->ops[i] = arg_reg_op; /* don't change LD return yet */
n_iregs++;
} else { /* put arguments on the stack: */
@ -380,7 +387,13 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
new_insn_code = (type == MIR_T_F ? MIR_FMOV : type == MIR_T_D ? MIR_DMOV : MIR_MOV);
mem_op = _MIR_new_hard_reg_mem_op (ctx, mem_type, param_mem_size + S390X_STACK_HEADER_SIZE,
SP_HARD_REG, MIR_NON_HARD_REG, 1);
gen_mov (gen_ctx, call_insn, new_insn_code, mem_op, arg_op);
if (type != MIR_T_RBLK) {
gen_mov (gen_ctx, call_insn, new_insn_code, mem_op, arg_op);
} else {
assert (arg_op.mode == MIR_OP_MEM);
gen_mov (gen_ctx, call_insn, new_insn_code, mem_op,
MIR_new_reg_op (ctx, arg_op.u.mem.base));
}
if (i >= start) call_insn->ops[i] = mem_op;
param_mem_size += 8;
}

@ -182,7 +182,7 @@ static MIR_reg_t get_arg_reg (MIR_type_t arg_type, size_t *int_arg_num, size_t *
(*int_arg_num)++; /* arg slot used by fp, skip int register */
#endif
*mov_code = arg_type == MIR_T_F ? MIR_FMOV : MIR_DMOV;
} else {
} else { /* including RBLK */
arg_reg = get_int_arg_reg (*int_arg_num);
#ifdef _WIN64
(*fp_arg_num)++; /* arg slot used by int, skip fp register */
@ -203,7 +203,7 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
MIR_op_mode_t mode;
MIR_var_t *arg_vars = NULL;
MIR_reg_t arg_reg;
MIR_op_t arg_op, temp_op, arg_reg_op, ret_reg_op, mem_op;
MIR_op_t arg_op, new_arg_op, temp_op, ret_reg_op, mem_op;
MIR_insn_code_t new_insn_code, ext_code;
MIR_insn_t new_insn, prev_insn, next_insn, ext_insn;
MIR_insn_t prev_call_insn = DLIST_PREV (MIR_insn_t, call_insn);
@ -226,13 +226,15 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
}
for (size_t i = start; i < nops; i++) {
arg_op = call_insn->ops[i];
gen_assert (arg_op.mode == MIR_OP_REG || arg_op.mode == MIR_OP_HARD_REG
|| (arg_op.mode == MIR_OP_MEM && arg_op.u.mem.type == MIR_T_BLK)
|| (arg_op.mode == MIR_OP_HARD_REG_MEM && arg_op.u.hard_reg_mem.type == MIR_T_BLK));
gen_assert (
arg_op.mode == MIR_OP_REG || arg_op.mode == MIR_OP_HARD_REG
|| (arg_op.mode == MIR_OP_MEM && MIR_blk_type_p (arg_op.u.mem.type))
|| (arg_op.mode == MIR_OP_HARD_REG_MEM && MIR_blk_type_p (arg_op.u.hard_reg_mem.type)));
if (i - start < nargs) {
type = arg_vars[i - start].type;
} else if (arg_op.mode == MIR_OP_MEM || arg_op.mode == MIR_OP_HARD_REG_MEM) {
type = MIR_T_BLK;
type = arg_op.mode == MIR_OP_MEM ? arg_op.u.mem.type : arg_op.u.hard_reg_mem.type;
assert (type == MIR_T_BLK || type == MIR_T_RBLK);
} else {
mode = call_insn->ops[i].value_mode; // ??? smaller ints
gen_assert (mode == MIR_OP_INT || mode == MIR_OP_UINT || mode == MIR_OP_FLOAT
@ -257,7 +259,7 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
gen_assert (arg_op.mode == MIR_OP_MEM);
size = (arg_op.u.mem.disp + 7) / 8 * 8;
gen_assert (prev_call_insn != NULL); /* call_insn should not be 1st after simplification */
if (size > 0 && size <= 8 * 8) { /* upto 8 moves */
if (size > 0 && size <= 2 * 8) { /* upto 2 moves */
disp = 0;
first_p = TRUE;
temp_op = MIR_new_reg_op (ctx, gen_new_temp_reg (gen_ctx, MIR_T_I64, func));
@ -315,10 +317,23 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
!= MIR_NON_HARD_REG) {
/* put arguments to argument hard regs */
if (ext_insn != NULL) gen_add_insn_before (gen_ctx, call_insn, ext_insn);
arg_reg_op = _MIR_new_hard_reg_op (ctx, arg_reg);
new_insn = MIR_new_insn (ctx, new_insn_code, arg_reg_op, arg_op);
if (type != MIR_T_RBLK) {
new_arg_op = _MIR_new_hard_reg_op (ctx, arg_reg);
new_insn = MIR_new_insn (ctx, new_insn_code, new_arg_op, arg_op);
} else if (arg_op.mode == MIR_OP_MEM) {
new_insn = MIR_new_insn (ctx, new_insn_code, _MIR_new_hard_reg_op (ctx, arg_reg),
MIR_new_reg_op (ctx, arg_op.u.mem.base));
new_arg_op = _MIR_new_hard_reg_mem_op (ctx, MIR_T_RBLK, arg_op.u.mem.disp, arg_reg,
MIR_NON_HARD_REG, 1);
} else {
assert (arg_op.mode == MIR_OP_HARD_REG_MEM);
new_insn = MIR_new_insn (ctx, new_insn_code, _MIR_new_hard_reg_op (ctx, arg_reg),
_MIR_new_hard_reg_op (ctx, arg_op.u.hard_reg_mem.base));
new_arg_op = _MIR_new_hard_reg_mem_op (ctx, MIR_T_RBLK, arg_op.u.hard_reg_mem.disp, arg_reg,
MIR_NON_HARD_REG, 1);
}
gen_add_insn_before (gen_ctx, call_insn, new_insn);
call_insn->ops[i] = arg_reg_op;
call_insn->ops[i] = new_arg_op;
#ifdef _WIN64
/* copy fp reg varargs into corresponding int regs */
if (proto->vararg_p && type == MIR_T_D) {
@ -335,6 +350,11 @@ static void machinize_call (gen_ctx_t gen_ctx, MIR_insn_t call_insn) {
}
#endif
} else { /* put arguments on the stack */
if (type == MIR_T_RBLK) {
assert (arg_op.mode == MIR_OP_MEM || arg_op.mode == MIR_OP_HARD_REG_MEM);
arg_op = arg_op.mode == MIR_OP_MEM ? MIR_new_reg_op (ctx, arg_op.u.mem.base)
: _MIR_new_hard_reg_op (ctx, arg_op.u.hard_reg_mem.base);
}
mem_type = type == MIR_T_F || type == MIR_T_D || type == MIR_T_LD ? type : MIR_T_I64;
new_insn_code
= (type == MIR_T_F ? MIR_FMOV
@ -642,7 +662,7 @@ static void target_machinize (gen_ctx_t gen_ctx) {
mem_offset += 16;
} else if (var.type == MIR_T_BLK) {
mem_offset += var.size;
} else {
} else { /* including RBLK */
gp_offset += 8;
if (gp_offset >= 48) mem_offset += 8;
}
@ -1149,11 +1169,11 @@ static const struct pattern patterns[] = {
{MIR_MOV, "m0 i0", "Y C6 /0 m0 i1"}, /* mov m0,i8 */
{MIR_MOV, "m2 i2", "Y C7 /0 m0 I1"}, /* mov m0,i32 */
{MIR_FMOV, "r r", "F3 Y 0F 10 r0 R1"}, /* movss r0,r1 */
{MIR_FMOV, "r r", "Y 0F 28 r0 R1"}, /* movaps r0,r1 */
{MIR_FMOV, "r mf", "F3 Y 0F 10 r0 m1"}, /* movss r0,m32 */
{MIR_FMOV, "mf r", "F3 Y 0F 11 r1 m0"}, /* movss r0,m32 */
{MIR_DMOV, "r r", "F2 Y 0F 10 r0 R1"}, /* movsd r0,r1 */
{MIR_DMOV, "r r", "66 Y 0F 28 r0 R1"}, /* movapd r0,r1 */
{MIR_DMOV, "r md", "F2 Y 0F 10 r0 m1"}, /* movsd r0,m64 */
{MIR_DMOV, "md r", "F2 Y 0F 11 r1 m0"}, /* movsd m64,r0 */
@ -1199,7 +1219,7 @@ static const struct pattern patterns[] = {
/* fld m1;fstpl -16(sp);movsd r0,-16(sp): */
{MIR_LD2D, "r mld", "DB /5 m1; DD /3 mt; F2 Y 0F 10 r0 mt"},
{MIR_D2F, "r r", "F2 0F 5A r0 R1"}, /* cvtsd2ss r0,r1 */
{MIR_D2F, "r r", "F2 Y 0F 5A r0 R1"}, /* cvtsd2ss r0,r1 */
{MIR_D2F, "r md", "F2 Y 0F 5A r0 m1"}, /* cvtsd2ss r0,m1 */
/* fld m1;fstps -16(sp);movss r0, -16(sp): */
{MIR_LD2F, "r mld", "DB /5 m1; D9 /3 mt; F3 Y 0F 10 r0 mt"},

File diff suppressed because it is too large Load Diff

@ -22,11 +22,17 @@
#define MIR_HASH_UNALIGNED_ACCESS 0
#endif
#if (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || defined(_MSC_VER)
#define MIR_LITTLE_ENDIAN 1
#else
#define MIR_LITTLE_ENDIAN 0
#endif
static inline uint64_t mir_get_key_part (const uint8_t *v, size_t len, int relax_p) {
size_t i, start = 0;
uint64_t tail = 0;
if (relax_p || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) {
if (relax_p || MIR_LITTLE_ENDIAN) {
#if MIR_HASH_UNALIGNED_ACCESS
if (len == sizeof (uint64_t)) return *(uint64_t *) v;
if (len >= sizeof (uint32_t)) {

@ -32,6 +32,10 @@ void MIR_set_interp_interface (MIR_context_t ctx, MIR_item_t func_item) {}
#define ALWAYS_INLINE inline
#endif
#if defined(_MSC_VER)
#define alloca _alloca
#endif
typedef MIR_val_t *code_t;
typedef struct func_desc {
@ -409,7 +413,7 @@ static void generate_icode (MIR_context_t ctx, MIR_item_t func_item) {
mir_assert (ops[i].mode == MIR_OP_LABEL);
v.i = 0;
} else if (MIR_call_code_p (code) && ops[i].mode == MIR_OP_MEM) {
mir_assert (ops[i].u.mem.type == MIR_T_BLK);
mir_assert (MIR_blk_type_p (ops[i].u.mem.type));
v.i = ops[i].u.mem.base;
update_max_nreg (v.i, &max_nreg);
} else {
@ -1375,7 +1379,7 @@ static htab_hash_t ff_interface_hash (ff_interface_t i, void *arg) {
h = mir_hash (i->res_types, sizeof (MIR_type_t) * i->nres, h);
for (size_t n = 0; n < i->nargs; n++) {
h = mir_hash_step (h, i->arg_descs[n].type);
if (i->arg_descs[n].type == MIR_T_BLK) h = mir_hash_step (h, i->arg_descs[n].size);
if (MIR_blk_type_p (i->arg_descs[n].type)) h = mir_hash_step (h, i->arg_descs[n].size);
}
return mir_hash_finish (h);
}
@ -1385,7 +1389,7 @@ static int ff_interface_eq (ff_interface_t i1, ff_interface_t i2, void *arg) {
if (memcmp (i1->res_types, i2->res_types, sizeof (MIR_type_t) * i1->nres) != 0) return FALSE;
for (size_t n = 0; n < i1->nargs; n++) {
if (i1->arg_descs[n].type != i2->arg_descs[n].type) return FALSE;
if (i1->arg_descs[n].type == MIR_T_BLK && i1->arg_descs[n].size != i2->arg_descs[n].size)
if (MIR_blk_type_p (i1->arg_descs[n].type) && i1->arg_descs[n].size != i2->arg_descs[n].size)
return FALSE;
}
return TRUE;
@ -1452,12 +1456,12 @@ static void call (MIR_context_t ctx, MIR_val_t *bp, MIR_op_t *insn_arg_ops, code
for (i = 0; i < nargs; i++) {
if (i < arg_vars_num) {
call_arg_descs[i].type = arg_vars[i].type;
if (arg_vars[i].type == MIR_T_BLK) call_arg_descs[i].size = arg_vars[i].size;
if (MIR_blk_type_p (arg_vars[i].type)) call_arg_descs[i].size = arg_vars[i].size;
continue;
}
if (insn_arg_ops[i].mode == MIR_OP_MEM) { /* block arg */
mir_assert (insn_arg_ops[i].u.mem.type == MIR_T_BLK);
call_arg_descs[i].type = MIR_T_BLK;
if (insn_arg_ops[i].mode == MIR_OP_MEM) { /* (r)block arg */
mir_assert (MIR_blk_type_p (insn_arg_ops[i].u.mem.type));
call_arg_descs[i].type = insn_arg_ops[i].u.mem.type;
call_arg_descs[i].size = insn_arg_ops[i].u.mem.disp;
} else {
mode = insn_arg_ops[i].value_mode;
@ -1493,7 +1497,8 @@ static void call (MIR_context_t ctx, MIR_val_t *bp, MIR_op_t *insn_arg_ops, code
case MIR_T_D: call_res_args[i + nres].d = arg_vals[i].d; break;
case MIR_T_LD: call_res_args[i + nres].ld = arg_vals[i].ld; break;
case MIR_T_P:
case MIR_T_BLK: call_res_args[i + nres].u = (uint64_t) arg_vals[i].a; break;
case MIR_T_BLK:
case MIR_T_RBLK: call_res_args[i + nres].u = (uint64_t) arg_vals[i].a; break;
default: mir_assert (FALSE);
}
}
@ -1667,7 +1672,8 @@ static void interp (MIR_context_t ctx, MIR_item_t func_item, va_list va, MIR_val
}
case MIR_T_D: arg_vals[i].d = va_arg (va, double); break;
case MIR_T_LD: arg_vals[i].ld = va_arg (va, long double); break;
case MIR_T_P: arg_vals[i].a = va_arg (va, void *); break;
case MIR_T_P:
case MIR_T_RBLK: arg_vals[i].a = va_arg (va, void *); break;
case MIR_T_BLK:
#if defined(__PPC64__) || defined(__aarch64__)
arg_vals[i].a = va_stack_arg_builtin (&va, arg_vars[i].size);

@ -2,7 +2,9 @@
Copyright (C) 2018-2020 Vladimir Makarov <vmakarov.gcc@gmail.com>.
*/
// _MIR_get_thunk, _MIR_redirect_thunk, _MIR_get_interp_shim, _MIR_get_ff_call, _MIR_get_wrapper
/* BLK is passed in int regs, and if the regs are not enough, the rest is passed on the stack.
RBLK is always passed by address. */
#define VA_LIST_IS_ARRAY_P 1 /* one element which is a pointer to args */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
@ -321,26 +323,11 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
if (qwords > 0) ppc64_gen_ld (ctx, 11, res_reg, param_offset, MIR_T_I64);
for (blk_disp = 0; qwords > 0 && n_gpregs < 8; qwords--, n_gpregs++, blk_disp += 8, disp += 8)
ppc64_gen_ld (ctx, n_gpregs + 3, 11, blk_disp, MIR_T_I64);
#if 0
/* passing FBLK: */
for (blk_disp = 0, qwords = (arg_descs[i].size + 7) / 8;
qwords > 0 && n_fpregs < 13;
qwords--, n_fpregs++, blk_disp += 8, disp += 8) {
ppc64_gen_ld (ctx, n_fpregs + 1, 11, blk_disp, MIR_T_D);
if (vararg_p) {
if (n_gpregs < 8) { /* load into gp reg too */
ppc64_gen_ld (ctx, n_gpregs + 3, 11, blk_disp, MIR_T_I64);
} else {
ppc64_gen_st (ctx, 1 + n_fpregs, 1, disp, MIR_T_D);
}
}
}
#endif
if (qwords > 0) gen_blk_mov (ctx, disp, 11, blk_disp, qwords);
disp += qwords * 8;
param_offset += 16;
continue;
} else if (n_gpregs < 8) {
} else if (n_gpregs < 8) { /* including RBLK */
ppc64_gen_ld (ctx, n_gpregs + 3, res_reg, param_offset, MIR_T_I64);
} else {
ppc64_gen_ld (ctx, 0, res_reg, param_offset, MIR_T_I64);
@ -355,16 +342,16 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
disp = 0;
for (uint32_t i = 0; i < nres; i++) {
type = res_types[i];
if ((type == MIR_T_F || type == MIR_T_D || type == MIR_T_LD) && n_fpregs < 4) {
if ((type == MIR_T_F || type == MIR_T_D || type == MIR_T_LD) && n_fpregs < 8) {
ppc64_gen_st (ctx, n_fpregs + 1, res_reg, disp, type);
n_fpregs++;
if (type == MIR_T_LD) {
if (n_fpregs >= 4)
if (n_fpregs >= 8)
(*error_func) (MIR_ret_error, "ppc64 can not handle this combination of return values");
ppc64_gen_st (ctx, n_fpregs + 1, res_reg, disp + 8, type);
n_fpregs++;
}
} else if (n_gpregs < 1) { // just one gp reg
} else if (n_gpregs < 2) { // just one-two gp reg
ppc64_gen_st (ctx, n_gpregs + 3, res_reg, disp, MIR_T_I64);
n_gpregs++;
} else {
@ -444,7 +431,7 @@ void *_MIR_get_interp_shim (MIR_context_t ctx, MIR_item_t func_item, void *handl
ppc64_gen_st (ctx, 0, 1, disp + 8, MIR_T_D);
}
}
} else if (type == MIR_T_BLK) { // ??? FBLK
} else if (type == MIR_T_BLK) {
qwords = (arg_vars[i].size + 7) / 8;
for (; qwords > 0 && n_gpregs < 8; qwords--, n_gpregs++, disp += 8, param_offset += 8)
ppc64_gen_st (ctx, n_gpregs + 3, 1, disp, MIR_T_I64);
@ -484,16 +471,16 @@ void *_MIR_get_interp_shim (MIR_context_t ctx, MIR_item_t func_item, void *handl
disp = n_gpregs = n_fpregs = 0;
for (uint32_t i = 0; i < nres; i++) {
type = res_types[i];
if ((type == MIR_T_F || type == MIR_T_D || type == MIR_T_LD) && n_fpregs < 4) {
if ((type == MIR_T_F || type == MIR_T_D || type == MIR_T_LD) && n_fpregs < 8) {
ppc64_gen_ld (ctx, n_fpregs + 1, res_reg, disp, type);
n_fpregs++;
if (type == MIR_T_LD) {
if (n_fpregs >= 4)
if (n_fpregs >= 8)
(*error_func) (MIR_ret_error, "ppc64 can not handle this combination of return values");
ppc64_gen_ld (ctx, n_fpregs + 1, res_reg, disp + 8, type);
n_fpregs++;
}
} else if (n_gpregs < 1) { // just one gp reg
} else if (n_gpregs < 2) { // just one-two gp reg
ppc64_gen_ld (ctx, n_gpregs + 3, res_reg, disp, MIR_T_I64);
n_gpregs++;
} else {

@ -4,6 +4,8 @@
/* Long doubles (-mlong-double=128) are always passed by its address (for args and results) */
/* BLK and RBLK args are always passed by address. */
#if 0 && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#error "s390x works only in BE mode"
#endif
@ -249,7 +251,7 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
if (type == MIR_T_BLK) frame_size += (arg_descs[i].size + 7) / 8; /* blk value space */
if ((type == MIR_T_F || type == MIR_T_D) && n_fpregs < 4) {
n_fpregs++;
} else if (type != MIR_T_F && type != MIR_T_D && n_gpregs < 5) {
} else if (type != MIR_T_F && type != MIR_T_D && n_gpregs < 5) { /* RBLK too */
n_gpregs++;
} else {
frame_size += 8;
@ -298,7 +300,7 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
s390x_gen_st (ctx, 8, 15, disp, MIR_T_I64); /* stg r8,disp(r15) */
disp += 8;
}
} else if (n_gpregs < 5) {
} else if (n_gpregs < 5) { /* RBLK too */
s390x_gen_ld (ctx, n_gpregs + 2, res_reg, param_offset,
MIR_T_I64); /* lg* rn,param_offset(r7) */
n_gpregs++;

@ -2,6 +2,8 @@
Copyright (C) 2018-2020 Vladimir Makarov <vmakarov.gcc@gmail.com>.
*/
/* BLK and RBLK args are always passed by address. BLK first is copied on the caller stack. */
#define VA_LIST_IS_ARRAY_P 1
void *_MIR_get_bstart_builtin (MIR_context_t ctx) {
@ -297,7 +299,7 @@ void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, s
for (size_t i = 0; i < nargs; i++) {
MIR_type_t type = arg_descs[i].type;
if ((MIR_T_I8 <= type && type <= MIR_T_U64) || type == MIR_T_P) {
if ((MIR_T_I8 <= type && type <= MIR_T_U64) || type == MIR_T_P || type == MIR_T_RBLK) {
if (n_iregs < max_iregs) {
gen_mov (ctx, (i + nres) * sizeof (long double), iregs[n_iregs++], TRUE);
#ifdef _WIN64

@ -319,6 +319,7 @@ static const struct insn_desc insn_descs[] = {
{MIR_VA_END, "va_end", {MIR_OP_INT, MIR_OP_BOUND}},
{MIR_LABEL, "label", {MIR_OP_BOUND}},
{MIR_UNSPEC, "unspec", {MIR_OP_BOUND}},
{MIR_PHI, "phi", {MIR_OP_BOUND}},
{MIR_INVALID_INSN, "invalid-insn", {MIR_OP_BOUND}},
};
@ -657,8 +658,6 @@ static void remove_func_insns (MIR_context_t ctx, MIR_item_t func_item,
}
static void remove_item (MIR_context_t ctx, MIR_item_t item) {
MIR_module_t module = item->module;
switch (item->item_type) {
case MIR_func_item:
remove_func_insns (ctx, item, &item->u.func->insns);
@ -789,6 +788,7 @@ static const char *type_str (MIR_type_t tp) {
case MIR_T_LD: return "ld";
case MIR_T_P: return "p";
case MIR_T_BLK: return "blk";
case MIR_T_RBLK: return "rblk";
case MIR_T_UNDEF: return "undef";
default: return "";
}
@ -1328,7 +1328,10 @@ void MIR_finish_func (MIR_context_t ctx) {
int out_p, can_be_out_p;
code = insn->code;
if (!curr_func->vararg_p && code == MIR_VA_START) {
if (code == MIR_PHI) {
curr_func = NULL;
(*error_func) (MIR_vararg_func_error, "phi can be used only internally");
} else if (!curr_func->vararg_p && code == MIR_VA_START) {
curr_func = NULL;
(*error_func) (MIR_vararg_func_error, "func %s: va_start is not in vararg function",
func_name);
@ -1382,12 +1385,12 @@ void MIR_finish_func (MIR_context_t ctx) {
case MIR_OP_MEM:
expr_p = FALSE;
if (wrong_type_p (insn->ops[i].u.mem.type)
&& (insn->ops[i].u.mem.type != MIR_T_BLK || !MIR_call_code_p (code))) {
&& (!MIR_blk_type_p (insn->ops[i].u.mem.type) || !MIR_call_code_p (code))) {
curr_func = NULL;
(*error_func) (MIR_wrong_type_error, "func %s: in instruction '%s': wrong type memory",
func_name, insn_descs[code].name);
}
if (insn->ops[i].u.mem.type == MIR_T_BLK && insn->ops[i].u.mem.disp < 0) {
if (MIR_blk_type_p (insn->ops[i].u.mem.type) && insn->ops[i].u.mem.disp < 0) {
curr_func = NULL;
(*error_func) (MIR_wrong_type_error,
"func %s: in instruction '%s': block type memory with disp < 0", func_name,
@ -1737,6 +1740,9 @@ MIR_op_mode_t MIR_insn_op_mode (MIR_context_t ctx, MIR_insn_t insn, size_t nop,
*out_p = FALSE;
/* should be already checked in MIR_finish_func */
return nop == 0 && code == MIR_SWITCH ? MIR_OP_INT : insn->ops[nop].mode;
} else if (code == MIR_PHI) {
*out_p = nop == 0;
return insn->ops[nop].mode;
} else if (MIR_call_code_p (code) || code == MIR_UNSPEC) {
MIR_op_t proto_op;
MIR_proto_t proto;
@ -1819,12 +1825,14 @@ MIR_insn_t MIR_new_insn_arr (MIR_context_t ctx, MIR_insn_code_t code, size_t nop
size_t args_start, narg, i = 0, expected_nops = insn_code_nops (ctx, code);
mir_assert (ops != NULL);
if (!MIR_call_code_p (code) && code != MIR_UNSPEC && code != MIR_RET && code != MIR_SWITCH
&& nops != expected_nops) {
if (!MIR_call_code_p (code) && code != MIR_UNSPEC && code != MIR_PHI && code != MIR_RET
&& code != MIR_SWITCH && nops != expected_nops) {
(*error_func) (MIR_ops_num_error, "wrong number of operands for insn %s",
insn_descs[code].name);
} else if (code == MIR_SWITCH) {
if (nops < 2) (*error_func) (MIR_ops_num_error, "number of MIR_SWITCH operands is less 2");
} else if (code == MIR_PHI) {
if (nops < 3) (*error_func) (MIR_ops_num_error, "number of MIR_PHI operands is less 3");
} else if (MIR_call_code_p (code) || code == MIR_UNSPEC) {
args_start = code == MIR_UNSPEC ? 1 : 2;
if (nops < args_start)
@ -1845,12 +1853,12 @@ MIR_insn_t MIR_new_insn_arr (MIR_context_t ctx, MIR_insn_code_t code, size_t nop
"number of %s operands or results does not correspond to prototype %s",
code == MIR_UNSPEC ? "unspec" : "call", proto->name);
for (i = args_start; i < nops; i++) {
if (ops[i].mode == MIR_OP_MEM && ops[i].u.mem.type == MIR_T_BLK) {
if (ops[i].mode == MIR_OP_MEM && MIR_blk_type_p (ops[i].u.mem.type)) {
if (i - args_start < proto->nres)
(*error_func) (MIR_wrong_type_error, "result of %s is block type memory",
code == MIR_UNSPEC ? "unspec" : "call");
else if ((narg = i - args_start - proto->nres) < VARR_LENGTH (MIR_var_t, proto->args)) {
if (VARR_GET (MIR_var_t, proto->args, narg).type != MIR_T_BLK) {
if (VARR_GET (MIR_var_t, proto->args, narg).type != ops[i].u.mem.type) {
(*error_func) (MIR_wrong_type_error,
"arg of %s is block type memory but param is not of block type",
code == MIR_UNSPEC ? "unspec" : "call");
@ -1861,13 +1869,16 @@ MIR_insn_t MIR_new_insn_arr (MIR_context_t ctx, MIR_insn_code_t code, size_t nop
(unsigned long) ops[i].u.mem.disp,
code == MIR_UNSPEC ? "unspec" : "call");
}
} else if (ops[i].u.mem.type == MIR_T_RBLK) {
(*error_func) (MIR_wrong_type_error,
"RBLK memory can not correspond to unnamed param in %s insn",
code == MIR_UNSPEC ? "unspec" : "call");
}
} else if (i - args_start >= proto->nres
&& (narg = i - args_start - proto->nres) < VARR_LENGTH (MIR_var_t, proto->args)
&& VARR_GET (MIR_var_t, proto->args, narg).type == MIR_T_BLK) {
&& MIR_blk_type_p (VARR_GET (MIR_var_t, proto->args, narg).type)) {
(*error_func) (MIR_wrong_type_error,
"param of %s is of block type but arg is not of block type memory "
"but ",
"param of %s is of block type but arg is not of block type memory",
code == MIR_UNSPEC ? "unspec" : "call");
}
}
@ -1897,7 +1908,9 @@ MIR_insn_t MIR_new_insn (MIR_context_t ctx, MIR_insn_code_t code, ...) {
va_list argp;
size_t nops = insn_code_nops (ctx, code);
if (MIR_call_code_p (code) || code == MIR_UNSPEC || code == MIR_RET || code == MIR_SWITCH)
if (code == MIR_PHI)
(*error_func) (MIR_call_op_error, "Use only MIR_new_insn_arr for creating a phi insn");
else if (MIR_call_code_p (code) || code == MIR_UNSPEC || code == MIR_RET || code == MIR_SWITCH)
(*error_func) (MIR_call_op_error,
"Use only MIR_new_insn_arr or MIR_new_{call,unspec,ret}_insn for creating a "
"call/unspec/ret/switch insn");
@ -2438,7 +2451,7 @@ static void output_func_proto (FILE *f, size_t nres, MIR_type_t *types, size_t n
var = VARR_GET (MIR_var_t, args, i);
if (i != 0 || nres != 0) fprintf (f, ", ");
mir_assert (var.name != NULL);
if (var.type != MIR_T_BLK)
if (!MIR_blk_type_p (var.type))
fprintf (f, "%s:%s", MIR_type_str (NULL, var.type), var.name);
else
fprintf (f, "%s:%lu(%s)", MIR_type_str (NULL, var.type), (unsigned long) var.size, var.name);
@ -2633,12 +2646,10 @@ static MIR_reg_t vn_add_val (MIR_context_t ctx, MIR_func_t func, MIR_type_t type
return val.reg;
}
/* Generate a module-unique name for a temporary item into the
   caller-provided BUFF of capacity BUFF_LEN.  The name is
   TEMP_ITEM_NAME_PREFIX followed by a per-module counter, so successive
   calls for the same MODULE never collide.  The caller owns the buffer;
   this replaces the old API that returned a shared static buffer and so
   was not reentrant.  CTX is unused but kept for API uniformity.
   NOTE(review): the markerless diff left both the old static-buffer
   version and this one interleaved; only the updated version is kept. */
void _MIR_get_temp_item_name (MIR_context_t ctx, MIR_module_t module, char *buff, size_t buff_len) {
  mir_assert (module != NULL);
  module->last_temp_item_num++;
  snprintf (buff, buff_len, "%s%u", TEMP_ITEM_NAME_PREFIX, (unsigned) module->last_temp_item_num);
}
void MIR_simplify_op (MIR_context_t ctx, MIR_item_t func_item, MIR_insn_t insn, int nop, int out_p,
@ -2651,6 +2662,7 @@ void MIR_simplify_op (MIR_context_t ctx, MIR_item_t func_item, MIR_insn_t insn,
MIR_op_mode_t value_mode = op->value_mode;
int move_p = code == MIR_MOV || code == MIR_FMOV || code == MIR_DMOV || code == MIR_LDMOV;
if (code == MIR_PHI) return; /* do nothing: it is a phi insn */
if (code == MIR_UNSPEC && nop == 0) return; /* do nothing: it is an unspec code */
if (MIR_call_code_p (code)) {
if (nop == 0) return; /* do nothing: it is a prototype */
@ -2680,11 +2692,13 @@ void MIR_simplify_op (MIR_context_t ctx, MIR_item_t func_item, MIR_insn_t insn,
&& (op->mode == MIR_OP_FLOAT || op->mode == MIR_OP_DOUBLE
|| op->mode == MIR_OP_LDOUBLE))) {
const char *name;
char buff[50];
MIR_item_t item;
MIR_module_t m = curr_module;
curr_module = func_item->module;
name = _MIR_get_temp_item_name (ctx, curr_module);
_MIR_get_temp_item_name (ctx, curr_module, buff, sizeof (buff));
name = buff;
if (op->mode == MIR_OP_STR) {
item = MIR_new_string_data (ctx, name, op->u.str);
*op = MIR_new_ref_op (ctx, item);
@ -2801,7 +2815,7 @@ void MIR_simplify_op (MIR_context_t ctx, MIR_item_t func_item, MIR_insn_t insn,
|| (code == MIR_VA_END && nop == 0))
&& mem_op.u.mem.type == MIR_T_UNDEF) {
*op = MIR_new_reg_op (ctx, addr_reg);
} else if (mem_op.u.mem.type != MIR_T_BLK || !MIR_call_code_p (code)) {
} else if (!MIR_blk_type_p (mem_op.u.mem.type) || !MIR_call_code_p (code)) {
type = (mem_op.u.mem.type == MIR_T_F || mem_op.u.mem.type == MIR_T_D
|| mem_op.u.mem.type == MIR_T_LD
? mem_op.u.mem.type
@ -3205,7 +3219,7 @@ static void process_inlines (MIR_context_t ctx, MIR_item_t func_item) {
MIR_insn_t func_insn, next_func_insn, call, insn, new_insn, ret_insn, ret_label;
MIR_item_t called_func_item;
MIR_func_t func, called_func;
size_t func_insns_num, called_func_insns_num, blk_size;
size_t func_insns_num, called_func_insns_num;
char buff[50];
mir_assert (func_item->item_type == MIR_func_item);
@ -3264,19 +3278,20 @@ static void process_inlines (MIR_context_t ctx, MIR_item_t func_item) {
new_reg = MIR_new_func_reg (ctx, func, type, VARR_ADDR (char, temp_string));
set_inline_reg_map (ctx, old_reg, new_reg);
if (i < nargs && call->nops > i + 2 + called_func->nres) { /* Parameter passing */
if (var.type == MIR_T_BLK) { /* alloca and block move: */
MIR_op_t op = call->ops[i + 2 + called_func->nres];
MIR_op_t op = call->ops[i + 2 + called_func->nres];
mir_assert (op.mode == MIR_OP_MEM);
mir_assert (!MIR_blk_type_p (type) || (op.mode == MIR_OP_MEM && type == MIR_T_I64));
if (var.type == MIR_T_BLK) { /* alloca and block move: */
add_blk_move (ctx, func_item, ret_label, MIR_new_reg_op (ctx, new_reg),
MIR_new_reg_op (ctx, op.u.mem.base), var.size);
} else {
if (var.type == MIR_T_RBLK) op = MIR_new_reg_op (ctx, op.u.mem.base);
new_insn
= MIR_new_insn (ctx,
type == MIR_T_F
? MIR_FMOV
: type == MIR_T_D ? MIR_DMOV : type == MIR_T_LD ? MIR_LDMOV : MIR_MOV,
MIR_new_reg_op (ctx, new_reg), call->ops[i + 2 + called_func->nres]);
MIR_new_reg_op (ctx, new_reg), op);
MIR_insert_insn_before (ctx, func_item, ret_label, new_insn);
}
}
@ -3293,8 +3308,7 @@ static void process_inlines (MIR_context_t ctx, MIR_item_t func_item) {
inline_insns_after++;
actual_nops = MIR_insn_nops (ctx, insn);
new_insn = MIR_copy_insn (ctx, insn);
mir_assert (insn->code != MIR_VA_ARG && insn->code != MIR_VA_STACK_ARG
&& insn->code != MIR_VA_START && insn->code != MIR_VA_END);
/* va insns are possible here as va_list can be passed as arg */
if (insn->code == MIR_ALLOCA) alloca_p = TRUE;
for (i = 0; i < actual_nops; i++) switch (new_insn->ops[i].mode) {
case MIR_OP_REG:
@ -3453,8 +3467,8 @@ static void *mem_map (size_t len) {
static size_t mem_page_size () { return sysconf (_SC_PAGE_SIZE); }
#else
#include <memoryapi.h>
#include <sysinfoapi.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define PROT_WRITE_EXEC PAGE_EXECUTE_READWRITE
#define PROT_READ_EXEC PAGE_EXECUTE_READ
@ -3497,10 +3511,9 @@ struct machine_code_ctx {
#define machine_insns ctx->machine_code_ctx->machine_insns
static code_holder_t *get_last_code_holder (MIR_context_t ctx, size_t size) {
uint8_t *mem, *free_adddr;
uint8_t *mem;
size_t len, npages;
code_holder_t ch, *ch_ptr;
int new_p = TRUE;
if ((len = VARR_LENGTH (code_holder_t, code_holders)) > 0) {
ch_ptr = VARR_ADDR (code_holder_t, code_holders) + len - 1;
@ -3659,7 +3672,7 @@ typedef enum {
REP3 (TAG_EL, MEM_DISP_INDEX, MEM_BASE_INDEX, MEM_DISP_BASE_INDEX),
/* MIR types. The same order as MIR types: */
REP8 (TAG_EL, TI8, TU8, TI16, TU16, TI32, TU32, TI64, TU64),
REP6 (TAG_EL, TF, TD, TP, TV, TBLOCK, EOI),
REP7 (TAG_EL, TF, TD, TP, TV, TBLOCK, TRBLOCK, EOI),
TAG_EL (EOFILE), /* end of insn with variable number operands (e.g. a call) or end of file */
/* unsigned integer 0..127 is kept in one byte. The most significant bit of the byte is 1: */
U0_MASK = 0x7f,
@ -3954,8 +3967,8 @@ static size_t write_insn (MIR_context_t ctx, writer_func_t writer, MIR_func_t fu
MIR_insn_code_t code = insn->code;
size_t len;
if (code == MIR_UNSPEC)
(*error_func) (MIR_binary_io_error, "MIR_UNSPEC is not portable and can not be output");
if (code == MIR_UNSPEC || code == MIR_PHI)
(*error_func) (MIR_binary_io_error, "UNSPEC or PHI is not portable and can not be output");
if (code == MIR_LABEL) return write_lab (ctx, writer, insn);
nops = MIR_insn_nops (ctx, insn);
len = write_uint (ctx, writer, code);
@ -4065,7 +4078,7 @@ static size_t write_item (MIR_context_t ctx, writer_func_t writer, MIR_item_t it
var = VARR_GET (MIR_var_t, proto->args, i);
len += write_type (ctx, writer, var.type);
len += write_name (ctx, writer, var.name);
if (var.type == MIR_T_BLK) len += write_uint (ctx, writer, var.size);
if (MIR_blk_type_p (var.type)) len += write_uint (ctx, writer, var.size);
}
len += put_byte (ctx, writer, TAG_EOI);
return len;
@ -4080,7 +4093,7 @@ static size_t write_item (MIR_context_t ctx, writer_func_t writer, MIR_item_t it
var = VARR_GET (MIR_var_t, func->vars, i);
len += write_type (ctx, writer, var.type);
len += write_name (ctx, writer, var.name);
if (var.type == MIR_T_BLK) len += write_uint (ctx, writer, var.size);
if (MIR_blk_type_p (var.type)) len += write_uint (ctx, writer, var.size);
}
len += put_byte (ctx, writer, TAG_EOI);
nlocals = VARR_LENGTH (MIR_var_t, func->vars) - func->nargs;
@ -4321,7 +4334,7 @@ static MIR_type_t tag_type (bin_tag_t tag) { return (MIR_type_t) (tag - TAG_TI8)
/* Read one byte from the binary MIR stream and decode it as a type tag.
   Valid type tags span TAG_TI8..TAG_TRBLOCK (TRBLOCK is the last type
   tag after the RBLK addition); anything outside that range reports
   ERR_MSG through the context error handler.
   NOTE(review): the markerless diff left the pre-RBLK bound check
   (TAG_TBLOCK) stacked above the updated one, which would reject the
   new RBLK tag; only the TAG_TRBLOCK check is kept. */
static MIR_type_t read_type (MIR_context_t ctx, const char *err_msg) {
  int c = get_byte (ctx);

  if (TAG_TI8 > c || c > TAG_TRBLOCK) (*error_func) (MIR_binary_io_error, err_msg);
  return tag_type (c);
}
@ -4376,7 +4389,7 @@ static bin_tag_t read_token (MIR_context_t ctx, token_attr_t *attr) {
REP3 (TAG_CASE, MEM_DISP_BASE_INDEX, EOI, EOFILE)
break;
REP8 (TAG_CASE, TI8, TU8, TI16, TU16, TI32, TU32, TI64, TU64)
REP5 (TAG_CASE, TF, TD, TP, TV, TBLOCK)
REP6 (TAG_CASE, TF, TD, TP, TV, TBLOCK, TRBLOCK)
attr->t = (MIR_type_t) (c - TAG_TI8) + MIR_T_I8;
break;
default: (*error_func) (MIR_binary_io_error, "wrong tag %d", c);
@ -4483,7 +4496,7 @@ static int func_proto_read (MIR_context_t ctx, MIR_module_t module, uint64_t *nr
VARR_TRUNC (MIR_type_t, temp_types, 0);
for (i = 0; i < nres; i++) {
tag = read_token (ctx, &attr);
if (TAG_TI8 > tag || tag > TAG_TBLOCK)
if (TAG_TI8 > tag || tag > TAG_TRBLOCK)
(*error_func) (MIR_binary_io_error, "wrong prototype result type tag %d", tag);
VARR_PUSH (MIR_type_t, temp_types, tag_type (tag));
}
@ -4491,11 +4504,11 @@ static int func_proto_read (MIR_context_t ctx, MIR_module_t module, uint64_t *nr
for (;;) {
tag = read_token (ctx, &attr);
if (tag == TAG_EOI) break;
if (TAG_TI8 > tag || tag > TAG_TBLOCK)
if (TAG_TI8 > tag || tag > TAG_TRBLOCK)
(*error_func) (MIR_binary_io_error, "wrong prototype arg type tag %d", tag);
var.type = tag_type (tag);
var.name = read_name (ctx, module, "wrong arg name");
if (var.type == MIR_T_BLK) var.size = read_uint (ctx, "wrong block arg size");
if (MIR_blk_type_p (var.type)) var.size = read_uint (ctx, "wrong block arg size");
VARR_PUSH (MIR_var_t, temp_vars, var);
}
*nres_ptr = nres;
@ -4655,7 +4668,7 @@ void MIR_read_with_func (MIR_context_t ctx, int (*const reader) (MIR_context_t))
(*error_func) (MIR_binary_io_error, "data %s should have no labels",
name == NULL ? "" : name);
tag = read_token (ctx, &attr);
if (TAG_TI8 > tag || tag > TAG_TBLOCK)
if (TAG_TI8 > tag || tag > TAG_TRBLOCK)
(*error_func) (MIR_binary_io_error, "wrong data type tag %d", tag);
type = tag_type (tag);
VARR_TRUNC (uint8_t, temp_data, 0);
@ -4756,7 +4769,7 @@ void MIR_read_with_func (MIR_context_t ctx, int (*const reader) (MIR_context_t))
for (;;) {
tag = read_token (ctx, &attr);
if (tag == TAG_EOI) break;
if (TAG_TI8 > tag || tag > TAG_TBLOCK)
if (TAG_TI8 > tag || tag > TAG_TRBLOCK)
(*error_func) (MIR_binary_io_error, "wrong local var type tag %d", tag);
MIR_new_func_reg (ctx, func->u.func, tag_type (tag),
read_name (ctx, module, "wrong local var name"));
@ -4769,8 +4782,8 @@ void MIR_read_with_func (MIR_context_t ctx, int (*const reader) (MIR_context_t))
if (insn_code >= MIR_LABEL)
(*error_func) (MIR_binary_io_error, "wrong insn code %d", insn_code);
if (insn_code == MIR_UNSPEC)
(*error_func) (MIR_binary_io_error, "UNSPEC is not portable and can not be read");
if (insn_code == MIR_UNSPEC || insn_code == MIR_PHI)
(*error_func) (MIR_binary_io_error, "UNSPEC or PHI is not portable and can not be read");
for (uint64_t i = 0; i < VARR_LENGTH (uint64_t, insn_label_string_nums); i++) {
lab = to_lab (ctx, VARR_GET (uint64_t, insn_label_string_nums, i));
MIR_append_insn (ctx, func, lab);
@ -5212,8 +5225,8 @@ static void read_func_proto (MIR_context_t ctx, size_t nops, MIR_op_t *ops) {
var.type = ops[i].u.mem.type;
var.name = (const char *) ops[i].u.mem.disp;
if (var.name == NULL)
scan_error (ctx, "all func/prototype args should have form type:name or blk:size(name)");
if (var.type == MIR_T_BLK) var.size = ops[i].u.mem.base;
scan_error (ctx, "all func/prototype args should have form type:name or (r)blk:size(name)");
if (MIR_blk_type_p (var.type)) var.size = ops[i].u.mem.base;
VARR_PUSH (MIR_var_t, temp_vars, var);
}
}
@ -5232,6 +5245,7 @@ static MIR_type_t str2type (const char *type_name) {
if (strcmp (type_name, "i8") == 0) return MIR_T_I8;
if (strcmp (type_name, "u8") == 0) return MIR_T_U8;
if (strcmp (type_name, "blk") == 0) return MIR_T_BLK;
if (strcmp (type_name, "rblk") == 0) return MIR_T_RBLK;
return MIR_T_BOUND;
}
@ -5355,8 +5369,8 @@ void MIR_scan_string (MIR_context_t ctx, const char *str) {
if (!HTAB_DO (insn_name_t, insn_name_tab, in, HTAB_FIND, el))
scan_error (ctx, "Unknown insn %s", name);
insn_code = el.code;
if (insn_code == MIR_UNSPEC)
scan_error (ctx, "UNSPEC is not portable and can not be scanned", name);
if (insn_code == MIR_UNSPEC || insn_code == MIR_PHI)
scan_error (ctx, "UNSPEC or PHI is not portable and can not be scanned", name);
for (n = 0; n < VARR_LENGTH (label_name_t, label_names); n++) {
label = create_label_desc (ctx, VARR_GET (label_name_t, label_names, n));
if (func != NULL) MIR_append_insn (ctx, func, label);
@ -5418,11 +5432,11 @@ void MIR_scan_string (MIR_context_t ctx, const char *str) {
scan_token (ctx, &t, get_string_char, unget_string_char);
if (t.code == TC_NAME) {
op.u.mem.disp = (MIR_disp_t) t.u.name;
} else if (local_p || t.code != TC_INT || type != MIR_T_BLK) {
} else if (local_p || t.code != TC_INT || !MIR_blk_type_p (type)) {
scan_error (ctx, local_p ? "wrong var" : "wrong arg");
} else {
op.u.mem.base = t.u.i;
if (t.u.i <= 0 || t.u.i >= (1l << sizeof (MIR_reg_t) * 8))
if (t.u.i <= 0 || t.u.i >= (1ll << sizeof (MIR_reg_t) * 8))
scan_error (ctx, "invalid block arg size");
scan_token (ctx, &t, get_string_char, unget_string_char);
if (t.code != TC_LEFT_PAR) scan_error (ctx, "wrong block arg");
@ -5708,7 +5722,7 @@ static void scan_finish (MIR_context_t ctx) {
/* New Page */
#if defined(__x86_64__)
#if defined(__x86_64__) || defined(_M_AMD64)
#include "mir-x86_64.c"
#elif defined(__aarch64__)
#include "mir-aarch64.c"

@ -131,6 +131,7 @@ typedef enum {
INSN_EL (VA_END), /* operand is va_list */
INSN_EL (LABEL), /* One immediate operand is unique label number */
INSN_EL (UNSPEC), /* First operand unspec code and the rest are args */
INSN_EL (PHI), /* Used only internally in the generator, the first operand is output */
INSN_EL (INVALID_INSN),
INSN_EL (INSN_BOUND), /* Should be the last */
} MIR_insn_code_t;
@ -141,7 +142,7 @@ typedef enum {
/* MIR value/operand types.  The order matters: integer types first,
   then FP types, then pointer and the block kinds — the predicates
   MIR_int_type_p / MIR_fp_type_p / MIR_blk_type_p rely on it, as does
   the binary reader's tag<->type mapping.
   NOTE(review): merged the markerless diff — the old
   `REP2 (TYPE_EL, P, BLK)` row is superseded by the REP3 row below. */
typedef enum {
  REP8 (TYPE_EL, I8, U8, I16, U16, I32, U32, I64, U64), /* Integer types of different size: */
  REP3 (TYPE_EL, F, D, LD), /* Float or (long) double type */
  REP3 (TYPE_EL, P, BLK, RBLK), /* Pointer, (return) memory block */
  REP2 (TYPE_EL, UNDEF, BOUND),
} MIR_type_t;
@ -151,6 +152,8 @@ static inline int MIR_int_type_p (MIR_type_t t) {
/* TRUE iff T is a floating point type (F, D, or LD). */
static inline int MIR_fp_type_p (MIR_type_t t) { return t >= MIR_T_F && t <= MIR_T_LD; }
/* TRUE iff T is one of the block argument types (BLK or RBLK). */
static inline int MIR_blk_type_p (MIR_type_t t) {
  switch (t) {
  case MIR_T_BLK:
  case MIR_T_RBLK: return 1;
  default: return 0;
  }
}
#if UINTPTR_MAX == 0xffffffff
#define MIR_PTR32 1
#define MIR_PTR64 0
@ -258,9 +261,9 @@ struct MIR_insn {
DEF_DLIST (MIR_insn_t, insn_link);
/* Descriptor of a function/prototype argument or local variable.
   NOTE(review): merged the markerless diff — the old `type` and `size`
   member lines (pre-RBLK comments) duplicated the new ones, which
   would not compile; only the updated members are kept. */
typedef struct MIR_var {
  MIR_type_t type; /* MIR_T_BLK and MIR_T_RBLK can be used only for args */
  const char *name;
  size_t size; /* ignored for types other than MIR_T_BLK and MIR_T_RBLK */
} MIR_var_t;
DEF_VARR (MIR_var_t);
@ -559,7 +562,8 @@ extern void _MIR_restore_func_insns (MIR_context_t ctx, MIR_item_t func_item);
extern void _MIR_simplify_insn (MIR_context_t ctx, MIR_item_t func_item, MIR_insn_t insn,
int keep_ref_p, int mem_float_p);
extern const char *_MIR_get_temp_item_name (MIR_context_t ctx, MIR_module_t module);
extern void _MIR_get_temp_item_name (MIR_context_t ctx, MIR_module_t module, char *buff,
size_t buff_len);
extern MIR_op_t _MIR_new_hard_reg_op (MIR_context_t ctx, MIR_reg_t hard_reg);
@ -599,7 +603,7 @@ extern void *_MIR_get_bend_builtin (MIR_context_t ctx);
/* Argument descriptor used by the foreign-function call interface.
   NOTE(review): merged the markerless diff — the old `size` comment
   line duplicated the member; only the updated line is kept. */
typedef struct {
  MIR_type_t type;
  size_t size; /* used only for block args (type == MIR_T_BLK or MIR_T_RBLK) */
} _MIR_arg_desc_t;
extern void *_MIR_get_ff_call (MIR_context_t ctx, size_t nres, MIR_type_t *res_types, size_t nargs,

@ -19,7 +19,8 @@ static double __attribute__ ((unused)) real_usec_time (void) {
return tv.tv_usec + tv.tv_sec * 1000000.0;
}
#else
#include <profileapi.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// does not return actual time, use as a stopwatch only
static double real_sec_time (void) {

Loading…
Cancel
Save