diff --git a/tccgen.c b/tccgen.c
index a6181b0..4bac2b9 100644
--- a/tccgen.c
+++ b/tccgen.c
@@ -7413,7 +7413,7 @@ static void decl_initializer_alloc(CType *type, AttributeDef *ad, int r,
     if ((r & VT_VALMASK) == VT_LOCAL) {
         sec = NULL;
 #ifdef CONFIG_TCC_BCHECK
-        if (bcheck && (type->t & VT_ARRAY)) {
+        if (bcheck) {
             loc--;
         }
 #endif
@@ -7422,8 +7422,13 @@ static void decl_initializer_alloc(CType *type, AttributeDef *ad, int r,
 #ifdef CONFIG_TCC_BCHECK
         /* handles bounds */
         /* XXX: currently, since we do only one pass, we cannot track
-           '&' operators, so we add only arrays */
-        if (bcheck && (type->t & VT_ARRAY)) {
+           '&' operators.
+           So add all values because address could be taken or
+           a struct/union could contain an array.
+           This makes code very slow!
+           In the future we might remove the values from the bounds_section
+           if address is not taken. */
+        if (bcheck) {
             addr_t *bounds_ptr;
             /* add padding between regions */
             loc--;
diff --git a/x86_64-gen.c b/x86_64-gen.c
index cc66b60..38a7787 100644
--- a/x86_64-gen.c
+++ b/x86_64-gen.c
@@ -654,6 +654,8 @@ ST_FUNC void gen_bounded_ptr_add(void)
     /* save all temporary registers */
     save_regs(0);
 
+    o(0x51525657); /* push %rdi/%rsi/%rdx/%rcx */
+
     /* prepare fast x86_64 function call */
     gv(RC_RAX);
     o(0xc68948); // mov %rax,%rsi ## second arg in %rsi, this must be size
@@ -670,6 +672,7 @@ ST_FUNC void gen_bounded_ptr_add(void)
 
     vtop++;
     vtop->r = TREG_RAX | VT_BOUNDED;
+    o(0x5f5e5a59); /* pop %rcx/%rdx/%rsi/%rdi */
 
     /* relocation offset of the bounding function call point */
     vtop->c.i = (cur_text_section->reloc->data_offset - sizeof(ElfW(Rela)));
@@ -1940,6 +1943,7 @@ void gen_opf(int op)
             v1.c.i = fc;
             load(r, &v1);
             fc = 0;
+            vtop->r = r = r | VT_LVAL;
         }
 
         if (op == TOK_EQ || op == TOK_NE) {
@@ -2007,6 +2011,7 @@ void gen_opf(int op)
             v1.c.i = fc;
             load(r, &v1);
             fc = 0;
+            vtop->r = r = r | VT_LVAL;
         }
 
         assert(!(vtop[-1].r & VT_LVAL));