cranelift: Implement table_addr in interpreter (#4433)

Author: Afonso Bordado
Date: 2022-07-13 20:53:42 +01:00
Committed by: GitHub
Parent: 03ece34cbb
Commit: 4ea46c3ca8
2 changed files with 165 additions and 1 deletion


@@ -0,0 +1,143 @@
test interpret
test run
target x86_64
target s390x
target aarch64

function %set_get_i64(i64 vmctx, i64, i64) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv0+8
    table0 = dynamic gv1, element_size 8, bound gv2, index_type i64

block0(v0: i64, v1: i64, v2: i64):
    v3 = table_addr.i64 table0, v1, +0
    store.i64 v2, v3
    v4 = load.i64 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i64(0, 1) == 1
; run: %set_get_i64(0, 10) == 10
; run: %set_get_i64(1, 1) == 1
; run: %set_get_i64(1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
; run: %set_get_i64(10, 1) == 1
; run: %set_get_i64(10, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF

function %set_get_i32(i64 vmctx, i64, i32) -> i32 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv0+8
    table0 = dynamic gv1, element_size 8, bound gv2, index_type i64

block0(v0: i64, v1: i64, v2: i32):
    ;; Note the +4 offset: with element_size 8 this addresses the upper
    ;; four bytes of each 8-byte element.
    v3 = table_addr.i64 table0, v1, +4
    store.i32 v2, v3
    v4 = load.i32 v3
    return v4
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i32(0, 1) == 1
; run: %set_get_i32(0, 10) == 10
; run: %set_get_i32(1, 1) == 1
; run: %set_get_i32(1, 0xC0FFEEEE) == 0xC0FFEEEE
; run: %set_get_i32(10, 1) == 1
; run: %set_get_i32(10, 0xC0FFEEEE) == 0xC0FFEEEE

function %set_get_i8(i64 vmctx, i64, i8) -> i8 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv0+8
    table0 = dynamic gv1, element_size 1, bound gv2, index_type i64

block0(v0: i64, v1: i64, v2: i8):
    v3 = table_addr.i64 table0, v1, +0
    store.i8 v2, v3
    v4 = load.i8 v3
    return v4
}
; heap: static, size=2, ptr=vmctx+0, bound=vmctx+8
; run: %set_get_i8(0, 1) == 1
; run: %set_get_i8(0, 0xC0) == 0xC0
; run: %set_get_i8(1, 1) == 1
; run: %set_get_i8(1, 0xFF) == 0xFF

function %large_elm_size(i64 vmctx, i64, i64, i8) -> i8 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv0+8
    table0 = dynamic gv1, element_size 10240, bound gv2, index_type i64

block0(v0: i64, v1: i64, v2: i64, v3: i8):
    v4 = table_addr.i64 table0, v1, +0
    v5 = iadd.i64 v4, v2
    store.i8 v3, v5
    v6 = load.i8 v5
    return v6
}
; heap: static, size=0xC800, ptr=vmctx+0, bound=vmctx+8
; run: %large_elm_size(0, 0, 1) == 1
; run: %large_elm_size(1, 0, 0xC0) == 0xC0
; run: %large_elm_size(0, 1, 1) == 1
; run: %large_elm_size(1, 1, 0xFF) == 0xFF
; run: %large_elm_size(0, 127, 1) == 1
; run: %large_elm_size(1, 127, 0xFF) == 0xFF
; run: %large_elm_size(0, 10239, 1) == 1
; run: %large_elm_size(1, 10239, 0xBB) == 0xBB

; Tests writing an i64, which covers 8 table entries at once.
; Loads the first and the last byte to confirm that the slots were written.
function %multi_elm_write(i64 vmctx, i64, i64) -> i8, i8 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv0+8
    table0 = dynamic gv1, element_size 1, bound gv2, index_type i64

block0(v0: i64, v1: i64, v2: i64):
    v3 = table_addr.i64 table0, v1, +0
    v4 = table_addr.i64 table0, v1, +7
    store.i64 v2, v3
    v5 = load.i8 v3
    v6 = load.i8 v4
    return v5, v6
}
; heap: static, size=16, ptr=vmctx+0, bound=vmctx+8
;; When writing these test cases, keep in mind that s390x is big endian!
;; We just make sure that the first and last byte are the same to deal with
;; that; the byte layouts are spelled out after the run lines below.
; run: %multi_elm_write(0, 0xC0FFEEEE_FFEEEEC0) == [0xC0, 0xC0]
; run: %multi_elm_write(1, 0xAABBCCDD_EEFF00AA) == [0xAA, 0xAA]
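;; For reference, the byte layouts of the first value used above are:
;;   little endian (x86_64, aarch64): C0 EE EE FF EE EE FF C0
;;   big endian (s390x):              C0 FF EE EE FF EE EE C0
;; Byte 0 and byte 7 are 0xC0 in both layouts, so the expected results hold
;; on every target.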

function %heap_table(i64 vmctx, i64, i64, i64) -> i64 {
    gv0 = vmctx
    gv1 = load.i64 notrap aligned gv0
    gv2 = load.i64 notrap aligned gv0+8
    heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64
    table0 = dynamic gv1, element_size 9, bound gv2, index_type i64

block0(v0: i64, v1: i64, v2: i64, v3: i64):
    ; v1 - heap offset (bytes); the run lines pick v1 = 9 * v2 + 2 so the heap
    ;      and table accesses alias
    ; v2 - table offset (elements)
    ; v3 - store/load value
    v4 = heap_addr.i64 heap0, v1, 0
    v5 = table_addr.i64 table0, v2, +2
    ; Store via heap, load via table
    store.i64 v3, v4
    v6 = load.i64 v5
    return v6
}
; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8
; run: %heap_table(2, 0, 0xAABBCCDD_EEFF0011) == 0xAABBCCDD_EEFF0011
; run: %heap_table(11, 1, 0xC0FFEEEE_DECAFFFF) == 0xC0FFEEEE_DECAFFFF
; run: %heap_table(20, 2, 1) == 1
; run: %heap_table(29, 3, -10) == -10
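
The heap offsets in the %heap_table run lines are not arbitrary: with element_size 9
and the instruction offset +2, element v2 starts at byte 9 * v2, so the table access
lands at byte 9 * v2 + 2, which is exactly the heap offset v1 passed alongside it. A
standalone sanity check of that relationship (plain Rust, not part of the test file):

fn main() {
    let element_size: u64 = 9;
    let inst_offset: u64 = 2;
    // (heap byte offset, table element index) pairs from the run lines above.
    for (heap_off, index) in [(2u64, 0u64), (11, 1), (20, 2), (29, 3)] {
        assert_eq!(heap_off, index * element_size + inst_offset);
    }
    println!("heap and table offsets alias as intended");
}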


@@ -410,7 +410,28 @@ where
         }
         Opcode::GetPinnedReg => unimplemented!("GetPinnedReg"),
         Opcode::SetPinnedReg => unimplemented!("SetPinnedReg"),
-        Opcode::TableAddr => unimplemented!("TableAddr"),
+        Opcode::TableAddr => {
+            if let InstructionData::TableAddr { table, offset, .. } = inst {
+                let table = &state.get_current_function().tables[table];
+                let base = state.resolve_global_value(table.base_gv)?;
+                let bound = state.resolve_global_value(table.bound_gv)?;
+                let index_ty = table.index_type;
+                let element_size = V::int(u64::from(table.element_size) as i128, index_ty)?;
+                let inst_offset = V::int(i32::from(offset) as i128, index_ty)?;
+
+                let byte_offset = arg(0)?.mul(element_size.clone())?.add(inst_offset)?;
+                let bound_bytes = bound.mul(element_size)?;
+                if byte_offset.gt(&bound_bytes)? {
+                    return Ok(ControlFlow::Trap(CraneliftTrap::User(
+                        TrapCode::HeapOutOfBounds,
+                    )));
+                }
+
+                assign(base.add(byte_offset)?)
+            } else {
+                unreachable!()
+            }
+        }
         Opcode::Iconst => assign(Value::int(imm().into_int()?, ctrl_ty)?),
         Opcode::F32const => assign(imm()),
         Opcode::F64const => assign(imm()),
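
Stripped of the interpreter's value-representation and error plumbing, the new
TableAddr arm computes byte_offset = index * element_size + offset, traps with
HeapOutOfBounds when that lies past bound * element_size, and otherwise yields
base + byte_offset. A minimal standalone model of that logic (illustrative names,
not the interpreter's actual API):

/// Model of `table_addr table, index, +offset` as implemented above.
/// `base` and `bound` stand in for the resolved base_gv/bound_gv values;
/// `bound` is the current table size in elements.
fn table_addr(base: u64, bound: u64, element_size: u64, index: u64, offset: u64) -> Result<u64, &'static str> {
    let byte_offset = index * element_size + offset;
    let bound_bytes = bound * element_size;
    // Mirrors the interpreter's check: only an offset strictly past the bound traps.
    if byte_offset > bound_bytes {
        return Err("HeapOutOfBounds trap");
    }
    Ok(base + byte_offset)
}

fn main() {
    // Hypothetical 2-element table with 8-byte elements starting at 0x1000:
    // index 1, offset +0 resolves to 0x1008; index 3 is out of bounds.
    assert_eq!(table_addr(0x1000, 2, 8, 1, 0), Ok(0x1008));
    assert!(table_addr(0x1000, 2, 8, 3, 0).is_err());
}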