Fix mis-aligned access issues with s390x (#4702)

This fixes two problems: minimum symbol alignment for the LARL
instruction, and alignment requirements for LRL/LGRL etc.

The first problem is that the LARL instruction used to load a
symbol address (PC relative) requires that the target symbol
is at least 2-byte aligned.  This is always guaranteed for code
symbols (all instructions must be 2-aligned anyway), but not
necessarily for data symbols.

Other s390x compilers fix this problem by ensuring that all
global symbols are always emitted with a minimum 2-byte
alignment.  This patch introduces an equivalent mechanism
for Cranelift:
- Add a symbol_alignment routine to TargetIsa, similar to the
  existing code_section_alignment routine.
- Respect symbol_alignment as minimum alignment for all symbols
  emitted in the object backend (code and data).

The second problem is that PC-relative instructions that
directly *access* data (like LRL/LGRL, STRL/STGRL etc.)
not only have the 2-byte alignment requirement like LARL, but actually
require that their memory operand is *naturally* aligned
(i.e. alignment is at least the size of the access).

This property (natural alignment for memory accesses) is
supposed to be provided by the "aligned" flag in MemFlags;
however, this is not implemented correctly at the moment.

To fix this, this patch:
- Only emits PC-relative memory access instructions if the
  "aligned" flag is set in the associated MemFlags.
- Fixes a bug in emit_small_memory_copy and emit_small_memset
  which currently set the aligned flag unconditionally, ignoring
  the actual alignment info passed by their caller.

Tested with wasmtime and cg_clif.
This commit is contained in:
Ulrich Weigand
2022-08-16 21:39:42 +02:00
committed by GitHub
parent fbfceaec98
commit a916788ab4
14 changed files with 323 additions and 132 deletions

View File

@@ -15,7 +15,7 @@ function %atomic_load_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = atomic_load.i64 little v0
v1 = atomic_load.i64 aligned little v0
return v1
}
@@ -37,7 +37,7 @@ function %atomic_load_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = atomic_load.i32 little v0
v1 = atomic_load.i32 aligned little v0
return v1
}
@@ -59,7 +59,7 @@ function %atomic_load_i16_sym() -> i16 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = atomic_load.i16 little v0
v1 = atomic_load.i16 aligned little v0
return v1
}

View File

@@ -15,7 +15,7 @@ function %atomic_load_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = atomic_load.i64 v0
v1 = atomic_load.i64 aligned v0
return v1
}
@@ -37,7 +37,7 @@ function %atomic_load_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = atomic_load.i32 v0
v1 = atomic_load.i32 aligned v0
return v1
}
@@ -59,7 +59,7 @@ function %atomic_load_i16_sym() -> i16 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = atomic_load.i16 v0
v1 = atomic_load.i16 aligned v0
return v1
}

View File

@@ -16,7 +16,7 @@ function %atomic_store_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
atomic_store.i64 little v0, v1
atomic_store.i64 aligned little v0, v1
return
}
@@ -53,7 +53,7 @@ function %atomic_store_i32_sym(i32) {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
atomic_store.i32 little v0, v1
atomic_store.i32 aligned little v0, v1
return
}
@@ -90,7 +90,7 @@ function %atomic_store_i16_sym(i16) {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
atomic_store.i16 little v0, v1
atomic_store.i16 aligned little v0, v1
return
}

View File

@@ -16,7 +16,7 @@ function %atomic_store_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
atomic_store.i64 v0, v1
atomic_store.i64 aligned v0, v1
return
}
@@ -52,7 +52,7 @@ function %atomic_store_i32_sym(i32) {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
atomic_store.i32 v0, v1
atomic_store.i32 aligned v0, v1
return
}
@@ -88,7 +88,7 @@ function %atomic_store_i16_sym(i16) {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
atomic_store.i16 v0, v1
atomic_store.i16 aligned v0, v1
return
}

View File

@@ -69,7 +69,7 @@ function %icmp_slt_i64_sym(i64) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
v2 = load.i64 v1
v2 = load.i64 aligned v1
v3 = icmp.i64 slt v0, v2
return v3
}
@@ -97,7 +97,7 @@ function %icmp_slt_i64_sym_ext16(i64) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
v2 = sload16.i64 v1
v2 = sload16.i64 aligned v1
v3 = icmp.i64 slt v0, v2
return v3
}
@@ -125,7 +125,7 @@ function %icmp_slt_i64_sym_ext32(i64) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
v2 = sload32.i64 v1
v2 = sload32.i64 aligned v1
v3 = icmp.i64 slt v0, v2
return v3
}
@@ -204,7 +204,7 @@ function %icmp_slt_i32_sym(i32) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
v2 = load.i32 v1
v2 = load.i32 aligned v1
v3 = icmp.i32 slt v0, v2
return v3
}
@@ -245,7 +245,7 @@ function %icmp_slt_i32_sym_ext16(i32) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
v2 = sload16.i32 v1
v2 = sload16.i32 aligned v1
v3 = icmp.i32 slt v0, v2
return v3
}
@@ -303,7 +303,7 @@ function %icmp_slt_i16_sym(i16) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
v2 = load.i16 v1
v2 = load.i16 aligned v1
v3 = icmp.i16 slt v0, v2
return v3
}
@@ -415,7 +415,7 @@ function %icmp_ult_i64_sym(i64) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
v2 = load.i64 v1
v2 = load.i64 aligned v1
v3 = icmp.i64 ult v0, v2
return v3
}
@@ -443,7 +443,7 @@ function %icmp_ult_i64_sym_ext32(i64) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
v2 = uload32.i64 v1
v2 = uload32.i64 aligned v1
v3 = icmp.i64 ult v0, v2
return v3
}
@@ -472,7 +472,7 @@ function %icmp_ult_i64_sym_ext16(i64) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
v2 = uload16.i64 v1
v2 = uload16.i64 aligned v1
v3 = icmp.i64 ult v0, v2
return v3
}
@@ -538,7 +538,7 @@ function %icmp_ult_i32_sym(i32) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
v2 = load.i32 v1
v2 = load.i32 aligned v1
v3 = icmp.i32 ult v0, v2
return v3
}
@@ -567,7 +567,7 @@ function %icmp_ult_i32_sym_ext16(i32) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
v2 = uload16.i32 v1
v2 = uload16.i32 aligned v1
v3 = icmp.i32 ult v0, v2
return v3
}
@@ -627,7 +627,7 @@ function %icmp_ult_i16_mem(i16) -> b1 {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
v2 = load.i16 v1
v2 = load.i16 aligned v1
v3 = icmp.i16 ult v0, v2
return v3
}

View File

@@ -15,7 +15,7 @@ function %load_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = load.i64 little v0
v1 = load.i64 aligned little v0
return v1
}
@@ -58,7 +58,7 @@ function %uload16_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = uload16.i64 little v0
v1 = uload16.i64 aligned little v0
return v1
}
@@ -82,7 +82,7 @@ function %sload16_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = sload16.i64 little v0
v1 = sload16.i64 aligned little v0
return v1
}
@@ -106,7 +106,7 @@ function %uload32_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = uload32.i64 little v0
v1 = uload32.i64 aligned little v0
return v1
}
@@ -130,7 +130,7 @@ function %sload32_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = sload32.i64 little v0
v1 = sload32.i64 aligned little v0
return v1
}
@@ -153,7 +153,7 @@ function %load_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = load.i32 little v0
v1 = load.i32 aligned little v0
return v1
}
@@ -196,7 +196,7 @@ function %uload16_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = uload16.i32 little v0
v1 = uload16.i32 aligned little v0
return v1
}
@@ -220,7 +220,7 @@ function %sload16_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = sload16.i32 little v0
v1 = sload16.i32 aligned little v0
return v1
}
@@ -243,7 +243,7 @@ function %load_i16_sym() -> i16 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = load.i16 little v0
v1 = load.i16 aligned little v0
return v1
}

View File

@@ -15,7 +15,7 @@ function %load_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = load.i64 v0
v1 = load.i64 aligned v0
return v1
}
@@ -57,7 +57,7 @@ function %uload16_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = uload16.i64 v0
v1 = uload16.i64 aligned v0
return v1
}
@@ -79,7 +79,7 @@ function %sload16_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = sload16.i64 v0
v1 = sload16.i64 aligned v0
return v1
}
@@ -101,7 +101,7 @@ function %uload32_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = uload32.i64 v0
v1 = uload32.i64 aligned v0
return v1
}
@@ -123,7 +123,7 @@ function %sload32_i64_sym() -> i64 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = sload32.i64 v0
v1 = sload32.i64 aligned v0
return v1
}
@@ -145,7 +145,7 @@ function %load_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = load.i32 v0
v1 = load.i32 aligned v0
return v1
}
@@ -197,7 +197,7 @@ function %uload16_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = uload16.i32 v0
v1 = uload16.i32 aligned v0
return v1
}
@@ -229,7 +229,7 @@ function %sload16_i32_sym() -> i32 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = sload16.i32 v0
v1 = sload16.i32 aligned v0
return v1
}
@@ -251,7 +251,7 @@ function %load_i16_sym() -> i16 {
gv0 = symbol colocated %sym
block0:
v0 = symbol_value.i64 gv0
v1 = load.i16 v0
v1 = load.i16 aligned v0
return v1
}

View File

@@ -15,7 +15,7 @@ function %store_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
store.i64 little v0, v1
store.i64 aligned little v0, v1
return
}
@@ -70,7 +70,7 @@ function %istore16_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
istore16.i64 little v0, v1
istore16.i64 aligned little v0, v1
return
}
@@ -103,7 +103,7 @@ function %istore32_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
istore32.i64 little v0, v1
istore32.i64 aligned little v0, v1
return
}
@@ -137,7 +137,7 @@ function %store_i32_sym(i32) {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
store.i32 little v0, v1
store.i32 aligned little v0, v1
return
}
@@ -192,7 +192,7 @@ function %istore16_i32_sym(i32) {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
istore16.i32 little v0, v1
istore16.i32 aligned little v0, v1
return
}
@@ -225,7 +225,7 @@ function %store_i16_sym(i16) {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
store.i16 little v0, v1
store.i16 aligned little v0, v1
return
}

View File

@@ -15,7 +15,7 @@ function %store_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
store.i64 v0, v1
store.i64 aligned v0, v1
return
}
@@ -69,7 +69,7 @@ function %istore16_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
istore16.i64 v0, v1
istore16.i64 aligned v0, v1
return
}
@@ -102,7 +102,7 @@ function %istore32_i64_sym(i64) {
gv0 = symbol colocated %sym
block0(v0: i64):
v1 = symbol_value.i64 gv0
istore32.i64 v0, v1
istore32.i64 aligned v0, v1
return
}
@@ -135,7 +135,7 @@ function %store_i32_sym(i32) {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
store.i32 v0, v1
store.i32 aligned v0, v1
return
}
@@ -199,7 +199,7 @@ function %istore16_i32_sym(i32) {
gv0 = symbol colocated %sym
block0(v0: i32):
v1 = symbol_value.i64 gv0
istore16.i32 v0, v1
istore16.i32 aligned v0, v1
return
}
@@ -232,7 +232,7 @@ function %store_i16_sym(i16) {
gv0 = symbol colocated %sym
block0(v0: i16):
v1 = symbol_value.i64 gv0
store.i16 v0, v1
store.i16 aligned v0, v1
return
}