Files
wasmtime/tests/misc_testsuite/threads/load-store-alignment.wast
Alex Crichton 85f16f488d Consolidate address calculations for atomics (#3143)
* Consolidate address calculations for atomics

This commit consolidates all calculations of guest addresses into one
`prepare_addr` function. This notably removes the atomics-specific paths
as well as the `prepare_load` function (now renamed to `prepare_addr`
and folded into `get_heap_addr`).

The goal of this commit is to simplify how addresses are managed in the
code generator for atomics to use all the shared infrastructure of other
loads/stores as well. This additionally fixes #3132 via the use of
`heap_addr` in clif for all operations.

I also added a number of tests for loads/stores with varying alignments.
Originally I was going to allow loads/stores to not be aligned since
that's what the current formal specification says, but the overview of
the threads proposal disagrees with the formal specification, so I
figured I'd leave it as-is but adding tests probably doesn't hurt.

Closes #3132

* Fix old backend

* Guarantee misalignment checks happen before out-of-bounds
2021-08-04 15:57:56 -05:00

127 lines
6.4 KiB
Plaintext

(module
;; NB this should use a shared memory when it's supported
(memory 1)
;; Atomic loads with no static offset, one export per access width.
;; Each wrapper loads from the address in its i32 parameter.
(func (export "32.load8u") (param i32) (result i32)
local.get 0 i32.atomic.load8_u)
(func (export "32.load16u") (param i32) (result i32)
local.get 0 i32.atomic.load16_u)
(func (export "32.load32u") (param i32) (result i32)
local.get 0 i32.atomic.load)
(func (export "64.load8u") (param i32) (result i64)
local.get 0 i64.atomic.load8_u)
(func (export "64.load16u") (param i32) (result i64)
local.get 0 i64.atomic.load16_u)
(func (export "64.load32u") (param i32) (result i64)
local.get 0 i64.atomic.load32_u)
(func (export "64.load64u") (param i32) (result i64)
local.get 0 i64.atomic.load)
;; Atomic stores with no static offset. Each wrapper stores the
;; constant 0 to the address in its i32 parameter and returns nothing.
(func (export "32.store8") (param i32)
local.get 0 i32.const 0 i32.atomic.store8)
(func (export "32.store16") (param i32)
local.get 0 i32.const 0 i32.atomic.store16)
(func (export "32.store32") (param i32)
local.get 0 i32.const 0 i32.atomic.store)
(func (export "64.store8") (param i32)
local.get 0 i64.const 0 i64.atomic.store8)
(func (export "64.store16") (param i32)
local.get 0 i64.const 0 i64.atomic.store16)
(func (export "64.store32") (param i32)
local.get 0 i64.const 0 i64.atomic.store32)
(func (export "64.store64") (param i32)
local.get 0 i64.const 0 i64.atomic.store)
;; Same loads but with a static offset=1, so the effective address is
;; (parameter + 1); alignment is checked against the effective address.
(func (export "32.load8u o1") (param i32) (result i32)
local.get 0 i32.atomic.load8_u offset=1)
(func (export "32.load16u o1") (param i32) (result i32)
local.get 0 i32.atomic.load16_u offset=1)
(func (export "32.load32u o1") (param i32) (result i32)
local.get 0 i32.atomic.load offset=1)
(func (export "64.load8u o1") (param i32) (result i64)
local.get 0 i64.atomic.load8_u offset=1)
(func (export "64.load16u o1") (param i32) (result i64)
local.get 0 i64.atomic.load16_u offset=1)
(func (export "64.load32u o1") (param i32) (result i64)
local.get 0 i64.atomic.load32_u offset=1)
(func (export "64.load64u o1") (param i32) (result i64)
local.get 0 i64.atomic.load offset=1)
;; Same stores but with a static offset=1.
(func (export "32.store8 o1") (param i32)
local.get 0 i32.const 0 i32.atomic.store8 offset=1)
(func (export "32.store16 o1") (param i32)
local.get 0 i32.const 0 i32.atomic.store16 offset=1)
(func (export "32.store32 o1") (param i32)
local.get 0 i32.const 0 i32.atomic.store offset=1)
(func (export "64.store8 o1") (param i32)
local.get 0 i64.const 0 i64.atomic.store8 offset=1)
(func (export "64.store16 o1") (param i32)
local.get 0 i64.const 0 i64.atomic.store16 offset=1)
(func (export "64.store32 o1") (param i32)
local.get 0 i64.const 0 i64.atomic.store32 offset=1)
(func (export "64.store64 o1") (param i32)
local.get 0 i64.const 0 i64.atomic.store offset=1)
)
;; aligned loads
;; With no static offset the operand itself must be a multiple of the
;; access size; with offset=1 the operand is chosen so (operand + 1) is.
;; Every access width gets a plain aligned case, including 64.load32u,
;; which was previously only covered in the offset=1 and misaligned groups.
(assert_return (invoke "32.load8u" (i32.const 0)) (i32.const 0))
(assert_return (invoke "32.load16u" (i32.const 0)) (i32.const 0))
(assert_return (invoke "32.load32u" (i32.const 0)) (i32.const 0))
(assert_return (invoke "64.load8u" (i32.const 0)) (i64.const 0))
(assert_return (invoke "64.load16u" (i32.const 0)) (i64.const 0))
(assert_return (invoke "64.load32u" (i32.const 0)) (i64.const 0))
(assert_return (invoke "64.load64u" (i32.const 0)) (i64.const 0))
(assert_return (invoke "32.load8u o1" (i32.const 0)) (i32.const 0))
(assert_return (invoke "32.load16u o1" (i32.const 1)) (i32.const 0))
(assert_return (invoke "32.load32u o1" (i32.const 3)) (i32.const 0))
(assert_return (invoke "64.load8u o1" (i32.const 0)) (i64.const 0))
(assert_return (invoke "64.load16u o1" (i32.const 1)) (i64.const 0))
(assert_return (invoke "64.load32u o1" (i32.const 3)) (i64.const 0))
(assert_return (invoke "64.load64u o1" (i32.const 7)) (i64.const 0))
;; misaligned loads
;; An atomic access traps when its effective address (operand + static
;; offset) is not a multiple of the access size. 8-bit accesses are
;; always aligned; wider ones trap at effective address 1.
(assert_return (invoke "32.load8u" (i32.const 1)) (i32.const 0))
(assert_trap (invoke "32.load16u" (i32.const 1)) "misaligned memory access")
(assert_trap (invoke "32.load32u" (i32.const 1)) "misaligned memory access")
(assert_return (invoke "64.load8u" (i32.const 1)) (i64.const 0))
(assert_trap (invoke "64.load16u" (i32.const 1)) "misaligned memory access")
(assert_trap (invoke "64.load32u" (i32.const 1)) "misaligned memory access")
(assert_trap (invoke "64.load64u" (i32.const 1)) "misaligned memory access")
;; With offset=1, operand 0 yields effective address 1: misaligned for
;; every width except 8-bit.
(assert_return (invoke "32.load8u o1" (i32.const 0)) (i32.const 0))
(assert_trap (invoke "32.load16u o1" (i32.const 0)) "misaligned memory access")
(assert_trap (invoke "32.load32u o1" (i32.const 0)) "misaligned memory access")
(assert_return (invoke "64.load8u o1" (i32.const 0)) (i64.const 0))
(assert_trap (invoke "64.load16u o1" (i32.const 0)) "misaligned memory access")
(assert_trap (invoke "64.load32u o1" (i32.const 0)) "misaligned memory access")
(assert_trap (invoke "64.load64u o1" (i32.const 0)) "misaligned memory access")
;; aligned stores
;; Mirrors the aligned-load group: one plain aligned case per access
;; width, including 64.store32, which was previously only covered in the
;; offset=1 and misaligned groups. Store exports return no results.
(assert_return (invoke "32.store8" (i32.const 0)))
(assert_return (invoke "32.store16" (i32.const 0)))
(assert_return (invoke "32.store32" (i32.const 0)))
(assert_return (invoke "64.store8" (i32.const 0)))
(assert_return (invoke "64.store16" (i32.const 0)))
(assert_return (invoke "64.store32" (i32.const 0)))
(assert_return (invoke "64.store64" (i32.const 0)))
(assert_return (invoke "32.store8 o1" (i32.const 0)))
(assert_return (invoke "32.store16 o1" (i32.const 1)))
(assert_return (invoke "32.store32 o1" (i32.const 3)))
(assert_return (invoke "64.store8 o1" (i32.const 0)))
(assert_return (invoke "64.store16 o1" (i32.const 1)))
(assert_return (invoke "64.store32 o1" (i32.const 3)))
(assert_return (invoke "64.store64 o1" (i32.const 7)))
;; misaligned stores
;; 8-bit stores are always aligned and succeed; wider stores trap on an
;; odd effective address. The store exports are declared with no result
;; type, so their assert_return commands must expect no values — the
;; previous (i32.const 0)/(i64.const 0) expectations were an arity
;; mismatch against (func (param i32)) and could never pass.
(assert_return (invoke "32.store8" (i32.const 1)))
(assert_trap (invoke "32.store16" (i32.const 1)) "misaligned memory access")
(assert_trap (invoke "32.store32" (i32.const 1)) "misaligned memory access")
(assert_return (invoke "64.store8" (i32.const 1)))
(assert_trap (invoke "64.store16" (i32.const 1)) "misaligned memory access")
(assert_trap (invoke "64.store32" (i32.const 1)) "misaligned memory access")
(assert_trap (invoke "64.store64" (i32.const 1)) "misaligned memory access")
(assert_return (invoke "32.store8 o1" (i32.const 0)))
(assert_trap (invoke "32.store16 o1" (i32.const 0)) "misaligned memory access")
(assert_trap (invoke "32.store32 o1" (i32.const 0)) "misaligned memory access")
(assert_return (invoke "64.store8 o1" (i32.const 0)))
(assert_trap (invoke "64.store16 o1" (i32.const 0)) "misaligned memory access")
(assert_trap (invoke "64.store32 o1" (i32.const 0)) "misaligned memory access")
(assert_trap (invoke "64.store64 o1" (i32.const 0)) "misaligned memory access")