Merge pull request from GHSA-ff4p-7xrq-q5r8
* x64: Remove incorrect `amode_add` lowering rules This commit removes two incorrect rules that were part of the x64 backend's computation of addressing modes. These two rules folded a zero-extended 32-bit computation into the address-mode operand, but this isn't correct: the 32-bit computation should be truncated to 32 bits, yet when folded into the address-mode computation it is performed with 64-bit operands, meaning the truncation doesn't happen. * Add release notes
This commit is contained in:
42
RELEASES.md
42
RELEASES.md
@@ -20,6 +20,20 @@ Unreleased.
|
|||||||
|
|
||||||
--------------------------------------------------------------------------------
|
--------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
## 6.0.1
|
||||||
|
|
||||||
|
Released 2023-03-08.
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
* Guest-controlled out-of-bounds read/write on x86\_64
|
||||||
|
[GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
|
||||||
|
|
||||||
|
* Miscompilation of `i8x16.select` with the same inputs on x86\_64
|
||||||
|
[GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
|
||||||
|
|
||||||
|
--------------------------------------------------------------------------------
|
||||||
|
|
||||||
## 6.0.0
|
## 6.0.0
|
||||||
|
|
||||||
Released 2023-02-20
|
Released 2023-02-20
|
||||||
@@ -74,6 +88,20 @@ Released 2023-02-20
|
|||||||
|
|
||||||
--------------------------------------------------------------------------------
|
--------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
## 5.0.1
|
||||||
|
|
||||||
|
Released 2023-03-08.
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
* Guest-controlled out-of-bounds read/write on x86\_64
|
||||||
|
[GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
|
||||||
|
|
||||||
|
* Miscompilation of `i8x16.select` with the same inputs on x86\_64
|
||||||
|
[GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
|
||||||
|
|
||||||
|
--------------------------------------------------------------------------------
|
||||||
|
|
||||||
## 5.0.0
|
## 5.0.0
|
||||||
|
|
||||||
Released 2023-01-20
|
Released 2023-01-20
|
||||||
@@ -123,6 +151,20 @@ Released 2023-01-20
|
|||||||
|
|
||||||
--------------------------------------------------------------------------------
|
--------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
## 4.0.1
|
||||||
|
|
||||||
|
Released 2023-03-08.
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
* Guest-controlled out-of-bounds read/write on x86\_64
|
||||||
|
[GHSA-ff4p-7xrq-q5r8](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-ff4p-7xrq-q5r8)
|
||||||
|
|
||||||
|
* Miscompilation of `i8x16.select` with the same inputs on x86\_64
|
||||||
|
[GHSA-xm67-587q-r2vw](https://github.com/bytecodealliance/wasmtime/security/advisories/GHSA-xm67-587q-r2vw)
|
||||||
|
|
||||||
|
--------------------------------------------------------------------------------
|
||||||
|
|
||||||
## 4.0.0
|
## 4.0.0
|
||||||
|
|
||||||
Released 2022-12-20
|
Released 2022-12-20
|
||||||
|
|||||||
@@ -1063,20 +1063,6 @@
|
|||||||
(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (ishl index (iconst (uimm8 shift))))
|
(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (ishl index (iconst (uimm8 shift))))
|
||||||
(if (u32_lteq (u8_as_u32 shift) 3))
|
(if (u32_lteq (u8_as_u32 shift) 3))
|
||||||
(Amode.ImmRegRegShift off base index shift flags))
|
(Amode.ImmRegRegShift off base index shift flags))
|
||||||
(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (uextend (ishl index (iconst (uimm8 shift)))))
|
|
||||||
(if (u32_lteq (u8_as_u32 shift) 3))
|
|
||||||
(Amode.ImmRegRegShift off base (extend_to_gpr index $I64 (ExtendKind.Zero)) shift flags))
|
|
||||||
|
|
||||||
;; Same, but with a uextend of a shift of a 32-bit add. This is valid
|
|
||||||
;; because we know our lowering of a narrower-than-64-bit `iadd` will
|
|
||||||
;; always write the full register width, so we can effectively ignore
|
|
||||||
;; the `uextend` and look through it to the `ishl`.
|
|
||||||
;;
|
|
||||||
;; Priority 3 to avoid conflict with the previous rule.
|
|
||||||
(rule 3 (amode_add (Amode.ImmReg off (valid_reg base) flags)
|
|
||||||
(uextend (ishl index @ (iadd _ _) (iconst (uimm8 shift)))))
|
|
||||||
(if (u32_lteq (u8_as_u32 shift) 3))
|
|
||||||
(Amode.ImmRegRegShift off base index shift flags))
|
|
||||||
|
|
||||||
;; -- Case 4 (absorbing constant offsets).
|
;; -- Case 4 (absorbing constant offsets).
|
||||||
;;
|
;;
|
||||||
|
|||||||
@@ -209,8 +209,9 @@ block0(v0: i64, v1: i32):
|
|||||||
; pushq %rbp
|
; pushq %rbp
|
||||||
; movq %rsp, %rbp
|
; movq %rsp, %rbp
|
||||||
; block0:
|
; block0:
|
||||||
; movl %esi, %ecx
|
; movq %rsi, %rdx
|
||||||
; movq -1(%rdi,%rcx,8), %rax
|
; shll $3, %edx, %edx
|
||||||
|
; movq -1(%rdi,%rdx,1), %rax
|
||||||
; movq %rbp, %rsp
|
; movq %rbp, %rsp
|
||||||
; popq %rbp
|
; popq %rbp
|
||||||
; ret
|
; ret
|
||||||
@@ -220,8 +221,9 @@ block0(v0: i64, v1: i32):
|
|||||||
; pushq %rbp
|
; pushq %rbp
|
||||||
; movq %rsp, %rbp
|
; movq %rsp, %rbp
|
||||||
; block1: ; offset 0x4
|
; block1: ; offset 0x4
|
||||||
; movl %esi, %ecx
|
; movq %rsi, %rdx
|
||||||
; movq -1(%rdi, %rcx, 8), %rax ; trap: heap_oob
|
; shll $3, %edx
|
||||||
|
; movq -1(%rdi, %rdx), %rax ; trap: heap_oob
|
||||||
; movq %rbp, %rsp
|
; movq %rbp, %rsp
|
||||||
; popq %rbp
|
; popq %rbp
|
||||||
; retq
|
; retq
|
||||||
@@ -244,7 +246,8 @@ block0(v0: i64, v1: i32, v2: i32):
|
|||||||
; block0:
|
; block0:
|
||||||
; movq %rsi, %r8
|
; movq %rsi, %r8
|
||||||
; addl %r8d, %edx, %r8d
|
; addl %r8d, %edx, %r8d
|
||||||
; movq -1(%rdi,%r8,4), %rax
|
; shll $2, %r8d, %r8d
|
||||||
|
; movq -1(%rdi,%r8,1), %rax
|
||||||
; movq %rbp, %rsp
|
; movq %rbp, %rsp
|
||||||
; popq %rbp
|
; popq %rbp
|
||||||
; ret
|
; ret
|
||||||
@@ -256,7 +259,8 @@ block0(v0: i64, v1: i32, v2: i32):
|
|||||||
; block1: ; offset 0x4
|
; block1: ; offset 0x4
|
||||||
; movq %rsi, %r8
|
; movq %rsi, %r8
|
||||||
; addl %edx, %r8d
|
; addl %edx, %r8d
|
||||||
; movq -1(%rdi, %r8, 4), %rax ; trap: heap_oob
|
; shll $2, %r8d
|
||||||
|
; movq -1(%rdi, %r8), %rax ; trap: heap_oob
|
||||||
; movq %rbp, %rsp
|
; movq %rbp, %rsp
|
||||||
; popq %rbp
|
; popq %rbp
|
||||||
; retq
|
; retq
|
||||||
|
|||||||
Reference in New Issue
Block a user