Merge pull request from GHSA-ff4p-7xrq-q5r8
* x64: Remove incorrect `amode_add` lowering rules

  This commit removes two incorrect rules from the x64 backend's
  computation of addressing modes. The rules folded a zero-extended
  32-bit computation into the address-mode operand, which is incorrect:
  the 32-bit computation must be truncated to 32 bits before being
  zero-extended, but once folded into the address-mode computation it
  is performed on 64-bit operands, so the truncation never happens.

* Add release notes
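To make the miscompilation concrete, here is a minimal Rust sketch (not part of the patch) contrasting the CLIF semantics of `uextend.i64 (ishl.i32 x, 3)` with what the removed rules computed; the value 0x8000_0000 is an arbitrary example with the high bit set:

fn main() {
    let index: u32 = 0x8000_0000; // high bit set

    // CLIF semantics: shift at 32 bits (bits above bit 31 are
    // discarded), then zero-extend the truncated result.
    let correct = index.wrapping_shl(3) as u64;

    // What the removed rules computed: zero-extend first, then shift
    // as part of the 64-bit address calculation, so the bits that
    // should have been discarded survive.
    let folded = (index as u64) << 3;

    assert_eq!(correct, 0);
    assert_eq!(folded, 0x4_0000_0000); // 16 GiB past the intended address
}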
@@ -1063,20 +1063,6 @@
 (rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (ishl index (iconst (uimm8 shift))))
       (if (u32_lteq (u8_as_u32 shift) 3))
       (Amode.ImmRegRegShift off base index shift flags))
-(rule 2 (amode_add (Amode.ImmReg off (valid_reg base) flags) (uextend (ishl index (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base (extend_to_gpr index $I64 (ExtendKind.Zero)) shift flags))
-
-;; Same, but with a uextend of a shift of a 32-bit add. This is valid
-;; because we know our lowering of a narrower-than-64-bit `iadd` will
-;; always write the full register width, so we can effectively ignore
-;; the `uextend` and look through it to the `ishl`.
-;;
-;; Priority 3 to avoid conflict with the previous rule.
-(rule 3 (amode_add (Amode.ImmReg off (valid_reg base) flags)
-                   (uextend (ishl index @ (iadd _ _) (iconst (uimm8 shift)))))
-      (if (u32_lteq (u8_as_u32 shift) 3))
-      (Amode.ImmRegRegShift off base index shift flags))

 ;; -- Case 4 (absorbing constant offsets).
 ;;
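Note that the removed comment's premise, that a narrower-than-64-bit `iadd` lowering always writes the full register width, is true on x64 (32-bit instructions zero the upper 32 bits), but it only justifies looking through the `uextend` for the add itself. The `ishl` result still needs 32-bit truncation, and evaluating the shift inside the 64-bit address computation skips it. A Rust sketch of the mismatch (hypothetical helper, not from the patch):

// Models a 32-bit `addl`: writes the full register, upper 32 bits zeroed.
fn addl(a: u64, b: u64) -> u64 {
    (a as u32).wrapping_add(b as u32) as u64
}

fn main() {
    let sum = addl(0x7000_0000, 0x2000_0000); // 0x9000_0000, upper bits zero
    // Shift folded into the 64-bit address computation: high bits survive.
    let shifted_64 = sum << 2;                   // 0x2_4000_0000
    // CLIF semantics: 32-bit shift truncates, then zero-extend.
    let shifted_32 = ((sum as u32) << 2) as u64; // 0x4000_0000
    assert_ne!(shifted_64, shifted_32);
}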
@@ -209,8 +209,9 @@ block0(v0: i64, v1: i32):
; pushq %rbp
; movq %rsp, %rbp
; block0:
; movl %esi, %ecx
; movq -1(%rdi,%rcx,8), %rax
; movq %rsi, %rdx
; shll $3, %edx, %edx
; movq -1(%rdi,%rdx,1), %rax
; movq %rbp, %rsp
; popq %rbp
; ret
@@ -220,8 +221,9 @@ block0(v0: i64, v1: i32):
; pushq %rbp
; movq %rsp, %rbp
; block1: ; offset 0x4
; movl %esi, %ecx
; movq -1(%rdi, %rcx, 8), %rax ; trap: heap_oob
; movq %rsi, %rdx
; shll $3, %edx
; movq -1(%rdi, %rdx), %rax ; trap: heap_oob
; movq %rbp, %rsp
; popq %rbp
; retq
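In the fixed output above, the shift is done by a separate 32-bit `shll` before the address is formed, and the second load drops the scale (`-1(%rdi, %rdx)` instead of `-1(%rdi, %rdx, 8)`). The correctness hinges on the x86-64 rule that writing a 32-bit register zero-extends into the full 64-bit register. A small Rust model of `shll $3, %edx` under that assumption (not from the patch):

// Models `shll $3, %edx`: the shift happens at 32 bits and the result
// zero-extends into %rdx, restoring the truncation that the folded
// addressing mode skipped.
fn shll_rdx(rdx: u64, imm: u32) -> u64 {
    (rdx as u32).wrapping_shl(imm) as u64
}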
@@ -244,7 +246,8 @@ block0(v0: i64, v1: i32, v2: i32):
; block0:
; movq %rsi, %r8
; addl %r8d, %edx, %r8d
; movq -1(%rdi,%r8,4), %rax
; shll $2, %r8d, %r8d
; movq -1(%rdi,%r8,1), %rax
; movq %rbp, %rsp
; popq %rbp
; ret
@@ -256,7 +259,8 @@ block0(v0: i64, v1: i32, v2: i32):
; block1: ; offset 0x4
; movq %rsi, %r8
; addl %edx, %r8d
; movq -1(%rdi, %r8, 4), %rax ; trap: heap_oob
; shll $2, %r8d
; movq -1(%rdi, %r8), %rax ; trap: heap_oob
; movq %rbp, %rsp
; popq %rbp
; retq
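The same pattern fixes the `iadd` case: both the add and the shift stay at 32 bits, and only the final address calculation is 64-bit. A Rust sketch of the address computed by `addl %edx, %r8d; shll $2, %r8d; movq -1(%rdi, %r8), %rax` (register names follow the disassembly above; the helper is hypothetical):

fn fixed_address(rdi: u64, rsi: u64, rdx: u64) -> u64 {
    let r8 = (rsi as u32).wrapping_add(rdx as u32) as u64; // addl: 32-bit add, upper bits zeroed
    let r8 = (r8 as u32).wrapping_shl(2) as u64;           // shll: 32-bit shift, upper bits zeroed
    rdi.wrapping_add(r8).wrapping_sub(1)                   // -1(%rdi, %r8)
}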