Move the 'meta' dir to 'lib/cretonne/meta'.
The 'lib/cretonne' directory will be the new root of a stand-alone cretonne crate containing both Python and Rust sources. This is in preparation for publishing crates on crates.io.
26 lib/cretonne/meta/build.py Normal file
@@ -0,0 +1,26 @@
# Second-level build script.
#
# This script is run from src/libcretonne/build.rs to generate Rust files.

from __future__ import absolute_import
import argparse
import isa
import gen_types
import gen_instr
import gen_settings
import gen_build_deps
import gen_encoding

parser = argparse.ArgumentParser(description='Generate sources for Cretonne.')
parser.add_argument('--out-dir', help='set output directory')

args = parser.parse_args()
out_dir = args.out_dir

isas = isa.all_isas()

gen_types.generate(out_dir)
gen_instr.generate(isas, out_dir)
gen_settings.generate(isas, out_dir)
gen_encoding.generate(isas, out_dir)
gen_build_deps.generate()
12 lib/cretonne/meta/check.sh Executable file
@@ -0,0 +1,12 @@
#!/bin/bash
set -e
cd $(dirname "$0")

# Run unit tests.
python -m unittest discover

# Check Python sources for Python 3 compatibility using pylint.
#
# Install pylint with 'pip install pylint'.
pylint --py3k --reports=no -- *.py cretonne isa
flake8 .
76 lib/cretonne/meta/constant_hash.py Normal file
@@ -0,0 +1,76 @@
"""
Generate constant hash tables.

The `constant_hash` module can generate constant pre-populated hash tables. We
don't attempt perfect hashing, but simply generate an open addressed
quadratically probed hash table.
"""
from __future__ import absolute_import


def simple_hash(s):
    """
    Compute a primitive hash of a string.

    Example:
        >>> hex(simple_hash("Hello"))
        '0x2fa70c01'
        >>> hex(simple_hash("world"))
        '0x5b0c31d5'
    """
    h = 5381
    for c in s:
        h = ((h ^ ord(c)) + ((h >> 6) + (h << 26))) & 0xffffffff
    return h


def next_power_of_two(x):
    """
    Compute the next power of two that is greater than `x`:
    >>> next_power_of_two(0)
    1
    >>> next_power_of_two(1)
    2
    >>> next_power_of_two(2)
    4
    >>> next_power_of_two(3)
    4
    >>> next_power_of_two(4)
    8
    """
    s = 1
    while x & (x + 1) != 0:
        x |= x >> s
        s *= 2
    return x + 1


def compute_quadratic(items, hash_function):
    """
    Compute an open addressed, quadratically probed hash table containing
    `items`. The returned table is a list containing the elements of the
    iterable `items` and `None` in unused slots.

    :param items: Iterable set of items to place in hash table.
    :param hash_function: Hash function which takes an item and returns a
        number.

    Simple example (see hash values above, they collide on slot 1):
    >>> compute_quadratic(['Hello', 'world'], simple_hash)
    [None, 'Hello', 'world', None]
    """

    items = list(items)
    # Table size must be a power of two. Aim for >20% unused slots.
    size = next_power_of_two(int(1.20*len(items)))
    table = [None] * size

    for i in items:
        h = hash_function(i) % size
        s = 0
        while table[h] is not None:
            s += 1
            h = (h + s) % size
        table[h] = i

    return table
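The generated table is only useful if it is probed with the same quadratic sequence that compute_quadratic used to place the items. A minimal lookup sketch, assuming a hypothetical probe() helper that is not part of this patch:

    # Hypothetical companion to compute_quadratic: probing a generated table
    # at lookup time with the same quadratic probe sequence.
    def probe(table, key, hash_function):
        size = len(table)               # always a power of two
        h = hash_function(key) % size
        s = 0
        while table[h] is not None:
            if table[h] == key:
                return h                # found the slot holding `key`
            s += 1
            h = (h + s) % size          # same step sequence as compute_quadratic
        return None                     # hit an empty slot: key is absent

    table = compute_quadratic(['Hello', 'world'], simple_hash)
    assert table[probe(table, 'world', simple_hash)] == 'world'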
1155 lib/cretonne/meta/cretonne/__init__.py Normal file
File diff suppressed because it is too large
122 lib/cretonne/meta/cretonne/ast.py Normal file
@@ -0,0 +1,122 @@
"""
Abstract syntax trees.

This module defines classes that can be used to create abstract syntax trees
for pattern matching and rewriting of cretonne instructions.
"""
from __future__ import absolute_import


class Def(object):
    """
    An AST definition associates a set of variables with the values produced by
    an expression.

    Example:

    >>> from .base import iadd_cout, iconst
    >>> x = Var('x')
    >>> y = Var('y')
    >>> x << iconst(4)
    (Var(x),) << Apply(iconst, (4,))
    >>> (x, y) << iadd_cout(4, 5)
    (Var(x), Var(y)) << Apply(iadd_cout, (4, 5))

    The `<<` operator is used to create variable definitions.

    :param defs: Single variable or tuple of variables to be defined.
    :param expr: Expression generating the values.
    """

    def __init__(self, defs, expr):
        if not isinstance(defs, tuple):
            defs = (defs,)
        assert isinstance(expr, Expr)
        self.defs = defs
        self.expr = expr

    def __repr__(self):
        return "{} << {!r}".format(self.defs, self.expr)

    def __str__(self):
        if len(self.defs) == 1:
            return "{!s} << {!s}".format(self.defs[0], self.expr)
        else:
            return "({}) << {!s}".format(", ".join(self.defs), self.expr)


class Expr(object):
    """
    An AST expression.
    """

    def __rlshift__(self, other):
        """
        Define variables using `var << expr` or `(v1, v2) << expr`.
        """
        return Def(other, self)


class Var(Expr):
    """
    A free variable.
    """

    def __init__(self, name):
        self.name = name
        # Bitmask of contexts where this variable is defined.
        # See XForm._rewrite_defs().
        self.defctx = 0

    def __str__(self):
        return self.name

    def __repr__(self):
        s = self.name
        if self.defctx:
            s += ", d={:02b}".format(self.defctx)
        return "Var({})".format(s)


class Apply(Expr):
    """
    Apply an instruction to arguments.

    An `Apply` AST expression is created by using function call syntax on
    instructions. This applies to both bound and unbound polymorphic
    instructions:

    >>> from .base import jump, iadd
    >>> jump('next', ())
    Apply(jump, ('next', ()))
    >>> iadd.i32('x', 'y')
    Apply(iadd.i32, ('x', 'y'))

    :param inst: The instruction being applied, an `Instruction` or
        `BoundInstruction` instance.
    :param args: Tuple of arguments.
    """

    def __init__(self, inst, args):
        from . import BoundInstruction
        if isinstance(inst, BoundInstruction):
            self.inst = inst.inst
            self.typevars = inst.typevars
        else:
            self.inst = inst
            self.typevars = ()
        self.args = args
        assert len(self.inst.ins) == len(args)

    def instname(self):
        i = self.inst.name
        for t in self.typevars:
            i += '.{}'.format(t)
        return i

    def __repr__(self):
        return "Apply({}, {})".format(self.instname(), self.args)

    def __str__(self):
        args = ', '.join(map(str, self.args))
        return '{}({})'.format(self.instname(), args)
1149 lib/cretonne/meta/cretonne/base.py Normal file
File diff suppressed because it is too large
29 lib/cretonne/meta/cretonne/entities.py Normal file
@@ -0,0 +1,29 @@
"""
The `cretonne.entities` module predefines all the Cretonne entity reference
operand types. There are corresponding definitions in the `cretonne.entities`
Rust module.
"""
from __future__ import absolute_import
from . import EntityRefKind


#: A reference to an extended basic block in the same function.
#: This is primarily used in control flow instructions.
ebb = EntityRefKind(
    'ebb', 'An extended basic block in the same function.',
    default_member='destination')

#: A reference to a stack slot declared in the function preamble.
stack_slot = EntityRefKind('stack_slot', 'A stack slot.')

#: A reference to a function signature declared in the function preamble.
#: This is used to provide the call signature in an indirect call instruction.
sig_ref = EntityRefKind('sig_ref', 'A function signature.')

#: A reference to an external function declared in the function preamble.
#: This is used to provide the callee and signature in a call instruction.
func_ref = EntityRefKind('func_ref', 'An external function.')

#: A reference to a jump table declared in the function preamble.
jump_table = EntityRefKind(
    'jump_table', 'A jump table.', default_member='table')
54 lib/cretonne/meta/cretonne/formats.py Normal file
@@ -0,0 +1,54 @@
"""
The cretonne.formats module defines all instruction formats.

Every instruction format has a corresponding `InstructionData` variant in the
Rust representation of cretonne IL, so all instruction formats must be defined
in this module.
"""
from __future__ import absolute_import
from . import InstructionFormat, value, variable_args
from .immediates import imm64, uimm8, ieee32, ieee64, immvector, intcc, floatcc
from .entities import ebb, func_ref, jump_table

Nullary = InstructionFormat()

Unary = InstructionFormat(value)
UnaryImm = InstructionFormat(imm64)
UnaryIeee32 = InstructionFormat(ieee32)
UnaryIeee64 = InstructionFormat(ieee64)
UnaryImmVector = InstructionFormat(immvector, boxed_storage=True)
UnarySplit = InstructionFormat(value, multiple_results=True)

Binary = InstructionFormat(value, value)
BinaryImm = InstructionFormat(value, imm64)
BinaryImmRev = InstructionFormat(imm64, value)

# Generate result + overflow flag.
BinaryOverflow = InstructionFormat(value, value, multiple_results=True)

# The select instructions are controlled by the second value operand.
# The first value operand is the controlling flag which has a derived type.
# The fma instruction has the same constraint on all inputs.
Ternary = InstructionFormat(value, value, value, typevar_operand=1)

# Carry in *and* carry out for `iadd_carry` and friends.
TernaryOverflow = InstructionFormat(
    value, value, value, multiple_results=True, boxed_storage=True)

InsertLane = InstructionFormat(value, ('lane', uimm8), value)
ExtractLane = InstructionFormat(value, ('lane', uimm8))

IntCompare = InstructionFormat(intcc, value, value)
FloatCompare = InstructionFormat(floatcc, value, value)

Jump = InstructionFormat(ebb, variable_args, boxed_storage=True)
Branch = InstructionFormat(value, ebb, variable_args, boxed_storage=True)
BranchTable = InstructionFormat(value, jump_table)

Call = InstructionFormat(
    func_ref, variable_args, multiple_results=True, boxed_storage=True)
Return = InstructionFormat(variable_args, boxed_storage=True)


# Finally extract the names of global variables in this module.
InstructionFormat.extract_names(globals())
52 lib/cretonne/meta/cretonne/immediates.py Normal file
@@ -0,0 +1,52 @@
"""
The `cretonne.immediates` module predefines all the Cretonne immediate operand
types.
"""
from __future__ import absolute_import
from . import ImmediateKind

#: A 64-bit immediate integer operand.
#:
#: This type of immediate integer can interact with SSA values with any
#: :py:class:`cretonne.IntType` type.
imm64 = ImmediateKind('imm64', 'A 64-bit immediate integer.')

#: An unsigned 8-bit immediate integer operand.
#:
#: This small operand is used to indicate lane indexes in SIMD vectors and
#: immediate bit counts on shift instructions.
uimm8 = ImmediateKind('uimm8', 'An 8-bit immediate unsigned integer.')

#: A 32-bit immediate floating point operand.
#:
#: IEEE 754-2008 binary32 interchange format.
ieee32 = ImmediateKind('ieee32', 'A 32-bit immediate floating point number.')

#: A 64-bit immediate floating point operand.
#:
#: IEEE 754-2008 binary64 interchange format.
ieee64 = ImmediateKind('ieee64', 'A 64-bit immediate floating point number.')

#: A large SIMD vector constant.
immvector = ImmediateKind(
    'immvector',
    'An immediate SIMD vector.',
    rust_type='ImmVector')

#: A condition code for comparing integer values.
#:
#: This enumerated operand kind is used for the :cton:inst:`icmp` instruction
#: and corresponds to the `condcodes::IntCC` Rust type.
intcc = ImmediateKind(
    'intcc',
    'An integer comparison condition code.',
    default_member='cond', rust_type='IntCC')

#: A condition code for comparing floating point values.
#:
#: This enumerated operand kind is used for the :cton:inst:`fcmp` instruction
#: and corresponds to the `condcodes::FloatCC` Rust type.
floatcc = ImmediateKind(
    'floatcc',
    'A floating point comparison condition code.',
    default_member='cond', rust_type='FloatCC')
48 lib/cretonne/meta/cretonne/legalize.py Normal file
@@ -0,0 +1,48 @@
"""
Patterns for legalizing the `base` instruction set.

The base Cretonne instruction set is 'fat', and many instructions don't have
legal representations in a given target ISA. This module defines legalization
patterns that describe how base instructions can be transformed to other base
instructions that are legal.
"""
from __future__ import absolute_import
from .base import iadd, iadd_cout, iadd_cin, isplit_lohi, iconcat_lohi
from .base import isub, isub_bin, isub_bout
from .ast import Var
from .xform import Rtl, XFormGroup


narrow = XFormGroup()

x = Var('x')
y = Var('y')
a = Var('a')
b = Var('b')
c = Var('c')
xl = Var('xl')
xh = Var('xh')
yl = Var('yl')
yh = Var('yh')
al = Var('al')
ah = Var('ah')

narrow.legalize(
    a << iadd(x, y),
    Rtl(
        (xl, xh) << isplit_lohi(x),
        (yl, yh) << isplit_lohi(y),
        (al, c) << iadd_cout(xl, yl),
        ah << iadd_cin(xh, yh, c),
        a << iconcat_lohi(al, ah)
    ))

narrow.legalize(
    a << isub(x, y),
    Rtl(
        (xl, xh) << isplit_lohi(x),
        (yl, yh) << isplit_lohi(y),
        (al, b) << isub_bout(xl, yl),
        ah << isub_bin(xh, yh, b),
        a << iconcat_lohi(al, ah)
    ))
240 lib/cretonne/meta/cretonne/predicates.py Normal file
@@ -0,0 +1,240 @@
"""
Cretonne predicates.

A *predicate* is a function that computes a boolean result. The inputs to the
function determine the kind of predicate:

- An *ISA predicate* is evaluated on the current ISA settings together with the
  shared settings defined in the :py:mod:`settings` module. Once a target ISA
  has been configured, the value of all ISA predicates is known.

- An *Instruction predicate* is evaluated on an instruction instance, so it can
  inspect all the immediate fields and type variables of the instruction.
  Instruction predicates can be evaluated before register allocation, so they
  can not depend on specific register assignments to the value operands or
  outputs.

Predicates can also be computed from other predicates using the `And`, `Or`,
and `Not` combinators defined in this module.

All predicates have a *context* which determines where they can be evaluated.
For an ISA predicate, the context is the ISA settings group. For an instruction
predicate, the context is the instruction format.
"""
from __future__ import absolute_import
from functools import reduce


def _is_parent(a, b):
    """
    Return true if a is a parent of b, or equal to it.
    """
    while b and a is not b:
        b = getattr(b, 'parent', None)
    return a is b


def _descendant(a, b):
    """
    If a is a parent of b or b is a parent of a, return the descendant of the
    two.

    If neither is a parent of the other, return None.
    """
    if _is_parent(a, b):
        return b
    if _is_parent(b, a):
        return a
    return None


class Predicate(object):
    """
    Superclass for all computed predicates.

    Leaf predicates can have other types, such as `Setting`.

    :param parts: Tuple of components in the predicate expression.
    """

    def __init__(self, parts):
        self.name = None
        self.parts = parts
        self.context = reduce(
            _descendant,
            (p.predicate_context() for p in parts))
        assert self.context, "Incompatible predicate parts"

    def __str__(self):
        if self.name:
            return '{}.{}'.format(self.context.name, self.name)
        else:
            return '{}({})'.format(
                type(self).__name__,
                ', '.join(map(str, self.parts)))

    def predicate_context(self):
        return self.context

    def predicate_leafs(self, leafs):
        """
        Collect all leaf predicates into the `leafs` set.
        """
        for part in self.parts:
            part.predicate_leafs(leafs)


class And(Predicate):
    """
    Computed predicate that is true if all parts are true.
    """

    precedence = 2

    def __init__(self, *args):
        super(And, self).__init__(args)

    def rust_predicate(self, prec):
        """
        Return a Rust expression computing the value of this predicate.

        The surrounding precedence determines whether parentheses are needed:

        0. An `if` statement.
        1. An `||` expression.
        2. An `&&` expression.
        3. A `!` expression.
        """
        s = ' && '.join(p.rust_predicate(And.precedence) for p in self.parts)
        if prec > And.precedence:
            s = '({})'.format(s)
        return s

    @staticmethod
    def combine(*args):
        """
        Combine a sequence of predicates, allowing for `None` members.

        Return a predicate that is true when all non-`None` arguments are true,
        or `None` if all of the arguments are `None`.
        """
        args = tuple(p for p in args if p)
        if args == ():
            return None
        if len(args) == 1:
            return args[0]
        # We have multiple predicate args. Combine with `And`.
        return And(*args)


class Or(Predicate):
    """
    Computed predicate that is true if any parts are true.
    """

    precedence = 1

    def __init__(self, *args):
        super(Or, self).__init__(args)

    def rust_predicate(self, prec):
        s = ' || '.join(p.rust_predicate(Or.precedence) for p in self.parts)
        if prec > Or.precedence:
            s = '({})'.format(s)
        return s


class Not(Predicate):
    """
    Computed predicate that is true if its single part is false.
    """

    precedence = 3

    def __init__(self, part):
        super(Not, self).__init__((part,))

    def rust_predicate(self, prec):
        return '!' + self.parts[0].rust_predicate(Not.precedence)


class FieldPredicate(object):
    """
    An instruction predicate that performs a test on a single `FormatField`.

    :param field: The `FormatField` to be tested.
    :param function: Boolean predicate function to call.
    :param args: Additional arguments for the predicate function.
    """

    def __init__(self, field, function, args):
        self.field = field
        self.function = function
        self.args = args

    def __str__(self):
        args = (self.field.name,) + tuple(map(str, self.args))
        return '{}({})'.format(self.function, ', '.join(args))

    def predicate_context(self):
        """
        This predicate can be evaluated in the context of an instruction
        format.
        """
        return self.field.format

    def predicate_leafs(self, leafs):
        leafs.add(self)

    def rust_predicate(self, prec):
        """
        Return a string of Rust code that evaluates this predicate.
        """
        # Prepend `field` to the predicate function arguments.
        args = (self.field.rust_name(),) + tuple(map(str, self.args))
        return 'predicates::{}({})'.format(self.function, ', '.join(args))


class IsSignedInt(FieldPredicate):
    """
    Instruction predicate that checks if an immediate instruction format field
    is representable as an n-bit two's complement integer.

    :param field: `FormatField` to be checked.
    :param width: Number of bits in the allowed range.
    :param scale: Number of low bits that must be 0.

    The predicate is true if the field is in the range:
    `-2^(width-1) -- 2^(width-1)-1`
    and a multiple of `2^scale`.
    """

    def __init__(self, field, width, scale=0):
        super(IsSignedInt, self).__init__(
            field, 'is_signed_int', (width, scale))
        self.width = width
        self.scale = scale
        assert width >= 0 and width <= 64
        assert scale >= 0 and scale < width


class IsUnsignedInt(FieldPredicate):
    """
    Instruction predicate that checks if an immediate instruction format field
    is representable as an n-bit unsigned integer.

    :param field: `FormatField` to be checked.
    :param width: Number of bits in the allowed range.
    :param scale: Number of low bits that must be 0.

    The predicate is true if the field is in the range:
    `0 -- 2^width - 1` and a multiple of `2^scale`.
    """

    def __init__(self, field, width, scale=0):
        super(IsUnsignedInt, self).__init__(
            field, 'is_unsigned_int', (width, scale))
        self.width = width
        self.scale = scale
        assert width >= 0 and width <= 64
        assert scale >= 0 and scale < width
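A small, self-contained sketch of how these combinators compose. `Field` here is a hypothetical stand-in for `FormatField` (which lives in `cretonne/__init__.py`, whose diff is suppressed above); the printed string is what `rust_predicate(0)` would emit into the generated Rust source:

    # Illustration only: a minimal stand-in providing the `name`, `format` and
    # `rust_name()` members that FieldPredicate expects.
    class Field(object):
        def __init__(self, fmt, name):
            self.format = fmt
            self.name = name

        def rust_name(self):
            return 'args.' + self.name

    fmt = object()   # shared predicate context (normally the instruction format)
    p = And(IsSignedInt(Field(fmt, 'imm'), 8),
            Not(IsUnsignedInt(Field(fmt, 'lane'), 4)))
    print(p.rust_predicate(0))
    # -> predicates::is_signed_int(args.imm, 8, 0) && !predicates::is_unsigned_int(args.lane, 4, 0)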
35 lib/cretonne/meta/cretonne/settings.py Normal file
@@ -0,0 +1,35 @@
"""
Cretonne shared settings.

This module defines settings that are relevant for all code generators.
"""
from __future__ import absolute_import
from . import SettingGroup, BoolSetting, EnumSetting

group = SettingGroup('shared')

opt_level = EnumSetting(
    """
    Optimization level:

    - default: Very profitable optimizations enabled, none slow.
    - best: Enable all optimizations
    - fastest: Optimize for compile time by disabling most optimizations.
    """,
    'default', 'best', 'fastest')

is_64bit = BoolSetting("Enable 64-bit code generation")

enable_float = BoolSetting(
    """Enable the use of floating-point instructions""",
    default=True)

enable_simd = BoolSetting(
    """Enable the use of SIMD instructions.""",
    default=True)

enable_atomics = BoolSetting(
    """Enable the use of atomic instructions""",
    default=True)

group.close(globals())
28 lib/cretonne/meta/cretonne/test_ast.py Normal file
@@ -0,0 +1,28 @@
from __future__ import absolute_import
from unittest import TestCase
from doctest import DocTestSuite
from . import ast
from .base import jump, iadd


def load_tests(loader, tests, ignore):
    tests.addTests(DocTestSuite(ast))
    return tests


x = 'x'
y = 'y'
a = 'a'


class TestPatterns(TestCase):
    def test_apply(self):
        i = jump(x, y)
        self.assertEqual(repr(i), "Apply(jump, ('x', 'y'))")

        i = iadd.i32(x, y)
        self.assertEqual(repr(i), "Apply(iadd.i32, ('x', 'y'))")

    def test_single_ins(self):
        pat = a << iadd.i32(x, y)
        self.assertEqual(repr(pat), "('a',) << Apply(iadd.i32, ('x', 'y'))")
64 lib/cretonne/meta/cretonne/test_typevar.py Normal file
@@ -0,0 +1,64 @@
from __future__ import absolute_import
from unittest import TestCase
from doctest import DocTestSuite
from . import typevar
from .typevar import TypeSet, TypeVar


def load_tests(loader, tests, ignore):
    tests.addTests(DocTestSuite(typevar))
    return tests


class TestTypeSet(TestCase):
    def test_invalid(self):
        with self.assertRaises(AssertionError):
            TypeSet(lanes=(2, 1))
        with self.assertRaises(AssertionError):
            TypeSet(ints=(32, 16))
        with self.assertRaises(AssertionError):
            TypeSet(floats=(32, 16))
        with self.assertRaises(AssertionError):
            TypeSet(bools=(32, 16))
        with self.assertRaises(AssertionError):
            TypeSet(ints=(32, 33))

    def test_hash(self):
        a = TypeSet(lanes=True, ints=True, floats=True)
        b = TypeSet(lanes=True, ints=True, floats=True)
        c = TypeSet(lanes=True, ints=(8, 16), floats=True)
        self.assertEqual(a, b)
        self.assertNotEqual(a, c)
        s = set()
        s.add(a)
        self.assertTrue(a in s)
        self.assertTrue(b in s)
        self.assertFalse(c in s)

    def test_hash_modified(self):
        a = TypeSet(lanes=True, ints=True, floats=True)
        s = set()
        s.add(a)
        a.max_int = 32
        # Can't rehash after modification.
        with self.assertRaises(AssertionError):
            a in s


class TestTypeVar(TestCase):
    def test_functions(self):
        x = TypeVar('x', 'all ints', ints=True)
        with self.assertRaises(AssertionError):
            x.double_width()
        with self.assertRaises(AssertionError):
            x.half_width()

        x2 = TypeVar('x2', 'i16 and up', ints=(16, 64))
        with self.assertRaises(AssertionError):
            x2.double_width()
        self.assertEqual(str(x2.half_width()), '`HalfWidth(x2)`')

        x3 = TypeVar('x3', 'up to i32', ints=(8, 32))
        self.assertEqual(str(x3.double_width()), '`DoubleWidth(x3)`')
        with self.assertRaises(AssertionError):
            x3.half_width()
59 lib/cretonne/meta/cretonne/test_xform.py Normal file
@@ -0,0 +1,59 @@
from __future__ import absolute_import
from unittest import TestCase
from doctest import DocTestSuite
from . import xform
from .base import iadd, iadd_imm, iconst
from .ast import Var
from .xform import Rtl, XForm


def load_tests(loader, tests, ignore):
    tests.addTests(DocTestSuite(xform))
    return tests


x = Var('x')
y = Var('y')
a = Var('a')
c = Var('c')


class TestXForm(TestCase):
    def test_macro_pattern(self):
        src = Rtl(a << iadd_imm(x, y))
        dst = Rtl(
            c << iconst(y),
            a << iadd(x, c))
        XForm(src, dst)

    def test_def_input(self):
        # Src pattern has a def which is an input in dst.
        src = Rtl(a << iadd_imm(x, 1))
        dst = Rtl(y << iadd_imm(a, 1))
        with self.assertRaisesRegexp(
                AssertionError,
                "'a' used as both input and def"):
            XForm(src, dst)

    def test_input_def(self):
        # Converse of the above.
        src = Rtl(y << iadd_imm(a, 1))
        dst = Rtl(a << iadd_imm(x, 1))
        with self.assertRaisesRegexp(
                AssertionError,
                "'a' used as both input and def"):
            XForm(src, dst)

    def test_extra_input(self):
        src = Rtl(a << iadd_imm(x, 1))
        dst = Rtl(a << iadd(x, y))
        with self.assertRaisesRegexp(AssertionError, "extra inputs in dst"):
            XForm(src, dst)

    def test_double_def(self):
        src = Rtl(
            a << iadd_imm(x, 1),
            a << iadd(x, y))
        dst = Rtl(a << iadd(x, y))
        with self.assertRaisesRegexp(AssertionError, "'a' multiply defined"):
            XForm(src, dst)
38 lib/cretonne/meta/cretonne/types.py Normal file
@@ -0,0 +1,38 @@
"""
The cretonne.types module predefines all the Cretonne scalar types.
"""
from __future__ import absolute_import
from . import ScalarType, IntType, FloatType, BoolType

#: Boolean.
b1 = ScalarType(
    'b1', 0,
    """
    A boolean value that is either true or false.
    """)

b8 = BoolType(8)  #: 8-bit bool.
b16 = BoolType(16)  #: 16-bit bool.
b32 = BoolType(32)  #: 32-bit bool.
b64 = BoolType(64)  #: 64-bit bool.

i8 = IntType(8)  #: 8-bit int.
i16 = IntType(16)  #: 16-bit int.
i32 = IntType(32)  #: 32-bit int.
i64 = IntType(64)  #: 64-bit int.

#: IEEE single precision.
f32 = FloatType(
    32, """
    A 32-bit floating point type represented in the IEEE 754-2008
    *binary32* interchange format. This corresponds to the :c:type:`float`
    type in most C implementations.
    """)

#: IEEE double precision.
f64 = FloatType(
    64, """
    A 64-bit floating point type represented in the IEEE 754-2008
    *binary64* interchange format. This corresponds to the :c:type:`double`
    type in most C implementations.
    """)
317 lib/cretonne/meta/cretonne/typevar.py Normal file
@@ -0,0 +1,317 @@
"""
Type variables for parametric polymorphism.

Cretonne instructions and instruction transformations can be specified to be
polymorphic by using type variables.
"""
from __future__ import absolute_import
import math
from . import value


MAX_LANES = 256
MAX_BITS = 64


def is_power_of_two(x):
    return x > 0 and x & (x-1) == 0


def int_log2(x):
    return int(math.log(x, 2))


class TypeSet(object):
    """
    A set of types.

    We don't allow arbitrary subsets of types, but use a parametrized approach
    instead.

    Objects of this class can be used as dictionary keys.

    Parametrized type sets are specified in terms of ranges:

    - The permitted range of vector lanes, where 1 indicates a scalar type.
    - The permitted range of integer types.
    - The permitted range of floating point types, and
    - The permitted range of boolean types.

    The ranges are inclusive from smallest bit-width to largest bit-width.

    A typeset representing scalar integer types `i8` through `i32`:

    >>> TypeSet(ints=(8, 32))
    TypeSet(lanes=(1, 1), ints=(8, 32))

    Passing `True` instead of a range selects all available scalar types:

    >>> TypeSet(ints=True)
    TypeSet(lanes=(1, 1), ints=(8, 64))
    >>> TypeSet(floats=True)
    TypeSet(lanes=(1, 1), floats=(32, 64))
    >>> TypeSet(bools=True)
    TypeSet(lanes=(1, 1), bools=(1, 64))

    Similarly, passing `True` for the lanes selects all possible scalar and
    vector types:

    >>> TypeSet(lanes=True, ints=True)
    TypeSet(lanes=(1, 256), ints=(8, 64))

    :param lanes: `(min, max)` inclusive range of permitted vector lane counts.
    :param ints: `(min, max)` inclusive range of permitted scalar integer
        widths.
    :param floats: `(min, max)` inclusive range of permitted scalar floating
        point widths.
    :param bools: `(min, max)` inclusive range of permitted scalar boolean
        widths.
    """

    def __init__(self, lanes=None, ints=None, floats=None, bools=None):
        if lanes:
            if lanes is True:
                lanes = (1, MAX_LANES)
            self.min_lanes, self.max_lanes = lanes
            assert is_power_of_two(self.min_lanes)
            assert is_power_of_two(self.max_lanes)
            assert self.max_lanes <= MAX_LANES
        else:
            self.min_lanes = 1
            self.max_lanes = 1
        assert self.min_lanes <= self.max_lanes

        if ints:
            if ints is True:
                ints = (8, MAX_BITS)
            self.min_int, self.max_int = ints
            assert is_power_of_two(self.min_int)
            assert is_power_of_two(self.max_int)
            assert self.max_int <= MAX_BITS
            assert self.min_int <= self.max_int
        else:
            self.min_int = None
            self.max_int = None

        if floats:
            if floats is True:
                floats = (32, 64)
            self.min_float, self.max_float = floats
            assert is_power_of_two(self.min_float)
            assert self.min_float >= 32
            assert is_power_of_two(self.max_float)
            assert self.max_float <= 64
            assert self.min_float <= self.max_float
        else:
            self.min_float = None
            self.max_float = None

        if bools:
            if bools is True:
                bools = (1, MAX_BITS)
            self.min_bool, self.max_bool = bools
            assert is_power_of_two(self.min_bool)
            assert is_power_of_two(self.max_bool)
            assert self.max_bool <= MAX_BITS
            assert self.min_bool <= self.max_bool
        else:
            self.min_bool = None
            self.max_bool = None

    def typeset_key(self):
        """Key tuple used for hashing and equality."""
        return (self.min_lanes, self.max_lanes,
                self.min_int, self.max_int,
                self.min_float, self.max_float,
                self.min_bool, self.max_bool)

    def __hash__(self):
        h = hash(self.typeset_key())
        assert h == getattr(self, 'prev_hash', h), "TypeSet changed!"
        self.prev_hash = h
        return h

    def __eq__(self, other):
        return self.typeset_key() == other.typeset_key()

    def __repr__(self):
        s = 'TypeSet(lanes=({}, {})'.format(self.min_lanes, self.max_lanes)
        if self.min_int is not None:
            s += ', ints=({}, {})'.format(self.min_int, self.max_int)
        if self.min_float is not None:
            s += ', floats=({}, {})'.format(self.min_float, self.max_float)
        if self.min_bool is not None:
            s += ', bools=({}, {})'.format(self.min_bool, self.max_bool)
        return s + ')'

    def emit_fields(self, fmt):
        """Emit field initializers for this typeset."""
        fmt.comment(repr(self))
        fields = ('lanes', 'int', 'float', 'bool')
        for field in fields:
            min_val = getattr(self, 'min_' + field)
            max_val = getattr(self, 'max_' + field)
            if min_val is None:
                fmt.line('min_{}: 0,'.format(field))
                fmt.line('max_{}: 0,'.format(field))
            else:
                fmt.line('min_{}: {},'.format(
                    field, int_log2(min_val)))
                fmt.line('max_{}: {},'.format(
                    field, int_log2(max_val) + 1))

    def __iand__(self, other):
        """
        Intersect self with other type set.

        >>> a = TypeSet(lanes=True, ints=(16, 32))
        >>> a
        TypeSet(lanes=(1, 256), ints=(16, 32))
        >>> b = TypeSet(lanes=(4, 16), ints=True)
        >>> a &= b
        >>> a
        TypeSet(lanes=(4, 16), ints=(16, 32))

        >>> a = TypeSet(lanes=True, bools=(1, 8))
        >>> b = TypeSet(lanes=True, bools=(16, 32))
        >>> a &= b
        >>> a
        TypeSet(lanes=(1, 256))
        """
        self.min_lanes = max(self.min_lanes, other.min_lanes)
        self.max_lanes = min(self.max_lanes, other.max_lanes)

        self.min_int = max(self.min_int, other.min_int)
        self.max_int = min(self.max_int, other.max_int)
        if self.min_int > self.max_int:
            self.min_int = None
            self.max_int = None

        self.min_float = max(self.min_float, other.min_float)
        self.max_float = min(self.max_float, other.max_float)
        if self.min_float > self.max_float:
            self.min_float = None
            self.max_float = None

        self.min_bool = max(self.min_bool, other.min_bool)
        self.max_bool = min(self.max_bool, other.max_bool)
        if self.min_bool > self.max_bool:
            self.min_bool = None
            self.max_bool = None

        return self


class TypeVar(object):
    """
    Type variables can be used in place of concrete types when defining
    instructions. This makes the instructions *polymorphic*.

    A type variable is restricted to vary over a subset of the value types.
    This subset is specified by a set of flags that control the permitted base
    types and whether the type variable can assume scalar or vector types, or
    both.

    :param name: Short name of type variable used in instruction descriptions.
    :param doc: Documentation string.
    :param ints: Allow all integer base types, or `(min, max)` bit-range.
    :param floats: Allow all floating point base types, or `(min, max)`
        bit-range.
    :param bools: Allow all boolean base types, or `(min, max)` bit-range.
    :param scalars: Allow type variable to assume scalar types.
    :param simd: Allow type variable to assume vector types, or `(min, max)`
        lane count range.
    """

    def __init__(
            self, name, doc,
            ints=False, floats=False, bools=False,
            scalars=True, simd=False,
            base=None, derived_func=None):
        self.name = name
        self.__doc__ = doc
        self.is_derived = isinstance(base, TypeVar)
        if base:
            assert self.is_derived
            assert derived_func
            self.base = base
            self.derived_func = derived_func
            self.name = '{}({})'.format(derived_func, base.name)
        else:
            min_lanes = 1 if scalars else 2
            if simd:
                if simd is True:
                    max_lanes = MAX_LANES
                else:
                    min_lanes, max_lanes = simd
                    assert not scalars or min_lanes <= 2
            else:
                max_lanes = 1

            self.type_set = TypeSet(
                lanes=(min_lanes, max_lanes),
                ints=ints,
                floats=floats,
                bools=bools)

    def __str__(self):
        return "`{}`".format(self.name)

    def lane_of(self):
        """
        Return a derived type variable that is the scalar lane type of this
        type variable.

        When this type variable assumes a scalar type, the derived type will be
        the same scalar type.
        """
        return TypeVar(None, None, base=self, derived_func='LaneOf')

    def as_bool(self):
        """
        Return a derived type variable that has the same vector geometry as
        this type variable, but with boolean lanes. Scalar types map to `b1`.
        """
        return TypeVar(None, None, base=self, derived_func='AsBool')

    def half_width(self):
        """
        Return a derived type variable that has the same number of vector lanes
        as this one, but the lanes are half the width.
        """
        ts = self.type_set
        if ts.min_int:
            assert ts.min_int > 8, "Can't halve all integer types"
        if ts.min_float:
            assert ts.min_float > 32, "Can't halve all float types"
        if ts.min_bool:
            assert ts.min_bool > 8, "Can't halve all boolean types"

        return TypeVar(None, None, base=self, derived_func='HalfWidth')

    def double_width(self):
        """
        Return a derived type variable that has the same number of vector lanes
        as this one, but the lanes are double the width.
        """
        ts = self.type_set
        if ts.max_int:
            assert ts.max_int < MAX_BITS, "Can't double all integer types."
        if ts.max_float:
            assert ts.max_float < MAX_BITS, "Can't double all float types."
        if ts.max_bool:
            assert ts.max_bool < MAX_BITS, "Can't double all boolean types."

        return TypeVar(None, None, base=self, derived_func='DoubleWidth')

    def operand_kind(self):
        # When a `TypeVar` object is used to describe the type of an `Operand`
        # in an instruction definition, the kind of that operand is an SSA
        # value.
        return value

    def free_typevar(self):
        if self.is_derived:
            return self.base
        else:
            return self
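A short usage sketch; the `Int` variable and the printed values are illustrative, not part of the patch:

    # Illustration only: a type variable covering scalar and vector integers.
    Int = TypeVar('Int', 'any integer type', ints=True, simd=True)
    print(Int.type_set)                  # TypeSet(lanes=(1, 256), ints=(8, 64))
    print(Int.as_bool())                 # `AsBool(Int)`
    print(Int.lane_of())                 # `LaneOf(Int)`
    print(Int.as_bool().free_typevar())  # `Int` -- derived vars point back at their base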
182 lib/cretonne/meta/cretonne/xform.py Normal file
@@ -0,0 +1,182 @@
"""
Instruction transformations.
"""
from __future__ import absolute_import
from .ast import Def, Var, Apply


SRCCTX = 1
DSTCTX = 2


class Rtl(object):
    """
    Register Transfer Language list.

    An RTL object contains a list of register assignments in the form of `Def`
    objects and/or Apply objects for side-effecting instructions.

    An RTL list can represent both a source pattern to be matched, or a
    destination pattern to be inserted.
    """

    def __init__(self, *args):
        self.rtl = args

    def __iter__(self):
        return iter(self.rtl)


class XForm(object):
    """
    An instruction transformation consists of a source and destination pattern.

    Patterns are expressed in *register transfer language* as tuples of
    `ast.Def` or `ast.Expr` nodes.

    A legalization pattern must have a source pattern containing only a single
    instruction.

    >>> from .base import iconst, iadd, iadd_imm
    >>> a = Var('a')
    >>> c = Var('c')
    >>> v = Var('v')
    >>> x = Var('x')
    >>> XForm(
    ...     Rtl(c << iconst(v),
    ...         a << iadd(x, c)),
    ...     Rtl(a << iadd_imm(x, v)))
    XForm(inputs=[Var(v), Var(x)], defs=[Var(c, d=01), Var(a, d=11)],
     c << iconst(v)
     a << iadd(x, c)
    =>
     a << iadd_imm(x, v)
    )
    """

    def __init__(self, src, dst):
        self.src = src
        self.dst = dst
        # Variables that are inputs to the source pattern.
        self.inputs = list()
        # Variables defined in either src or dst.
        self.defs = list()

        # Rewrite variables in src and dst RTL lists to our own copies.
        # Map name -> private Var.
        symtab = dict()
        self._rewrite_rtl(src, symtab, SRCCTX)
        num_src_inputs = len(self.inputs)
        self._rewrite_rtl(dst, symtab, DSTCTX)

        # Check for inconsistently used inputs.
        for i in self.inputs:
            if i.defctx:
                raise AssertionError(
                    "'{}' used as both input and def".format(i))

        # Check for spurious inputs in dst.
        if len(self.inputs) > num_src_inputs:
            raise AssertionError(
                "extra inputs in dst RTL: {}".format(
                    self.inputs[num_src_inputs:]))

    def __repr__(self):
        s = "XForm(inputs={}, defs={},\n ".format(self.inputs, self.defs)
        s += '\n '.join(str(n) for n in self.src)
        s += '\n=>\n '
        s += '\n '.join(str(n) for n in self.dst)
        s += '\n)'
        return s

    def _rewrite_rtl(self, rtl, symtab, context):
        for line in rtl:
            if isinstance(line, Def):
                line.defs = tuple(
                    self._rewrite_defs(line.defs, symtab, context))
                expr = line.expr
            else:
                expr = line
            self._rewrite_expr(expr, symtab, context)

    def _rewrite_expr(self, expr, symtab, context):
        """
        Find all uses of variables in `expr` and replace them with our own
        local symbols.
        """

        # Accept a whole expression tree.
        stack = [expr]
        while len(stack) > 0:
            expr = stack.pop()
            expr.args = tuple(
                self._rewrite_uses(expr, stack, symtab, context))

    def _rewrite_defs(self, defs, symtab, context):
        """
        Given a tuple of symbols defined in a Def, rewrite them to local
        symbols. Yield the new locals.
        """
        for sym in defs:
            name = str(sym)
            if name in symtab:
                var = symtab[name]
                if var.defctx & context:
                    raise AssertionError("'{}' multiply defined".format(name))
            else:
                var = Var(name)
                symtab[name] = var
                self.defs.append(var)
            var.defctx |= context
            yield var

    def _rewrite_uses(self, expr, stack, symtab, context):
        """
        Given an `Apply` expr, rewrite all uses in its arguments to local
        variables. Yield a sequence of new arguments.

        Append any `Apply` arguments to `stack`.
        """
        for arg, operand in zip(expr.args, expr.inst.ins):
            # Nested instructions are allowed. Visit recursively.
            if isinstance(arg, Apply):
                stack.append(arg)
                yield arg
                continue
            if not isinstance(arg, Var):
                assert not operand.is_value(), "Value arg must be `Var`"
                yield arg
                continue
            # This is supposed to be a symbolic value reference.
            name = str(arg)
            if name in symtab:
                var = symtab[name]
                # The variable must be used consistently as a def or input.
                if var.defctx and (var.defctx & context) == 0:
                    raise AssertionError(
                        "'{}' used as both input and def"
                        .format(name))
            else:
                # First time use of variable.
                var = Var(name)
                symtab[name] = var
                self.inputs.append(var)
            yield var


class XFormGroup(object):
    """
    A group of related transformations.
    """

    def __init__(self):
        self.xforms = list()

    def legalize(self, src, dst):
        """
        Add a legalization pattern to this group.

        :param src: Single `Def` or `Apply` to be legalized.
        :param dst: `Rtl` list of replacement instructions.
        """
        self.xforms.append(XForm(Rtl(src), dst))
36 lib/cretonne/meta/gen_build_deps.py Normal file
@@ -0,0 +1,36 @@
"""
Generate build dependencies for Cargo.

The `build.py` script is invoked by cargo when building libcretonne to
generate Rust code from the instruction descriptions. Cargo needs to know when
it is necessary to rerun the build script.

If the build script outputs lines of the form:

    cargo:rerun-if-changed=/path/to/file

cargo will rerun the build script when those files have changed since the last
build.
"""
from __future__ import absolute_import, print_function
import os
from os.path import dirname, abspath, join


def source_files(top):
    """
    Recursively find all interesting source files and directories in the
    directory tree starting at top. Yield a path to each file.
    """
    for (dirpath, dirnames, filenames) in os.walk(top):
        yield dirpath
        for f in filenames:
            if f.endswith('.py'):
                yield join(dirpath, f)


def generate():
    print("Dependencies from meta language directory:")
    meta = dirname(abspath(__file__))
    for path in source_files(meta):
        print("cargo:rerun-if-changed=" + path)
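An illustrative run from the meta directory; the printed paths depend on where the tree is checked out:

    # Illustrative only: the exact paths are whatever dirname(abspath(__file__))
    # resolves to on the build machine.
    import gen_build_deps
    gen_build_deps.generate()
    # Dependencies from meta language directory:
    # cargo:rerun-if-changed=/path/to/lib/cretonne/meta
    # cargo:rerun-if-changed=/path/to/lib/cretonne/meta/build.py
    # cargo:rerun-if-changed=/path/to/lib/cretonne/meta/constant_hash.py
    # ...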
453 lib/cretonne/meta/gen_encoding.py Normal file
@@ -0,0 +1,453 @@
"""
Generate sources for instruction encoding.

The tables and functions generated here support the `TargetIsa::encode()`
function which determines if a given instruction is legal, and if so, its
`Encoding` data which consists of a *recipe* and some *encoding* bits.

The `encode` function doesn't actually generate the binary machine bits. Each
recipe has a corresponding hand-written function to do that after registers
are allocated.

This is the information available to us:

- The instruction to be encoded as an `Inst` reference.
- The data-flow graph containing the instruction, giving us access to the
  `InstructionData` representation and the types of all values involved.
- A target ISA instance with shared and ISA-specific settings for evaluating
  ISA predicates.
- The currently active CPU mode is determined by the ISA.

## Level 1 table lookup

The CPU mode provides the first table. The key is the instruction's controlling
type variable. If the instruction is not polymorphic, use `VOID` for the type
variable. The table values are level 2 tables.

## Level 2 table lookup

The level 2 table is keyed by the instruction's opcode. The table values are
*encoding lists*.

The two-level table lookup allows the level 2 tables to be much smaller with
good locality. Code in any given function usually only uses a few different
types, so many of the level 2 tables will be cold.

## Encoding lists

An encoding list is a non-empty sequence of list entries. Each entry has
one of these forms:

1. Instruction predicate, encoding recipe, and encoding bits. If the
   instruction predicate is true, use this recipe and bits.
2. ISA predicate and skip-count. If the ISA predicate is false, skip the next
   *skip-count* entries in the list. If the skip count is zero, stop
   completely.
3. Stop. End of list marker. If this is reached, the instruction does not have
   a legal encoding.

The instruction predicate is also used to distinguish between polymorphic
instructions with different types for secondary type variables.
"""
from __future__ import absolute_import
import srcgen
from constant_hash import compute_quadratic
from unique_table import UniqueSeqTable
from collections import OrderedDict, defaultdict
import math
import itertools


def emit_instp(instp, fmt):
    """
    Emit code for matching an instruction predicate against an
    `InstructionData` reference called `inst`.

    The generated code is a pattern match that falls through if the instruction
    has an unexpected format. This should lead to a panic.
    """
    iform = instp.predicate_context()

    # Which fields do we need in the InstructionData pattern match?
    if iform.boxed_storage:
        fields = 'ref data'
    else:
        # Collect the leaf predicates
        leafs = set()
        instp.predicate_leafs(leafs)
        # All the leafs are FieldPredicate instances. Here we just care about
        # the field names.
        fields = ', '.join(sorted(set(p.field.name for p in leafs)))

    with fmt.indented('{} => {{'.format(instp.number), '}'):
        with fmt.indented(
                'if let InstructionData::{} {{ {}, .. }} = *inst {{'
                .format(iform.name, fields), '}'):
            fmt.line('return {};'.format(instp.rust_predicate(0)))


def emit_instps(instps, fmt):
    """
    Emit a function for matching instruction predicates.
    """

    with fmt.indented(
            'pub fn check_instp(inst: &InstructionData, instp_idx: u16) ' +
            '-> bool {', '}'):
        with fmt.indented('match instp_idx {', '}'):
            for instp in instps:
                emit_instp(instp, fmt)
            fmt.line('_ => panic!("Invalid instruction predicate")')

        # The match cases will fall through if the instruction format is wrong.
        fmt.line('panic!("Bad format {:?}/{} for instp {}",')
        fmt.line(' InstructionFormat::from(inst),')
        fmt.line(' inst.opcode(),')
        fmt.line(' instp_idx);')


# Encoding lists are represented as u16 arrays.
CODE_BITS = 16
PRED_BITS = 12
PRED_MASK = (1 << PRED_BITS) - 1

# 0..CODE_ALWAYS means: Check instruction predicate and use the next two
# entries as a (recipe, encbits) pair if true. CODE_ALWAYS is the always-true
# predicate, smaller numbers refer to instruction predicates.
CODE_ALWAYS = PRED_MASK

# Codes above CODE_ALWAYS indicate an ISA predicate to be tested.
# `x & PRED_MASK` is the ISA predicate number to test.
# `(x >> PRED_BITS)*3` is the number of u16 table entries to skip if the ISA
# predicate is false. (The factor of three corresponds to the (inst-pred,
# recipe, encbits) triples.)
#
# Finally, CODE_FAIL indicates the end of the list.
CODE_FAIL = (1 << CODE_BITS) - 1
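A rough Python model of how a consumer could walk one of these encoding lists using the constants above. It is illustrative only (the actual lookup lives on the Rust side of the crate), and `inst_pred`/`isa_pred` stand in for the generated predicate checks:

    # Illustration only: scan an encoding list starting at `offset` and return
    # the first applicable (recipe, encbits) pair, or None.
    def walk_enclist(words, offset, inst_pred, isa_pred):
        pos = offset
        while True:
            code = words[pos]
            if code == CODE_FAIL:
                return None                   # end of list: no legal encoding
            if code <= CODE_ALWAYS:
                # Form 1: instruction predicate, recipe, encbits.
                recipe, encbits = words[pos + 1], words[pos + 2]
                if code == CODE_ALWAYS or inst_pred(code):
                    return (recipe, encbits)
                pos += 3                      # predicate failed, try next entry
            else:
                # Form 2: ISA predicate with a skip count.
                skip = code >> PRED_BITS
                if isa_pred(code & PRED_MASK):
                    pos += 1                  # predicate holds, keep scanning
                elif skip == 0:
                    return None               # skip count 0 means stop completely
                else:
                    pos += 1 + 3 * skip       # skip the covered triples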
def seq_doc(enc):
|
||||
"""
|
||||
Return a tuple containing u16 representations of the instruction predicate
|
||||
an recipe / encbits.
|
||||
|
||||
Also return a doc string.
|
||||
"""
|
||||
if enc.instp:
|
||||
p = enc.instp.number
|
||||
doc = '--> {} when {}'.format(enc, enc.instp)
|
||||
else:
|
||||
p = CODE_ALWAYS
|
||||
doc = '--> {}'.format(enc)
|
||||
assert p <= CODE_ALWAYS
|
||||
return ((p, enc.recipe.number, enc.encbits), doc)
|
||||
|
||||
|
||||
class EncList(object):
|
||||
"""
|
||||
List of instructions for encoding a given type + opcode pair.
|
||||
|
||||
An encoding list contains a sequence of predicates and encoding recipes,
|
||||
all encoded as u16 values.
|
||||
|
||||
:param inst: The instruction opcode being encoded.
|
||||
:param ty: Value of the controlling type variable, or `None`.
|
||||
"""
|
||||
|
||||
def __init__(self, inst, ty):
|
||||
self.inst = inst
|
||||
self.ty = ty
|
||||
# List of applicable Encoding instances.
|
||||
# These will have different predicates.
|
||||
self.encodings = []
|
||||
|
||||
def name(self):
|
||||
name = self.inst.name
|
||||
if self.ty:
|
||||
name = '{}.{}'.format(name, self.ty.name)
|
||||
if self.encodings:
|
||||
name += ' ({})'.format(self.encodings[0].cpumode)
|
||||
return name
|
||||
|
||||
def by_isap(self):
|
||||
"""
|
||||
Group the encodings by ISA predicate without reordering them.
|
||||
|
||||
Yield a sequence of `(isap, (encs...))` tuples where `isap` is the ISA
|
||||
predicate or `None`, and `(encs...)` is a tuple of encodings that all
|
||||
have the same ISA predicate.
|
||||
"""
|
||||
maxlen = CODE_FAIL >> PRED_BITS
|
||||
for isap, group in itertools.groupby(
|
||||
self.encodings, lambda enc: enc.isap):
|
||||
group = tuple(group)
|
||||
# This probably never happens, but we can't express more than
|
||||
# maxlen encodings per isap.
|
||||
while len(group) > maxlen:
|
||||
yield (isap, group[0..maxlen])
|
||||
group = group[maxlen:]
|
||||
yield (isap, group)
|
||||
|
||||
def encode(self, seq_table, doc_table, isa):
|
||||
"""
|
||||
Encode this list as a sequence of u16 numbers.
|
||||
|
||||
Adds the sequence to `seq_table` and records the returned offset as
|
||||
`self.offset`.
|
||||
|
||||
Adds comment lines to `doc_table` keyed by seq_table offsets.
|
||||
"""
|
||||
words = list()
|
||||
docs = list()
|
||||
|
||||
# Group our encodings by isap.
|
||||
for isap, group in self.by_isap():
|
||||
if isap:
|
||||
# We have an ISA predicate covering `glen` encodings.
|
||||
pnum = isa.settings.predicate_number[isap]
|
||||
glen = len(group)
|
||||
doc = 'skip {}x3 unless {}'.format(glen, isap)
|
||||
docs.append((len(words), doc))
|
||||
words.append((glen << PRED_BITS) | pnum)
|
||||
|
||||
for enc in group:
|
||||
seq, doc = seq_doc(enc)
|
||||
docs.append((len(words), doc))
|
||||
words.extend(seq)
|
||||
|
||||
# Terminate the list.
|
||||
words.append(CODE_FAIL)
|
||||
|
||||
self.offset = seq_table.add(words)
|
||||
|
||||
# Add doc comments.
|
||||
doc_table[self.offset].append(
|
||||
'{:06x}: {}'.format(self.offset, self.name()))
|
||||
for pos, doc in docs:
|
||||
doc_table[self.offset + pos].append(doc)
|
||||
|
||||
|
||||
class Level2Table(object):
|
||||
"""
|
||||
Level 2 table mapping instruction opcodes to `EncList` objects.
|
||||
|
||||
:param ty: Controlling type variable of all entries, or `None`.
|
||||
"""
|
||||
|
||||
def __init__(self, ty):
|
||||
self.ty = ty
|
||||
# Maps inst -> EncList
|
||||
self.lists = OrderedDict()
|
||||
|
||||
def __getitem__(self, inst):
|
||||
ls = self.lists.get(inst)
|
||||
if not ls:
|
||||
ls = EncList(inst, self.ty)
|
||||
self.lists[inst] = ls
|
||||
return ls
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.lists.values())
|
||||
|
||||
def layout_hashtable(self, level2_hashtables, level2_doc):
|
||||
"""
|
||||
Compute the hash table mapping opcode -> enclist.
|
||||
|
||||
Append the hash table to `level2_hashtables` and record the offset.
|
||||
"""
|
||||
hash_table = compute_quadratic(
|
||||
self.lists.values(),
|
||||
lambda enclist: enclist.inst.number)
|
||||
|
||||
self.hash_table_offset = len(level2_hashtables)
|
||||
self.hash_table_len = len(hash_table)
|
||||
|
||||
level2_doc[self.hash_table_offset].append(
|
||||
'{:06x}: {}, {} entries'.format(
|
||||
self.hash_table_offset,
|
||||
self.ty.name,
|
||||
self.hash_table_len))
|
||||
level2_hashtables.extend(hash_table)
|
||||
|
||||
|
||||
class Level1Table(object):
|
||||
"""
|
||||
    Level 1 table mapping types to `Level2Table` objects.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.tables = OrderedDict()
|
||||
|
||||
def __getitem__(self, ty):
|
||||
tbl = self.tables.get(ty)
|
||||
if not tbl:
|
||||
tbl = Level2Table(ty)
|
||||
self.tables[ty] = tbl
|
||||
return tbl
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self.tables.values())
|
||||
|
||||
|
||||
def make_tables(cpumode):
|
||||
"""
|
||||
Generate tables for `cpumode` as described above.
|
||||
"""
|
||||
table = Level1Table()
|
||||
for enc in cpumode.encodings:
|
||||
ty = enc.ctrl_typevar()
|
||||
inst = enc.inst
|
||||
table[ty][inst].encodings.append(enc)
|
||||
return table
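# The resulting structure is a nested mapping (illustration only):
#
#   Level1Table: controlling type -> Level2Table
#   Level2Table: opcode           -> EncList
#   EncList:     candidate (instp, recipe, encbits) triples, in order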
|
||||
|
||||
|
||||
def encode_enclists(level1, seq_table, doc_table, isa):
|
||||
"""
|
||||
Compute encodings and doc comments for encoding lists in `level1`.
|
||||
"""
|
||||
for level2 in level1:
|
||||
for enclist in level2:
|
||||
enclist.encode(seq_table, doc_table, isa)
|
||||
|
||||
|
||||
def emit_enclists(seq_table, doc_table, fmt):
|
||||
with fmt.indented(
|
||||
'pub static ENCLISTS: [u16; {}] = ['.format(len(seq_table.table)),
|
||||
'];'):
|
||||
line = ''
|
||||
for idx, entry in enumerate(seq_table.table):
|
||||
if idx in doc_table:
|
||||
if line:
|
||||
fmt.line(line)
|
||||
line = ''
|
||||
for doc in doc_table[idx]:
|
||||
fmt.comment(doc)
|
||||
line += '{:#06x}, '.format(entry)
|
||||
if line:
|
||||
fmt.line(line)
|
||||
|
||||
|
||||
def encode_level2_hashtables(level1, level2_hashtables, level2_doc):
|
||||
for level2 in level1:
|
||||
level2.layout_hashtable(level2_hashtables, level2_doc)
|
||||
|
||||
|
||||
def emit_level2_hashtables(level2_hashtables, offt, level2_doc, fmt):
|
||||
"""
|
||||
Emit the big concatenation of level 2 hash tables.
|
||||
"""
|
||||
with fmt.indented(
|
||||
'pub static LEVEL2: [Level2Entry<{}>; {}] = ['
|
||||
.format(offt, len(level2_hashtables)),
|
||||
'];'):
|
||||
for offset, entry in enumerate(level2_hashtables):
|
||||
if offset in level2_doc:
|
||||
for doc in level2_doc[offset]:
|
||||
fmt.comment(doc)
|
||||
if entry:
|
||||
fmt.line(
|
||||
'Level2Entry ' +
|
||||
'{{ opcode: Opcode::{}, offset: {:#08x} }},'
|
||||
.format(entry.inst.camel_name, entry.offset))
|
||||
else:
|
||||
fmt.line(
|
||||
'Level2Entry ' +
|
||||
'{ opcode: Opcode::NotAnOpcode, offset: 0 },')
|
||||
|
||||
|
||||
def emit_level1_hashtable(cpumode, level1, offt, fmt):
|
||||
"""
|
||||
Emit a level 1 hash table for `cpumode`.
|
||||
"""
|
||||
hash_table = compute_quadratic(
|
||||
level1.tables.values(),
|
||||
lambda level2: level2.ty.number)
|
||||
|
||||
with fmt.indented(
|
||||
'pub static LEVEL1_{}: [Level1Entry<{}>; {}] = ['
|
||||
.format(cpumode.name.upper(), offt, len(hash_table)), '];'):
|
||||
for level2 in hash_table:
|
||||
if level2:
|
||||
l2l = int(math.log(level2.hash_table_len, 2))
|
||||
assert l2l > 0, "Hash table too small"
|
||||
fmt.line(
|
||||
'Level1Entry ' +
|
||||
'{{ ty: types::{}, log2len: {}, offset: {:#08x} }},'
|
||||
.format(
|
||||
level2.ty.name.upper(),
|
||||
l2l,
|
||||
level2.hash_table_offset))
|
||||
else:
|
||||
# Empty entry.
|
||||
fmt.line(
|
||||
'Level1Entry ' +
|
||||
'{ ty: types::VOID, log2len: 0, offset: 0 },')
|
||||
|
||||
|
||||
def offset_type(length):
|
||||
"""
|
||||
Compute an appropriate Rust integer type to use for offsets into a table of
|
||||
the given length.
|
||||
"""
|
||||
if length <= 0x10000:
|
||||
return 'u16'
|
||||
else:
|
||||
assert length <= 0x100000000, "Table too big"
|
||||
return 'u32'
|
||||
|
||||
|
||||
def emit_recipe_names(isa, fmt):
|
||||
"""
|
||||
Emit a table of encoding recipe names keyed by recipe number.
|
||||
|
||||
This is used for pretty-printing encodings.
|
||||
"""
|
||||
with fmt.indented(
|
||||
'pub static RECIPE_NAMES: [&\'static str; {}] = ['
|
||||
.format(len(isa.all_recipes)), '];'):
|
||||
for r in isa.all_recipes:
|
||||
fmt.line('"{}",'.format(r.name))
|
||||
|
||||
|
||||
def gen_isa(isa, fmt):
|
||||
# First assign numbers to relevant instruction predicates and generate the
|
||||
    # check_instp() function.
|
||||
emit_instps(isa.all_instps, fmt)
|
||||
|
||||
# Level1 tables, one per CPU mode
|
||||
level1_tables = dict()
|
||||
|
||||
# Tables for enclists with comments.
|
||||
seq_table = UniqueSeqTable()
|
||||
doc_table = defaultdict(list)
|
||||
|
||||
# Single table containing all the level2 hash tables.
|
||||
level2_hashtables = list()
|
||||
level2_doc = defaultdict(list)
|
||||
|
||||
for cpumode in isa.cpumodes:
|
||||
level2_doc[len(level2_hashtables)].append(cpumode.name)
|
||||
level1 = make_tables(cpumode)
|
||||
level1_tables[cpumode] = level1
|
||||
encode_enclists(level1, seq_table, doc_table, isa)
|
||||
encode_level2_hashtables(level1, level2_hashtables, level2_doc)
|
||||
|
||||
# Level 1 table encodes offsets into the level 2 table.
|
||||
level1_offt = offset_type(len(level2_hashtables))
|
||||
    # Level 2 tables encode offsets into seq_table.
|
||||
level2_offt = offset_type(len(seq_table.table))
|
||||
|
||||
emit_enclists(seq_table, doc_table, fmt)
|
||||
emit_level2_hashtables(level2_hashtables, level2_offt, level2_doc, fmt)
|
||||
for cpumode in isa.cpumodes:
|
||||
emit_level1_hashtable(
|
||||
cpumode, level1_tables[cpumode], level1_offt, fmt)
|
||||
|
||||
emit_recipe_names(isa, fmt)
|
||||
|
||||
|
||||
def generate(isas, out_dir):
|
||||
for isa in isas:
|
||||
fmt = srcgen.Formatter()
|
||||
gen_isa(isa, fmt)
|
||||
fmt.update_file('encoding-{}.rs'.format(isa.name), out_dir)
|
||||
534
lib/cretonne/meta/gen_instr.py
Normal file
@@ -0,0 +1,534 @@
|
||||
"""
|
||||
Generate sources with instruction info.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
import srcgen
|
||||
import constant_hash
|
||||
from unique_table import UniqueTable, UniqueSeqTable
|
||||
import cretonne
|
||||
|
||||
|
||||
def gen_formats(fmt):
|
||||
"""Generate an instruction format enumeration"""
|
||||
|
||||
fmt.doc_comment('An instruction format')
|
||||
fmt.doc_comment('')
|
||||
fmt.doc_comment('Every opcode has a corresponding instruction format')
|
||||
fmt.doc_comment('which is represented by both the `InstructionFormat`')
|
||||
fmt.doc_comment('and the `InstructionData` enums.')
|
||||
fmt.line('#[derive(Copy, Clone, PartialEq, Eq, Debug)]')
|
||||
with fmt.indented('pub enum InstructionFormat {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
fmt.line(f.name + ',')
|
||||
fmt.line()
|
||||
|
||||
# Emit a From<InstructionData> which also serves to verify that
|
||||
# InstructionFormat and InstructionData are in sync.
|
||||
with fmt.indented(
|
||||
"impl<'a> From<&'a InstructionData> for InstructionFormat {", '}'):
|
||||
with fmt.indented(
|
||||
"fn from(inst: &'a InstructionData) -> InstructionFormat {",
|
||||
'}'):
|
||||
with fmt.indented('match *inst {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
fmt.line(('InstructionData::{} {{ .. }} => ' +
|
||||
'InstructionFormat::{},')
|
||||
.format(f.name, f.name))
|
||||
fmt.line()
|
||||
|
||||
|
||||
def gen_instruction_data_impl(fmt):
|
||||
"""
|
||||
Generate the boring parts of the InstructionData implementation.
|
||||
|
||||
These methods in `impl InstructionData` can be generated automatically from
|
||||
the instruction formats:
|
||||
|
||||
- `pub fn opcode(&self) -> Opcode`
|
||||
- `pub fn first_type(&self) -> Type`
|
||||
- `pub fn second_result(&self) -> Option<Value>`
|
||||
- `pub fn second_result_mut<'a>(&'a mut self) -> Option<&'a mut Value>`
|
||||
"""
|
||||
|
||||
# The `opcode` and `first_type` methods simply read the `opcode` and `ty`
|
||||
# members. This is really a workaround for Rust's enum types missing shared
|
||||
# members.
|
||||
with fmt.indented('impl InstructionData {', '}'):
|
||||
fmt.doc_comment('Get the opcode of this instruction.')
|
||||
with fmt.indented('pub fn opcode(&self) -> Opcode {', '}'):
|
||||
with fmt.indented('match *self {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
fmt.line(
|
||||
'InstructionData::{} {{ opcode, .. }} => opcode,'
|
||||
.format(f.name))
|
||||
|
||||
fmt.doc_comment('Type of the first result, or `VOID`.')
|
||||
with fmt.indented('pub fn first_type(&self) -> Type {', '}'):
|
||||
with fmt.indented('match *self {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
fmt.line(
|
||||
'InstructionData::{} {{ ty, .. }} => ty,'
|
||||
.format(f.name))
|
||||
|
||||
fmt.doc_comment('Mutable reference to the type of the first result.')
|
||||
with fmt.indented(
|
||||
'pub fn first_type_mut(&mut self) -> &mut Type {', '}'):
|
||||
with fmt.indented('match *self {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
fmt.line(
|
||||
'InstructionData::{} {{ ref mut ty, .. }} => ty,'
|
||||
.format(f.name))
|
||||
|
||||
# Generate shared and mutable accessors for `second_result` which only
|
||||
# applies to instruction formats that can produce multiple results.
|
||||
# Everything else returns `None`.
|
||||
fmt.doc_comment('Second result value, if any.')
|
||||
with fmt.indented(
|
||||
'pub fn second_result(&self) -> Option<Value> {', '}'):
|
||||
with fmt.indented('match *self {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
if f.multiple_results:
|
||||
fmt.line(
|
||||
'InstructionData::' + f.name +
|
||||
' { second_result, .. }' +
|
||||
' => Some(second_result),')
|
||||
else:
|
||||
# Single or no results.
|
||||
fmt.line(
|
||||
'InstructionData::{} {{ .. }} => None,'
|
||||
.format(f.name))
|
||||
|
||||
fmt.doc_comment('Mutable reference to second result value, if any.')
|
||||
with fmt.indented(
|
||||
"pub fn second_result_mut<'a>(&'a mut self)" +
|
||||
" -> Option<&'a mut Value> {", '}'):
|
||||
with fmt.indented('match *self {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
if f.multiple_results:
|
||||
fmt.line(
|
||||
'InstructionData::' + f.name +
|
||||
' { ref mut second_result, .. }' +
|
||||
' => Some(second_result),')
|
||||
else:
|
||||
# Single or no results.
|
||||
fmt.line(
|
||||
'InstructionData::{} {{ .. }} => None,'
|
||||
.format(f.name))
|
||||
|
||||
fmt.doc_comment('Get the controlling type variable operand.')
|
||||
with fmt.indented(
|
||||
'pub fn typevar_operand(&self) -> Option<Value> {', '}'):
|
||||
with fmt.indented('match *self {', '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
n = 'InstructionData::' + f.name
|
||||
if f.typevar_operand is None:
|
||||
fmt.line(n + ' { .. } => None,')
|
||||
elif len(f.value_operands) == 1:
|
||||
# We have a single value operand called 'arg'.
|
||||
if f.boxed_storage:
|
||||
fmt.line(
|
||||
n + ' { ref data, .. } => Some(data.arg),')
|
||||
else:
|
||||
fmt.line(n + ' { arg, .. } => Some(arg),')
|
||||
else:
|
||||
# We have multiple value operands and an array `args`.
|
||||
# Which `args` index to use?
|
||||
# Map from index into f.kinds into f.value_operands
|
||||
# index.
|
||||
i = f.value_operands.index(f.typevar_operand)
|
||||
if f.boxed_storage:
|
||||
fmt.line(
|
||||
n +
|
||||
' { ref data, .. } => ' +
|
||||
('Some(data.args[{}]),'.format(i)))
|
||||
else:
|
||||
fmt.line(
|
||||
n +
|
||||
' {{ ref args, .. }} => Some(args[{}]),'
|
||||
.format(i))
|
||||
|
||||
|
||||
def collect_instr_groups(isas):
|
||||
seen = set()
|
||||
groups = []
|
||||
for isa in isas:
|
||||
for g in isa.instruction_groups:
|
||||
if g not in seen:
|
||||
groups.append(g)
|
||||
seen.add(g)
|
||||
return groups
|
||||
|
||||
|
||||
def gen_opcodes(groups, fmt):
|
||||
"""
|
||||
Generate opcode enumerations.
|
||||
|
||||
Return a list of all instructions.
|
||||
"""
|
||||
|
||||
fmt.doc_comment('An instruction opcode.')
|
||||
fmt.doc_comment('')
|
||||
fmt.doc_comment('All instructions from all supported ISAs are present.')
|
||||
fmt.line('#[derive(Copy, Clone, PartialEq, Eq, Debug)]')
|
||||
instrs = []
|
||||
with fmt.indented('pub enum Opcode {', '}'):
|
||||
fmt.line('NotAnOpcode,')
|
||||
for g in groups:
|
||||
for i in g.instructions:
|
||||
instrs.append(i)
|
||||
i.number = len(instrs)
|
||||
# Build a doc comment.
|
||||
prefix = ', '.join(o.name for o in i.outs)
|
||||
if prefix:
|
||||
prefix = prefix + ' = '
|
||||
suffix = ', '.join(o.name for o in i.ins)
|
||||
fmt.doc_comment(
|
||||
'`{}{} {}`. ({})'
|
||||
.format(prefix, i.name, suffix, i.format.name))
|
||||
# Document polymorphism.
|
||||
if i.is_polymorphic:
|
||||
if i.use_typevar_operand:
|
||||
fmt.doc_comment(
|
||||
'Type inferred from {}.'
|
||||
.format(i.ins[i.format.typevar_operand]))
|
||||
# Enum variant itself.
|
||||
fmt.line(i.camel_name + ',')
|
||||
fmt.line()
|
||||
|
||||
# Generate a private opcode_format table.
|
||||
with fmt.indented(
|
||||
'const OPCODE_FORMAT: [InstructionFormat; {}] = ['
|
||||
.format(len(instrs)),
|
||||
'];'):
|
||||
for i in instrs:
|
||||
fmt.format(
|
||||
'InstructionFormat::{}, // {}',
|
||||
i.format.name, i.name)
|
||||
fmt.line()
|
||||
|
||||
# Generate a private opcode_name function.
|
||||
with fmt.indented('fn opcode_name(opc: Opcode) -> &\'static str {', '}'):
|
||||
with fmt.indented('match opc {', '}'):
|
||||
fmt.line('Opcode::NotAnOpcode => "<not an opcode>",')
|
||||
for i in instrs:
|
||||
fmt.format('Opcode::{} => "{}",', i.camel_name, i.name)
|
||||
fmt.line()
|
||||
|
||||
# Generate an opcode hash table for looking up opcodes by name.
|
||||
hash_table = constant_hash.compute_quadratic(
|
||||
instrs,
|
||||
lambda i: constant_hash.simple_hash(i.name))
|
||||
with fmt.indented(
|
||||
'const OPCODE_HASH_TABLE: [Opcode; {}] = ['
|
||||
.format(len(hash_table)), '];'):
|
||||
for i in hash_table:
|
||||
if i is None:
|
||||
fmt.line('Opcode::NotAnOpcode,')
|
||||
else:
|
||||
fmt.format('Opcode::{},', i.camel_name)
|
||||
fmt.line()
|
||||
return instrs
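# The generated OPCODE_HASH_TABLE is open addressed and quadratically probed,
# keyed by `constant_hash.simple_hash` of the instruction name. A rough Python
# sketch of the lookup the Rust side is expected to perform (illustrative only;
# the exact probe step sequence is an assumption and must match
# `constant_hash.compute_quadratic`):
#
#   def hypothetical_opcode_lookup(table, name):
#       mask = len(table) - 1                  # table size is a power of two
#       slot = constant_hash.simple_hash(name) & mask
#       step = 0
#       while table[slot] is not None:         # None ~ Opcode::NotAnOpcode
#           if table[slot].name == name:
#               return table[slot]
#           step += 1                          # triangular/quadratic probing
#           slot = (slot + step) & mask
#       return None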
|
||||
|
||||
|
||||
def get_constraint(op, ctrl_typevar, type_sets):
|
||||
"""
|
||||
Get the value type constraint for an SSA value operand, where
|
||||
`ctrl_typevar` is the controlling type variable.
|
||||
|
||||
Each operand constraint is represented as a string, one of:
|
||||
|
||||
- `Concrete(vt)`, where `vt` is a value type name.
|
||||
- `Free(idx)` where `idx` is an index into `type_sets`.
|
||||
- `Same`, `Lane`, `AsBool` for controlling typevar-derived constraints.
|
||||
"""
|
||||
t = op.typ
|
||||
assert t.operand_kind() is cretonne.value
|
||||
|
||||
# A concrete value type.
|
||||
if isinstance(t, cretonne.ValueType):
|
||||
return 'Concrete({})'.format(t.rust_name())
|
||||
|
||||
if t.free_typevar() is not ctrl_typevar:
|
||||
assert not t.is_derived
|
||||
return 'Free({})'.format(type_sets.add(t.type_set))
|
||||
|
||||
if t.is_derived:
|
||||
assert t.base is ctrl_typevar, "Not derived directly from ctrl_typevar"
|
||||
return t.derived_func
|
||||
|
||||
assert t is ctrl_typevar
|
||||
return 'Same'
|
||||
|
||||
|
||||
def gen_type_constraints(fmt, instrs):
|
||||
"""
|
||||
Generate value type constraints for all instructions.
|
||||
|
||||
- Emit a compact constant table of ValueTypeSet objects.
|
||||
- Emit a compact constant table of OperandConstraint objects.
|
||||
- Emit an opcode-indexed table of instruction constraints.
|
||||
|
||||
"""
|
||||
|
||||
# Table of TypeSet instances.
|
||||
type_sets = UniqueTable()
|
||||
|
||||
# Table of operand constraint sequences (as tuples). Each operand
|
||||
# constraint is represented as a string, one of:
|
||||
# - `Concrete(vt)`, where `vt` is a value type name.
|
||||
    # - `Free(idx)` where `idx` is an index into `type_sets`.
|
||||
# - `Same`, `Lane`, `AsBool` for controlling typevar-derived constraints.
|
||||
operand_seqs = UniqueSeqTable()
|
||||
|
||||
# Preload table with constraints for typical binops.
|
||||
operand_seqs.add(['Same'] * 3)
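    # ('Same', 'Same', 'Same') covers one result and two operands that all
    # share the controlling type variable, so the most common constraint
    # sequence lands at offset 0.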
|
||||
|
||||
# TypeSet indexes are encoded in 8 bits, with `0xff` reserved.
|
||||
typeset_limit = 0xff
|
||||
|
||||
fmt.comment('Table of opcode constraints.')
|
||||
with fmt.indented(
|
||||
'const OPCODE_CONSTRAINTS : [OpcodeConstraints; {}] = ['
|
||||
.format(len(instrs)), '];'):
|
||||
for i in instrs:
|
||||
# Collect constraints for the value results, not including
|
||||
# `variable_args` results which are always special cased.
|
||||
constraints = list()
|
||||
ctrl_typevar = None
|
||||
ctrl_typeset = typeset_limit
|
||||
if i.is_polymorphic:
|
||||
ctrl_typevar = i.ctrl_typevar
|
||||
ctrl_typeset = type_sets.add(ctrl_typevar.type_set)
|
||||
for idx in i.value_results:
|
||||
constraints.append(
|
||||
get_constraint(i.outs[idx], ctrl_typevar, type_sets))
|
||||
for idx in i.format.value_operands:
|
||||
constraints.append(
|
||||
get_constraint(i.ins[idx], ctrl_typevar, type_sets))
|
||||
offset = operand_seqs.add(constraints)
|
||||
fixed_results = len(i.value_results)
|
||||
use_typevar_operand = i.is_polymorphic and i.use_typevar_operand
|
||||
fmt.comment(
|
||||
'{}: fixed_results={}, use_typevar_operand={}'
|
||||
.format(i.camel_name, fixed_results, use_typevar_operand))
|
||||
fmt.comment('Constraints={}'.format(constraints))
|
||||
if i.is_polymorphic:
|
||||
fmt.comment(
|
||||
'Polymorphic over {}'.format(ctrl_typevar.type_set))
|
||||
# Compute the bit field encoding, c.f. instructions.rs.
|
||||
assert fixed_results < 8, "Bit field encoding too tight"
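            # Bits 0-2 hold the number of fixed results; bit 3 is set when
            # the controlling type variable is inferred from an operand.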
|
||||
flags = fixed_results
|
||||
if use_typevar_operand:
|
||||
flags |= 8
|
||||
|
||||
with fmt.indented('OpcodeConstraints {', '},'):
|
||||
fmt.line('flags: {:#04x},'.format(flags))
|
||||
fmt.line('typeset_offset: {},'.format(ctrl_typeset))
|
||||
fmt.line('constraint_offset: {},'.format(offset))
|
||||
|
||||
fmt.comment('Table of value type sets.')
|
||||
assert len(type_sets.table) <= typeset_limit, "Too many type sets"
|
||||
with fmt.indented(
|
||||
'const TYPE_SETS : [ValueTypeSet; {}] = ['
|
||||
.format(len(type_sets.table)), '];'):
|
||||
for ts in type_sets.table:
|
||||
with fmt.indented('ValueTypeSet {', '},'):
|
||||
ts.emit_fields(fmt)
|
||||
|
||||
fmt.comment('Table of operand constraint sequences.')
|
||||
with fmt.indented(
|
||||
'const OPERAND_CONSTRAINTS : [OperandConstraint; {}] = ['
|
||||
.format(len(operand_seqs.table)), '];'):
|
||||
for c in operand_seqs.table:
|
||||
fmt.line('OperandConstraint::{},'.format(c))
|
||||
|
||||
|
||||
def gen_format_constructor(iform, fmt):
|
||||
"""
|
||||
    Emit a method for creating and inserting an `iform` instruction,
|
||||
where `iform` is an instruction format.
|
||||
|
||||
Instruction formats that can produce multiple results take a `ctrl_typevar`
|
||||
argument for deducing the result types. Others take a `result_type`
|
||||
argument.
|
||||
"""
|
||||
|
||||
# Construct method arguments.
|
||||
args = ['&mut self', 'opcode: Opcode']
|
||||
|
||||
if iform.multiple_results:
|
||||
args.append('ctrl_typevar: Type')
|
||||
# `dfg::make_inst_results` will compute the result type.
|
||||
result_type = 'types::VOID'
|
||||
else:
|
||||
args.append('result_type: Type')
|
||||
result_type = 'result_type'
|
||||
|
||||
# Normal operand arguments.
|
||||
for idx, kind in enumerate(iform.kinds):
|
||||
args.append('op{}: {}'.format(idx, kind.rust_type))
|
||||
|
||||
proto = '{}({}) -> Inst'.format(iform.name, ', '.join(args))
|
||||
fmt.line('#[allow(non_snake_case)]')
|
||||
with fmt.indented('pub fn {} {{'.format(proto), '}'):
|
||||
# Generate the instruction data.
|
||||
with fmt.indented(
|
||||
'let data = InstructionData::{} {{'.format(iform.name), '};'):
|
||||
fmt.line('opcode: opcode,')
|
||||
fmt.line('ty: {},'.format(result_type))
|
||||
if iform.multiple_results:
|
||||
fmt.line('second_result: Value::default(),')
|
||||
if iform.boxed_storage:
|
||||
with fmt.indented(
|
||||
'data: Box::new(instructions::{}Data {{'
|
||||
.format(iform.name), '}),'):
|
||||
gen_member_inits(iform, fmt)
|
||||
else:
|
||||
gen_member_inits(iform, fmt)
|
||||
|
||||
# Create result values if necessary.
|
||||
if iform.multiple_results:
|
||||
fmt.line('let inst = self.insert_inst(data);')
|
||||
fmt.line('self.dfg.make_inst_results(inst, ctrl_typevar);')
|
||||
fmt.line('inst')
|
||||
else:
|
||||
fmt.line('self.insert_inst(data)')
|
||||
|
||||
|
||||
def gen_member_inits(iform, fmt):
|
||||
"""
|
||||
Emit member initializers for an `iform` instruction.
|
||||
"""
|
||||
|
||||
# Values first.
|
||||
if len(iform.value_operands) == 1:
|
||||
fmt.line('arg: op{},'.format(iform.value_operands[0]))
|
||||
elif len(iform.value_operands) > 1:
|
||||
fmt.line('args: [{}],'.format(
|
||||
', '.join('op{}'.format(i) for i in iform.value_operands)))
|
||||
|
||||
# Immediates and entity references.
|
||||
for idx, member in enumerate(iform.members):
|
||||
if member:
|
||||
fmt.line('{}: op{},'.format(member, idx))
|
||||
|
||||
|
||||
def gen_inst_builder(inst, fmt):
|
||||
"""
|
||||
Emit a method for generating the instruction `inst`.
|
||||
|
||||
The method will create and insert an instruction, then return the result
|
||||
values, or the instruction reference itself for instructions that don't
|
||||
have results.
|
||||
"""
|
||||
|
||||
# Construct method arguments.
|
||||
args = ['&mut self']
|
||||
|
||||
# The controlling type variable will be inferred from the input values if
|
||||
# possible. Otherwise, it is the first method argument.
|
||||
if inst.is_polymorphic and not inst.use_typevar_operand:
|
||||
args.append('{}: Type'.format(inst.ctrl_typevar.name))
|
||||
|
||||
tmpl_types = list()
|
||||
into_args = list()
|
||||
for op in inst.ins:
|
||||
if isinstance(op.kind, cretonne.ImmediateKind):
|
||||
t = 'T{}{}'.format(1 + len(tmpl_types), op.kind.name)
|
||||
tmpl_types.append('{}: Into<{}>'.format(t, op.kind.rust_type))
|
||||
into_args.append(op.name)
|
||||
else:
|
||||
t = op.kind.rust_type
|
||||
args.append('{}: {}'.format(op.name, t))
|
||||
|
||||
# Return the inst reference for result-less instructions.
|
||||
if len(inst.value_results) == 0:
|
||||
rtype = 'Inst'
|
||||
elif len(inst.value_results) == 1:
|
||||
rtype = 'Value'
|
||||
else:
|
||||
rvals = ', '.join(len(inst.value_results) * ['Value'])
|
||||
rtype = '({})'.format(rvals)
|
||||
|
||||
method = inst.name
|
||||
if method == 'return':
|
||||
# Avoid Rust keywords
|
||||
method = '_' + method
|
||||
|
||||
if len(tmpl_types) > 0:
|
||||
tmpl = '<{}>'.format(', '.join(tmpl_types))
|
||||
else:
|
||||
tmpl = ''
|
||||
proto = '{}{}({}) -> {}'.format(method, tmpl, ', '.join(args), rtype)
|
||||
|
||||
fmt.line('#[allow(non_snake_case)]')
|
||||
with fmt.indented('pub fn {} {{'.format(proto), '}'):
|
||||
# Convert all of the `Into<>` arguments.
|
||||
for arg in into_args:
|
||||
fmt.line('let {} = {}.into();'.format(arg, arg))
|
||||
|
||||
# Arguments for instruction constructor.
|
||||
args = ['Opcode::' + inst.camel_name]
|
||||
|
||||
if inst.is_polymorphic and not inst.use_typevar_operand:
|
||||
# This was an explicit method argument.
|
||||
args.append(inst.ctrl_typevar.name)
|
||||
elif len(inst.value_results) == 0:
|
||||
args.append('types::VOID')
|
||||
elif inst.is_polymorphic:
|
||||
# Infer the controlling type variable from the input operands.
|
||||
fmt.line(
|
||||
'let ctrl_typevar = self.dfg.value_type({});'
|
||||
.format(inst.ins[inst.format.typevar_operand].name))
|
||||
args.append('ctrl_typevar')
|
||||
else:
|
||||
# This non-polymorphic instruction has a fixed result type.
|
||||
args.append(
|
||||
'types::' +
|
||||
inst.outs[inst.value_results[0]].typ.name.upper())
|
||||
|
||||
args.extend(op.name for op in inst.ins)
|
||||
args = ', '.join(args)
|
||||
fmt.line('let inst = self.{}({});'.format(inst.format.name, args))
|
||||
|
||||
if len(inst.value_results) == 0:
|
||||
fmt.line('inst')
|
||||
elif len(inst.value_results) == 1:
|
||||
fmt.line('self.dfg.first_result(inst)')
|
||||
else:
|
||||
fmt.line('let mut results = self.dfg.inst_results(inst);')
|
||||
fmt.line('({})'.format(', '.join(
|
||||
len(inst.value_results) * ['results.next().unwrap()'])))
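# As an illustration, for a polymorphic binary instruction such as `iadd`
# (assuming its two value operands are named `x` and `y`), the generated
# builder method would look roughly like this, not verbatim output:
#
#   #[allow(non_snake_case)]
#   pub fn iadd(&mut self, x: Value, y: Value) -> Value {
#       let ctrl_typevar = self.dfg.value_type(x);
#       let inst = self.Binary(Opcode::Iadd, ctrl_typevar, x, y);
#       self.dfg.first_result(inst)
#   }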
|
||||
|
||||
|
||||
def gen_builder(insts, fmt):
|
||||
"""
|
||||
Generate a Builder trait with methods for all instructions.
|
||||
"""
|
||||
fmt.doc_comment(
|
||||
'Methods for inserting instructions by instruction format.')
|
||||
with fmt.indented("impl<'a> Builder<'a> {", '}'):
|
||||
for f in cretonne.InstructionFormat.all_formats:
|
||||
gen_format_constructor(f, fmt)
|
||||
|
||||
fmt.doc_comment('Methods for inserting instructions by opcode.')
|
||||
with fmt.indented("impl<'a> Builder<'a> {", '}'):
|
||||
for inst in insts:
|
||||
gen_inst_builder(inst, fmt)
|
||||
|
||||
|
||||
def generate(isas, out_dir):
|
||||
groups = collect_instr_groups(isas)
|
||||
|
||||
# opcodes.rs
|
||||
fmt = srcgen.Formatter()
|
||||
gen_formats(fmt)
|
||||
gen_instruction_data_impl(fmt)
|
||||
instrs = gen_opcodes(groups, fmt)
|
||||
gen_type_constraints(fmt, instrs)
|
||||
fmt.update_file('opcodes.rs', out_dir)
|
||||
|
||||
# builder.rs
|
||||
fmt = srcgen.Formatter()
|
||||
gen_builder(instrs, fmt)
|
||||
fmt.update_file('builder.rs', out_dir)
|
||||
262
lib/cretonne/meta/gen_settings.py
Normal file
@@ -0,0 +1,262 @@
|
||||
"""
|
||||
Generate sources with settings.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
import srcgen
|
||||
from unique_table import UniqueSeqTable
|
||||
import constant_hash
|
||||
from cretonne import camel_case, BoolSetting, NumSetting, EnumSetting, settings
|
||||
|
||||
|
||||
def gen_enum_types(sgrp, fmt):
|
||||
"""
|
||||
Emit enum types for any enum settings.
|
||||
"""
|
||||
for setting in sgrp.settings:
|
||||
if not isinstance(setting, EnumSetting):
|
||||
continue
|
||||
ty = camel_case(setting.name)
|
||||
fmt.line('#[derive(Debug, PartialEq, Eq)]')
|
||||
fmt.line(
|
||||
'pub enum {} {{ {} }}'
|
||||
.format(ty, ", ".join(camel_case(v) for v in setting.values)))
|
||||
|
||||
|
||||
def gen_getter(setting, sgrp, fmt):
|
||||
"""
|
||||
Emit a getter function for `setting`.
|
||||
"""
|
||||
fmt.doc_comment(setting.__doc__)
|
||||
|
||||
if isinstance(setting, BoolSetting):
|
||||
proto = 'pub fn {}(&self) -> bool'.format(setting.name)
|
||||
with fmt.indented(proto + ' {', '}'):
|
||||
fmt.line(
|
||||
'self.numbered_predicate({})'
|
||||
.format(sgrp.predicate_number[setting]))
|
||||
elif isinstance(setting, NumSetting):
|
||||
proto = 'pub fn {}(&self) -> u8'.format(setting.name)
|
||||
with fmt.indented(proto + ' {', '}'):
|
||||
fmt.line('self.bytes[{}]'.format(setting.byte_offset))
|
||||
elif isinstance(setting, EnumSetting):
|
||||
ty = camel_case(setting.name)
|
||||
proto = 'pub fn {}(&self) -> {}'.format(setting.name, ty)
|
||||
with fmt.indented(proto + ' {', '}'):
|
||||
with fmt.indented(
|
||||
'match self.bytes[{}] {{'
|
||||
.format(setting.byte_offset), '}'):
|
||||
for i, v in enumerate(setting.values):
|
||||
fmt.line('{} => {}::{},'.format(i, ty, camel_case(v)))
|
||||
fmt.line('_ => panic!("Invalid enum value")')
|
||||
else:
|
||||
raise AssertionError("Unknown setting kind")
|
||||
|
||||
|
||||
def gen_pred_getter(pred, sgrp, fmt):
|
||||
"""
|
||||
Emit a getter for a pre-computed predicate.
|
||||
"""
|
||||
fmt.doc_comment('Computed predicate `{}`.'.format(pred.rust_predicate(0)))
|
||||
proto = 'pub fn {}(&self) -> bool'.format(pred.name)
|
||||
with fmt.indented(proto + ' {', '}'):
|
||||
fmt.line(
|
||||
'self.numbered_predicate({})'
|
||||
.format(sgrp.predicate_number[pred]))
|
||||
|
||||
|
||||
def gen_getters(sgrp, fmt):
|
||||
"""
|
||||
    Emit getter functions for all the settings in `sgrp`.
|
||||
"""
|
||||
fmt.doc_comment("User-defined settings.")
|
||||
with fmt.indented('impl Flags {', '}'):
|
||||
# Dynamic numbered predicate getter.
|
||||
with fmt.indented(
|
||||
'pub fn numbered_predicate(&self, p: usize) -> bool {', '}'):
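            # Boolean settings and precomputed predicates are packed one per
            # bit, in predicate-number order, starting at byte
            # `boolean_offset`; see the predicate loop in gen_constructor().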
|
||||
fmt.line(
|
||||
'self.bytes[{} + p/8] & (1 << (p%8)) != 0'
|
||||
.format(sgrp.boolean_offset))
|
||||
for setting in sgrp.settings:
|
||||
gen_getter(setting, sgrp, fmt)
|
||||
for pred in sgrp.named_predicates:
|
||||
gen_pred_getter(pred, sgrp, fmt)
|
||||
|
||||
|
||||
def gen_descriptors(sgrp, fmt):
|
||||
"""
|
||||
Generate the DESCRIPTORS and ENUMERATORS tables.
|
||||
"""
|
||||
|
||||
enums = UniqueSeqTable()
|
||||
|
||||
with fmt.indented(
|
||||
'static DESCRIPTORS: [detail::Descriptor; {}] = ['
|
||||
.format(len(sgrp.settings)),
|
||||
'];'):
|
||||
for idx, setting in enumerate(sgrp.settings):
|
||||
setting.descriptor_index = idx
|
||||
with fmt.indented('detail::Descriptor {', '},'):
|
||||
fmt.line('name: "{}",'.format(setting.name))
|
||||
fmt.line('offset: {},'.format(setting.byte_offset))
|
||||
if isinstance(setting, BoolSetting):
|
||||
fmt.line(
|
||||
'detail: detail::Detail::Bool {{ bit: {} }},'
|
||||
.format(setting.bit_offset))
|
||||
elif isinstance(setting, NumSetting):
|
||||
fmt.line('detail: detail::Detail::Num,')
|
||||
elif isinstance(setting, EnumSetting):
|
||||
offs = enums.add(setting.values)
|
||||
fmt.line(
|
||||
'detail: detail::Detail::Enum ' +
|
||||
'{{ last: {}, enumerators: {} }},'
|
||||
.format(len(setting.values)-1, offs))
|
||||
else:
|
||||
raise AssertionError("Unknown setting kind")
|
||||
|
||||
with fmt.indented(
|
||||
'static ENUMERATORS: [&\'static str; {}] = ['
|
||||
.format(len(enums.table)),
|
||||
'];'):
|
||||
for txt in enums.table:
|
||||
fmt.line('"{}",'.format(txt))
|
||||
|
||||
def hash_setting(s):
|
||||
return constant_hash.simple_hash(s.name)
|
||||
|
||||
hash_table = constant_hash.compute_quadratic(sgrp.settings, hash_setting)
|
||||
with fmt.indented(
|
||||
'static HASH_TABLE: [u16; {}] = ['
|
||||
.format(len(hash_table)),
|
||||
'];'):
|
||||
for h in hash_table:
|
||||
if h is None:
|
||||
fmt.line('0xffff,')
|
||||
else:
|
||||
fmt.line('{},'.format(h.descriptor_index))
|
||||
|
||||
|
||||
def gen_template(sgrp, fmt):
|
||||
"""
|
||||
Emit a Template constant.
|
||||
"""
|
||||
v = [0] * sgrp.settings_size
|
||||
for setting in sgrp.settings:
|
||||
v[setting.byte_offset] |= setting.default_byte()
|
||||
|
||||
with fmt.indented(
|
||||
'static TEMPLATE: detail::Template = detail::Template {', '};'):
|
||||
fmt.line('name: "{}",'.format(sgrp.name))
|
||||
fmt.line('descriptors: &DESCRIPTORS,')
|
||||
fmt.line('enumerators: &ENUMERATORS,')
|
||||
fmt.line('hash_table: &HASH_TABLE,')
|
||||
vs = ', '.join('{:#04x}'.format(x) for x in v)
|
||||
fmt.line('defaults: &[ {} ],'.format(vs))
|
||||
|
||||
fmt.doc_comment(
|
||||
'Create a `settings::Builder` for the {} settings group.'
|
||||
.format(sgrp.name))
|
||||
with fmt.indented('pub fn builder() -> Builder {', '}'):
|
||||
fmt.line('Builder::new(&TEMPLATE)')
|
||||
|
||||
|
||||
def gen_display(sgrp, fmt):
|
||||
"""
|
||||
Generate the Display impl for Flags.
|
||||
"""
|
||||
with fmt.indented('impl fmt::Display for Flags {', '}'):
|
||||
with fmt.indented(
|
||||
'fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {',
|
||||
'}'):
|
||||
fmt.line('try!(writeln!(f, "[{}]"));'.format(sgrp.name))
|
||||
with fmt.indented('for d in &DESCRIPTORS {', '}'):
|
||||
fmt.line('try!(write!(f, "{} = ", d.name));')
|
||||
fmt.line(
|
||||
'try!(TEMPLATE.format_toml_value(d.detail,' +
|
||||
'self.bytes[d.offset as usize], f));')
|
||||
fmt.line('try!(writeln!(f, ""));')
|
||||
fmt.line('Ok(())')
|
||||
|
||||
|
||||
def gen_constructor(sgrp, parent, fmt):
|
||||
"""
|
||||
Generate a Flags constructor.
|
||||
"""
|
||||
|
||||
with fmt.indented('impl Flags {', '}'):
|
||||
args = 'builder: &Builder'
|
||||
if sgrp.parent:
|
||||
p = sgrp.parent
|
||||
args = '{}: &{}::Flags, {}'.format(p.name, p.qual_mod, args)
|
||||
with fmt.indented(
|
||||
'pub fn new({}) -> Flags {{'.format(args), '}'):
|
||||
fmt.line('let bvec = builder.state_for("{}");'.format(sgrp.name))
|
||||
fmt.line('let mut bytes = [0; {}];'.format(sgrp.byte_size()))
|
||||
fmt.line('assert_eq!(bvec.len(), {});'.format(sgrp.settings_size))
|
||||
with fmt.indented(
|
||||
'for (i, b) in bvec.iter().enumerate() {', '}'):
|
||||
fmt.line('bytes[i] = *b;')
|
||||
|
||||
# Stop here without predicates.
|
||||
if len(sgrp.predicate_number) == sgrp.boolean_settings:
|
||||
fmt.line('Flags { bytes: bytes }')
|
||||
return
|
||||
|
||||
# Now compute the predicates.
|
||||
fmt.line(
|
||||
'let mut {} = Flags {{ bytes: bytes }};'
|
||||
.format(sgrp.name))
|
||||
|
||||
for pred, number in sgrp.predicate_number.items():
|
||||
# Don't compute our own settings.
|
||||
if number < sgrp.boolean_settings:
|
||||
continue
|
||||
if pred.name:
|
||||
fmt.comment(
|
||||
'Precompute #{} ({}).'.format(number, pred.name))
|
||||
else:
|
||||
fmt.comment('Precompute #{}.'.format(number))
|
||||
with fmt.indented(
|
||||
'if {} {{'.format(pred.rust_predicate(0)),
|
||||
'}'):
|
||||
fmt.line(
|
||||
'{}.bytes[{}] |= 1 << {};'
|
||||
.format(
|
||||
sgrp.name,
|
||||
sgrp.boolean_offset + number // 8,
|
||||
number % 8))
|
||||
|
||||
fmt.line(sgrp.name)
|
||||
|
||||
|
||||
def gen_group(sgrp, fmt):
|
||||
"""
|
||||
Generate a Flags struct representing `sgrp`.
|
||||
"""
|
||||
fmt.line('#[derive(Clone)]')
|
||||
fmt.doc_comment('Flags group `{}`.'.format(sgrp.name))
|
||||
with fmt.indented('pub struct Flags {', '}'):
|
||||
fmt.line('bytes: [u8; {}],'.format(sgrp.byte_size()))
|
||||
|
||||
gen_constructor(sgrp, None, fmt)
|
||||
gen_enum_types(sgrp, fmt)
|
||||
gen_getters(sgrp, fmt)
|
||||
gen_descriptors(sgrp, fmt)
|
||||
gen_template(sgrp, fmt)
|
||||
gen_display(sgrp, fmt)
|
||||
|
||||
|
||||
def generate(isas, out_dir):
|
||||
# Generate shared settings.
|
||||
fmt = srcgen.Formatter()
|
||||
settings.group.qual_mod = 'settings'
|
||||
gen_group(settings.group, fmt)
|
||||
fmt.update_file('settings.rs', out_dir)
|
||||
|
||||
# Generate ISA-specific settings.
|
||||
for isa in isas:
|
||||
if isa.settings:
|
||||
isa.settings.qual_mod = 'isa::{}::settings'.format(
|
||||
isa.settings.name)
|
||||
fmt = srcgen.Formatter()
|
||||
gen_group(isa.settings, fmt)
|
||||
fmt.update_file('settings-{}.rs'.format(isa.name), out_dir)
|
||||
51
lib/cretonne/meta/gen_types.py
Normal file
@@ -0,0 +1,51 @@
|
||||
"""
|
||||
Generate sources with type info.
|
||||
|
||||
This generates a `types.rs` file which is included in
|
||||
`libcretonne/ir/types.rs`. The file provides constant definitions for the most
|
||||
commonly used types, including all of the scalar types.
|
||||
|
||||
This ensures that Python and Rust use the same type numbering.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
import srcgen
|
||||
from cretonne import ValueType
|
||||
|
||||
|
||||
def emit_type(ty, fmt):
|
||||
"""
|
||||
Emit a constant definition of a single value type.
|
||||
"""
|
||||
name = ty.name.upper()
|
||||
fmt.doc_comment(ty.__doc__)
|
||||
fmt.line(
|
||||
'pub const {}: Type = Type({:#x});'
|
||||
.format(name, ty.number))
|
||||
|
||||
|
||||
def emit_vectors(bits, fmt):
|
||||
"""
|
||||
Emit definition for all vector types with `bits` total size.
|
||||
"""
|
||||
size = bits // 8
|
||||
for ty in ValueType.all_scalars:
|
||||
mb = ty.membytes
|
||||
if mb == 0 or mb >= size:
|
||||
continue
|
||||
emit_type(ty.by(size // mb), fmt)
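# For example, emit_vectors(128, fmt) emits the 16-byte vector types, such as
# the 4-lane vector of i32 (i32 occupies 4 bytes, so 16 // 4 == 4 lanes).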
|
||||
|
||||
|
||||
def emit_types(fmt):
|
||||
for ty in ValueType.all_scalars:
|
||||
emit_type(ty, fmt)
|
||||
# Emit vector definitions for common SIMD sizes.
|
||||
emit_vectors(64, fmt)
|
||||
emit_vectors(128, fmt)
|
||||
emit_vectors(256, fmt)
|
||||
emit_vectors(512, fmt)
|
||||
|
||||
|
||||
def generate(out_dir):
|
||||
fmt = srcgen.Formatter()
|
||||
emit_types(fmt)
|
||||
fmt.update_file('types.rs', out_dir)
|
||||
17
lib/cretonne/meta/isa/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
||||
"""
|
||||
Cretonne target ISA definitions
|
||||
-------------------------------
|
||||
|
||||
The :py:mod:`isa` package contains sub-packages for each target instruction set
|
||||
architecture supported by Cretonne.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from . import riscv
|
||||
|
||||
|
||||
def all_isas():
|
||||
"""
|
||||
Get a list of all the supported target ISAs. Each target ISA is represented
|
||||
as a :py:class:`cretonne.TargetISA` instance.
|
||||
"""
|
||||
return [riscv.isa]
|
||||
32
lib/cretonne/meta/isa/riscv/__init__.py
Normal file
@@ -0,0 +1,32 @@
|
||||
"""
|
||||
RISC-V Target
|
||||
-------------
|
||||
|
||||
`RISC-V <http://riscv.org/>`_ is an open instruction set architecture
|
||||
originally developed at UC Berkeley. It is a RISC-style ISA with either a
|
||||
32-bit (RV32I) or 64-bit (RV64I) base instruction set and a number of optional
|
||||
extensions:
|
||||
|
||||
RV32M / RV64M
|
||||
Integer multiplication and division.
|
||||
|
||||
RV32A / RV64A
|
||||
Atomics.
|
||||
|
||||
RV32F / RV64F
|
||||
Single-precision IEEE floating point.
|
||||
|
||||
RV32D / RV64D
|
||||
Double-precision IEEE floating point.
|
||||
|
||||
RV32G / RV64G
|
||||
General purpose instruction sets. This represents the union of the I, M, A,
|
||||
F, and D instruction sets listed above.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from . import defs
|
||||
from . import encodings, settings # noqa
|
||||
|
||||
# Re-export the primary target ISA definition.
|
||||
isa = defs.isa.finish()
|
||||
14
lib/cretonne/meta/isa/riscv/defs.py
Normal file
@@ -0,0 +1,14 @@
|
||||
"""
|
||||
RISC-V definitions.
|
||||
|
||||
Commonly used definitions.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from cretonne import TargetISA, CPUMode
|
||||
import cretonne.base
|
||||
|
||||
isa = TargetISA('riscv', [cretonne.base.instructions])
|
||||
|
||||
# CPU modes for 32-bit and 64-bit operation.
|
||||
RV32 = CPUMode('RV32', isa)
|
||||
RV64 = CPUMode('RV64', isa)
|
||||
54
lib/cretonne/meta/isa/riscv/encodings.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""
|
||||
RISC-V Encodings.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from cretonne import base
|
||||
from .defs import RV32, RV64
|
||||
from .recipes import OPIMM, OPIMM32, OP, OP32, R, Rshamt, I
|
||||
from .settings import use_m
|
||||
|
||||
# Basic arithmetic binary instructions are encoded in an R-type instruction.
|
||||
for inst, inst_imm, f3, f7 in [
|
||||
(base.iadd, base.iadd_imm, 0b000, 0b0000000),
|
||||
(base.isub, None, 0b000, 0b0100000),
|
||||
(base.bxor, base.bxor_imm, 0b100, 0b0000000),
|
||||
(base.bor, base.bor_imm, 0b110, 0b0000000),
|
||||
(base.band, base.band_imm, 0b111, 0b0000000)
|
||||
]:
|
||||
RV32.enc(inst.i32, R, OP(f3, f7))
|
||||
RV64.enc(inst.i64, R, OP(f3, f7))
|
||||
|
||||
# Immediate versions for add/xor/or/and.
|
||||
if inst_imm:
|
||||
RV32.enc(inst_imm.i32, I, OPIMM(f3))
|
||||
RV64.enc(inst_imm.i64, I, OPIMM(f3))
|
||||
|
||||
# 32-bit ops in RV64.
|
||||
RV64.enc(base.iadd.i32, R, OP32(0b000, 0b0000000))
|
||||
RV64.enc(base.isub.i32, R, OP32(0b000, 0b0100000))
|
||||
# There are no andiw/oriw/xoriw variations.
|
||||
RV64.enc(base.iadd_imm.i32, I, OPIMM32(0b000))
|
||||
|
||||
# Dynamic shifts have the same masking semantics as the cton base instructions.
|
||||
for inst, inst_imm, f3, f7 in [
|
||||
(base.ishl, base.ishl_imm, 0b001, 0b0000000),
|
||||
(base.ushr, base.ushr_imm, 0b101, 0b0000000),
|
||||
(base.sshr, base.sshr_imm, 0b101, 0b0100000),
|
||||
]:
|
||||
RV32.enc(inst.i32.i32, R, OP(f3, f7))
|
||||
RV64.enc(inst.i64.i64, R, OP(f3, f7))
|
||||
RV64.enc(inst.i32.i32, R, OP32(f3, f7))
|
||||
# Allow i32 shift amounts in 64-bit shifts.
|
||||
RV64.enc(inst.i64.i32, R, OP(f3, f7))
|
||||
RV64.enc(inst.i32.i64, R, OP32(f3, f7))
|
||||
|
||||
# Immediate shifts.
|
||||
RV32.enc(inst_imm.i32, Rshamt, OPIMM(f3, f7))
|
||||
RV64.enc(inst_imm.i64, Rshamt, OPIMM(f3, f7))
|
||||
RV64.enc(inst_imm.i32, Rshamt, OPIMM32(f3, f7))
|
||||
|
||||
# "M" Standard Extension for Integer Multiplication and Division.
|
||||
# Gated by the `use_m` flag.
|
||||
RV32.enc(base.imul.i32, R, OP(0b000, 0b0000001), isap=use_m)
|
||||
RV64.enc(base.imul.i64, R, OP(0b000, 0b0000001), isap=use_m)
|
||||
RV64.enc(base.imul.i32, R, OP32(0b000, 0b0000001), isap=use_m)
|
||||
68
lib/cretonne/meta/isa/riscv/recipes.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""
|
||||
RISC-V Encoding recipes.
|
||||
|
||||
The encoding recipes defined here more or less correspond to the RISC-V native
|
||||
instruction formats described in the reference:
|
||||
|
||||
The RISC-V Instruction Set Manual
|
||||
Volume I: User-Level ISA
|
||||
Version 2.1
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from cretonne import EncRecipe
|
||||
from cretonne.formats import Binary, BinaryImm
|
||||
from cretonne.predicates import IsSignedInt
|
||||
|
||||
# The low 7 bits of a RISC-V instruction make up the base opcode. All 32-bit
# instructions have 0b11 as the two low bits, with bits 6:2 determining the base
|
||||
# opcode.
|
||||
#
|
||||
# Encbits for the 32-bit recipes are opcode[6:2] | (funct3 << 5) | ...
|
||||
# The functions below encode the encbits.
|
||||
|
||||
|
||||
def LOAD(funct3):
|
||||
assert funct3 <= 0b111
|
||||
return 0b00000 | (funct3 << 5)
|
||||
|
||||
|
||||
def STORE(funct3):
|
||||
assert funct3 <= 0b111
|
||||
return 0b01000 | (funct3 << 5)
|
||||
|
||||
|
||||
def BRANCH(funct3):
|
||||
assert funct3 <= 0b111
|
||||
return 0b11000 | (funct3 << 5)
|
||||
|
||||
|
||||
def OPIMM(funct3, funct7=0):
|
||||
assert funct3 <= 0b111
|
||||
return 0b00100 | (funct3 << 5) | (funct7 << 8)
|
||||
|
||||
|
||||
def OPIMM32(funct3, funct7=0):
|
||||
assert funct3 <= 0b111
|
||||
return 0b00110 | (funct3 << 5) | (funct7 << 8)
|
||||
|
||||
|
||||
def OP(funct3, funct7):
|
||||
assert funct3 <= 0b111
|
||||
assert funct7 <= 0b1111111
|
||||
return 0b01100 | (funct3 << 5) | (funct7 << 8)
|
||||
|
||||
|
||||
def OP32(funct3, funct7):
|
||||
assert funct3 <= 0b111
|
||||
assert funct7 <= 0b1111111
|
||||
return 0b01110 | (funct3 << 5) | (funct7 << 8)
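# For example, `OP(0b000, 0b0000001)`, used for the 'M' extension multiply in
# encodings.py, yields encbits 0b01100 | (0b000 << 5) | (0b0000001 << 8) = 0x10c.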
|
||||
|
||||
|
||||
# R-type 32-bit instructions: These are mostly binary arithmetic instructions.
|
||||
# The encbits are `opcode[6:2] | (funct3 << 5) | (funct7 << 8)`.
|
||||
R = EncRecipe('R', Binary)
|
||||
|
||||
# R-type with an immediate shift amount instead of rs2.
|
||||
Rshamt = EncRecipe('Rshamt', BinaryImm)
|
||||
|
||||
I = EncRecipe('I', BinaryImm, instp=IsSignedInt(BinaryImm.imm, 12))
|
||||
28
lib/cretonne/meta/isa/riscv/settings.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""
|
||||
RISC-V settings.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from cretonne import SettingGroup, BoolSetting
|
||||
from cretonne.predicates import And
|
||||
import cretonne.settings as shared
|
||||
from .defs import isa
|
||||
|
||||
isa.settings = SettingGroup('riscv', parent=shared.group)
|
||||
|
||||
supports_m = BoolSetting("CPU supports the 'M' extension (mul/div)")
|
||||
supports_a = BoolSetting("CPU supports the 'A' extension (atomics)")
|
||||
supports_f = BoolSetting("CPU supports the 'F' extension (float)")
|
||||
supports_d = BoolSetting("CPU supports the 'D' extension (double)")
|
||||
|
||||
enable_m = BoolSetting(
|
||||
"Enable the use of 'M' instructions if available",
|
||||
default=True)
|
||||
|
||||
use_m = And(supports_m, enable_m)
|
||||
use_a = And(supports_a, shared.enable_atomics)
|
||||
use_f = And(supports_f, shared.enable_float)
|
||||
use_d = And(supports_d, shared.enable_float)
|
||||
|
||||
full_float = And(shared.enable_simd, supports_f, supports_d)
|
||||
|
||||
isa.settings.close(globals())
|
||||
123
lib/cretonne/meta/srcgen.py
Normal file
@@ -0,0 +1,123 @@
|
||||
"""
|
||||
Source code generator.
|
||||
|
||||
The `srcgen` module contains generic helper routines and classes for generating
|
||||
source code.
|
||||
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
class Formatter(object):
|
||||
"""
|
||||
Source code formatter class.
|
||||
|
||||
- Collect source code to be written to a file.
|
||||
- Keep track of indentation.
|
||||
|
||||
Indentation example:
|
||||
|
||||
>>> f = Formatter()
|
||||
>>> f.line('Hello line 1')
|
||||
>>> f.writelines()
|
||||
Hello line 1
|
||||
>>> f.indent_push()
|
||||
>>> f.comment('Nested comment')
|
||||
>>> f.indent_pop()
|
||||
>>> f.format('Back {} again', 'home')
|
||||
>>> f.writelines()
|
||||
Hello line 1
|
||||
// Nested comment
|
||||
Back home again
|
||||
|
||||
"""
|
||||
|
||||
shiftwidth = 4
|
||||
|
||||
def __init__(self):
|
||||
self.indent = ''
|
||||
self.lines = []
|
||||
|
||||
def indent_push(self):
|
||||
"""Increase current indentation level by one."""
|
||||
self.indent += ' ' * self.shiftwidth
|
||||
|
||||
def indent_pop(self):
|
||||
"""Decrease indentation by one level."""
|
||||
assert self.indent != '', 'Already at top level indentation'
|
||||
self.indent = self.indent[0:-self.shiftwidth]
|
||||
|
||||
def line(self, s=None):
|
||||
"""Add an indented line."""
|
||||
if s:
|
||||
self.lines.append('{}{}\n'.format(self.indent, s))
|
||||
else:
|
||||
self.lines.append('\n')
|
||||
|
||||
def outdented_line(self, s):
|
||||
"""
|
||||
Emit a line outdented one level.
|
||||
|
||||
This is used for '} else {' and similar things inside a single indented
|
||||
block.
|
||||
"""
|
||||
self.lines.append('{}{}\n'.format(self.indent[0:-self.shiftwidth], s))
|
||||
|
||||
def writelines(self, f=None):
|
||||
"""Write all lines to `f`."""
|
||||
if not f:
|
||||
f = sys.stdout
|
||||
f.writelines(self.lines)
|
||||
|
||||
def update_file(self, filename, directory):
|
||||
if directory is not None:
|
||||
filename = os.path.join(directory, filename)
|
||||
with open(filename, 'w') as f:
|
||||
self.writelines(f)
|
||||
|
||||
class _IndentedScope(object):
|
||||
def __init__(self, fmt, after):
|
||||
self.fmt = fmt
|
||||
self.after = after
|
||||
|
||||
def __enter__(self):
|
||||
self.fmt.indent_push()
|
||||
|
||||
def __exit__(self, t, v, tb):
|
||||
self.fmt.indent_pop()
|
||||
if self.after:
|
||||
self.fmt.line(self.after)
|
||||
|
||||
def indented(self, before=None, after=None):
|
||||
"""
|
||||
Return a scope object for use with a `with` statement:
|
||||
|
||||
>>> f = Formatter()
|
||||
>>> with f.indented('prefix {', '} suffix'):
|
||||
... f.line('hello')
|
||||
>>> f.writelines()
|
||||
prefix {
|
||||
hello
|
||||
} suffix
|
||||
|
||||
The optional `before` and `after` parameters are surrounding lines
|
||||
which are *not* indented.
|
||||
"""
|
||||
if before:
|
||||
self.line(before)
|
||||
return self._IndentedScope(self, after)
|
||||
|
||||
def format(self, fmt, *args):
|
||||
self.line(fmt.format(*args))
|
||||
|
||||
def comment(self, s):
|
||||
"""Add a comment line."""
|
||||
self.line('// ' + s)
|
||||
|
||||
def doc_comment(self, s):
|
||||
"""Add a (multi-line) documentation comment."""
|
||||
s = re.sub('^', self.indent + '/// ', s, flags=re.M) + '\n'
|
||||
self.lines.append(s)
|
||||
8
lib/cretonne/meta/test_constant_hash.py
Normal file
@@ -0,0 +1,8 @@
|
||||
from __future__ import absolute_import
|
||||
import doctest
|
||||
import constant_hash
|
||||
|
||||
|
||||
def load_tests(loader, tests, ignore):
|
||||
tests.addTests(doctest.DocTestSuite(constant_hash))
|
||||
return tests
|
||||
8
lib/cretonne/meta/test_srcgen.py
Normal file
@@ -0,0 +1,8 @@
|
||||
from __future__ import absolute_import
|
||||
import doctest
|
||||
import srcgen
|
||||
|
||||
|
||||
def load_tests(loader, tests, ignore):
|
||||
tests.addTests(doctest.DocTestSuite(srcgen))
|
||||
return tests
|
||||
68
lib/cretonne/meta/unique_table.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""
|
||||
Generate a table of unique items.
|
||||
|
||||
The `UniqueTable` class collects items into an array, removing duplicates. Each
|
||||
item is mapped to its offset in the final array.
|
||||
|
||||
This is a compression technique for compile-time generated tables.
|
||||
"""
|
||||
|
||||
|
||||
class UniqueTable:
|
||||
"""
|
||||
Collect items into the `table` list, removing duplicates.
|
||||
"""
|
||||
def __init__(self):
|
||||
# List of items added in order.
|
||||
self.table = list()
|
||||
# Map item -> index.
|
||||
self.index = dict()
|
||||
|
||||
def add(self, item):
|
||||
"""
|
||||
Add a single item to the table if it isn't already there.
|
||||
|
||||
Return the offset into `self.table` of the item.
|
||||
"""
|
||||
if item in self.index:
|
||||
return self.index[item]
|
||||
|
||||
idx = len(self.table)
|
||||
self.index[item] = idx
|
||||
self.table.append(item)
|
||||
return idx
|
||||
|
||||
|
||||
class UniqueSeqTable:
|
||||
"""
|
||||
Collect sequences into the `table` list, removing duplicates.
|
||||
|
||||
Sequences don't have to be of the same length.
|
||||
"""
|
||||
def __init__(self):
|
||||
self.table = list()
|
||||
# Map seq -> index.
|
||||
self.index = dict()
|
||||
|
||||
def add(self, seq):
|
||||
"""
|
||||
Add a sequence of items to the table. If the table already contains the
|
||||
items in `seq` in the same order, use those instead.
|
||||
|
||||
Return the offset into `self.table` of the beginning of `seq`.
|
||||
"""
|
||||
if len(seq) == 0:
|
||||
return 0
|
||||
seq = tuple(seq)
|
||||
if seq in self.index:
|
||||
return self.index[seq]
|
||||
|
||||
idx = len(self.table)
|
||||
self.table.extend(seq)
|
||||
|
||||
# Add seq and all sub-sequences to `index`.
|
||||
for length in range(1, len(seq) + 1):
|
||||
for offset in range(len(seq) - length + 1):
|
||||
self.index[seq[offset:offset+length]] = idx + offset
|
||||
|
||||
return idx
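# Example of the sharing behaviour (illustrative):
#
#   t = UniqueSeqTable()
#   t.add([1, 2, 3])   # -> 0, table is [1, 2, 3]
#   t.add([2, 3])      # -> 1, reuses the existing sub-sequence
#   t.add([3, 4])      # -> 3, table becomes [1, 2, 3, 3, 4]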
|
||||