2 changes: 1 addition & 1 deletion fetch-repos.sh
@@ -28,7 +28,7 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

QONNX_COMMIT="0630ceaee17799096d1750abcfb5bbe0a2877888"
QONNX_COMMIT="0bc01a47ab43383efceeefeec7ab67e9a2ecb871"
FINN_EXP_COMMIT="0724be21111a21f0d81a072fccc1c446e053f851"
BREVITAS_COMMIT="4617f7bd136e96fa21c7f76e3c7e2e37fe563837"
CNPY_COMMIT="8c82362372ce600bbd1cf11d64661ab69d38d7de"
21 changes: 10 additions & 11 deletions notebooks/advanced/2_custom_op.ipynb
@@ -130,7 +130,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"To make sure our custom op is available, it needs to be registered. The best practice for this is to create a submodule under `qonnx.custom_op` which includes a `custom_op` dictionary that maps strings (op names) to classes (op implementations). Since we're in a Jupyter notebook we'll just hijack it at runtime like this:"
"To make sure our custom op is available, it needs to be registered."
]
},
{
@@ -139,15 +139,15 @@
"metadata": {},
"outputs": [],
"source": [
"import qonnx.custom_op.general as general\n",
"general.custom_op[\"MyPythonPowerOp\"] = MyPythonPowerOp"
"from qonnx.custom_op.registry import add_op_to_domain\n",
"add_op_to_domain(\"qonnx.custom_op.general\", MyPythonPowerOp)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see which custom ops are registered under this submodule by looking at the dictionary:"
"We can see which custom ops are registered under this submodule by using a helper function:"
]
},
{
@@ -156,7 +156,8 @@
"metadata": {},
"outputs": [],
"source": [
"general.custom_op"
"from qonnx.custom_op.registry import get_ops_in_domain\n",
"get_ops_in_domain(\"qonnx.custom_op.general\")"
]
},
{
@@ -412,7 +413,7 @@
"outputs": [],
"source": [
"# register our new op\n",
"general.custom_op[\"MyMixedPowerOp\"] = MyMixedPowerOp\n",
"add_op_to_domain(\"qonnx.custom_op.general\", MyMixedPowerOp)\n",
"\n",
"# make graph with new op\n",
"mixedop_graph = make_graph(input_shape, 2, op_type = \"MyMixedPowerOp\")\n",
@@ -432,10 +433,8 @@
"metadata": {},
"outputs": [],
"source": [
"from qonnx.custom_op.registry import getCustomOp\n",
"\n",
"# get FINN wrapper for this node, with all the functionality\n",
"op_inst = getCustomOp(mixedop_graph.model.graph.node[0])\n",
"op_inst = ModelWrapper(mixedop_graph.model).get_customop_wrapper(mixedop_graph.model.graph.node[0])\n",
"print(\"Available functions: \" + str(dir(op_inst)))\n",
"# query some attributes\n",
"print(\"codegen_dir: \" + op_inst.get_nodeattr(\"codegen_dir\"))\n",
@@ -471,7 +470,7 @@
" # check node type before we do anything\n",
" if node.op_type == \"MyMixedPowerOp\":\n",
" # get FINN wrapper for this node, with all the functions\n",
" op_inst = getCustomOp(node)\n",
" op_inst = self.ref_input_model.get_customop_wrapper(node)\n",
" if not os.path.isdir(op_inst.get_nodeattr(\"codegen_dir\")):\n",
" # call the codegen function we defined\n",
" # this will modify the underlying node by setting attribute\n",
@@ -512,7 +511,7 @@
"metadata": {},
"outputs": [],
"source": [
"new_op_inst = getCustomOp(mixedop_graph_new.graph.node[0])\n",
"new_op_inst = mixedop_graph_new.get_customop_wrapper(mixedop_graph_new.graph.node[0])\n",
"codegen_dir = new_op_inst.get_nodeattr(\"codegen_dir\")\n",
"print(codegen_dir)"
]
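The notebook now registers custom ops through the helpers in qonnx.custom_op.registry rather than by writing into the custom_op dictionary, and fetches op wrappers via ModelWrapper.get_customop_wrapper instead of getCustomOp. A minimal sketch of the resulting workflow, based only on the calls visible in this diff; MyPythonPowerOp and the wrapped model are assumed to exist as defined earlier in the notebook:

from qonnx.custom_op.registry import add_op_to_domain, get_ops_in_domain

# register the op class under the general QONNX custom op domain
add_op_to_domain("qonnx.custom_op.general", MyPythonPowerOp)

# inspect what is currently registered in that domain
print(get_ops_in_domain("qonnx.custom_op.general"))

# given a ModelWrapper instance `model` whose first node uses the op,
# obtain the CustomOp wrapper through the model itself
op_inst = model.get_customop_wrapper(model.graph.node[0])
print("Available functions: " + str(dir(op_inst)))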
5 changes: 1 addition & 4 deletions src/finn/custom_op/fpgadataflow/channelwise_op.py
@@ -235,10 +235,7 @@ def execute_node(self, context, graph):
outputs=[outp],
)

opset_version = self.onnx_opset_version
opset_imports = [helper.make_opsetid("", opset_version)]
onnx_kwargs = {"opset_imports": opset_imports}
model_func = qonnx_make_model(graph_func, **onnx_kwargs)
model_func = qonnx_make_model(graph_func)
idict = {node.input[0]: inp_values, node.input[1]: param_values}
sess = rt.InferenceSession(model_func.SerializeToString())
result = sess.run(None, idict)
3 changes: 1 addition & 2 deletions src/finn/custom_op/fpgadataflow/convolutioninputgenerator.py
@@ -251,8 +251,7 @@ def execute_node(self, context, graph):
outputs=[outp],
)

opset_version = self.onnx_opset_version
opset_imports = [helper.make_opsetid("", opset_version)]
opset_imports = [helper.make_opsetid("qonnx.custom_op.general", 1)]
onnx_kwargs = {"opset_imports": opset_imports}
model_im2col = ModelWrapper(qonnx_make_model(graph_im2col, **onnx_kwargs))
model_im2col.set_tensor_datatype(node.input[0], self.get_input_datatype())
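With qonnx_make_model now supplying default opset imports on its own, the helper graph built here only needs an explicit import for the qonnx.custom_op.general domain, since its single node (Im2Col) lives in that domain. A rough sketch of the pattern, not taken verbatim from the FINN sources; the shapes and Im2Col attribute values are illustrative assumptions:

from onnx import TensorProto, helper
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.util.basic import qonnx_make_model

# single-node helper graph containing only a QONNX custom op
inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 4, 4, 2])
outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, 3, 3, 8])
im2col_node = helper.make_node(
    "Im2Col",
    ["inp"],
    ["outp"],
    domain="qonnx.custom_op.general",
    stride=[1, 1],
    kernel_size=[2, 2],
    input_shape="(1,4,4,2)",
)
graph_im2col = helper.make_graph([im2col_node], "im2col_graph", [inp], [outp])

# import only the custom op domain; standard-ops-only helper graphs can instead
# rely on qonnx_make_model's defaults, as in channelwise_op.py and labelselect.py above
opset_imports = [helper.make_opsetid("qonnx.custom_op.general", 1)]
model_im2col = ModelWrapper(qonnx_make_model(graph_im2col, opset_imports=opset_imports))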
5 changes: 1 addition & 4 deletions src/finn/custom_op/fpgadataflow/labelselect.py
@@ -152,10 +152,7 @@ def execute_node(self, context, graph):
outputs=[val_outp, outp],
)

opset_version = self.onnx_opset_version
opset_imports = [helper.make_opsetid("", opset_version)]
onnx_kwargs = {"opset_imports": opset_imports}
model_topk = qonnx_make_model(graph_topk, **onnx_kwargs)
model_topk = qonnx_make_model(graph_topk)
idict = {node.input[0]: inp_values, "k_inp": [k]}
sess = rt.InferenceSession(model_topk.SerializeToString())
result = sess.run(None, idict)
3 changes: 1 addition & 2 deletions src/finn/custom_op/fpgadataflow/lookup.py
@@ -180,8 +180,7 @@ def execute_node(self, context, graph):
outputs=[outp],
)

opset_version = 13
opset_imports = [helper.make_opsetid("", opset_version)]
opset_imports = [helper.make_opsetid("", 13)]
onnx_kwargs = {"opset_imports": opset_imports}
model_gather = qonnx_make_model(graph_gather, **onnx_kwargs)
idict = {node.input[0]: inp_values, node.input[1]: data_values}
3 changes: 1 addition & 2 deletions src/finn/custom_op/fpgadataflow/upsampler.py
@@ -152,8 +152,7 @@ def execute_node(self, context, graph):
outputs=[outp],
)

opset_version = 13
opset_imports = [helper.make_opsetid("", opset_version)]
opset_imports = [helper.make_opsetid("", 13)]
onnx_kwargs = {"opset_imports": opset_imports}
model_resize = qonnx_make_model(graph_resize, **onnx_kwargs)
idict = {node.input[0]: inp_values, "scales": scales_val}
8 changes: 7 additions & 1 deletion src/finn/transformation/qonnx/fold_quant_weights.py
@@ -34,6 +34,7 @@
from qonnx.transformation.infer_shapes import InferShapes
from qonnx.transformation.quant_constant_folding import FoldTransposeIntoQuantInit
from qonnx.transformation.remove import remove_node_and_rewire
from qonnx.util.basic import get_preferred_qonnx_opset


class FoldQuantWeights(Transformation):
@@ -46,6 +47,7 @@ def apply(self, model):
node_ind = 0
graph_modified = False
execution_context = model.make_empty_exec_context()
opset_imports = model.get_opset_imports()
for n in graph.node:
node_ind += 1
if n.op_type == "Quant" or n.op_type == "BipolarQuant":
@@ -89,7 +91,11 @@ def apply(self, model):
unity_scale = (scale.flatten() == 1.0).all()
# this node has no dynamic inputs, only constant ones -- so we can
# do constant folding.
oxe.execute_node(n, execution_context, graph)
if n.domain in opset_imports:
opset_version = opset_imports[n.domain]
else:
opset_version = get_preferred_qonnx_opset()
oxe.execute_node(n, execution_context, graph, opset_version)
q_node_output = execution_context[node_out]
# Check we can directly constant fold
if unity_scale:
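FoldQuantWeights now resolves the opset version for the node it folds from the model's own opset imports, falling back to the preferred QONNX opset when the node's domain is not imported, and passes that version to execute_node. A condensed sketch of the lookup, assuming a loaded ModelWrapper `model`, a node `n` taken from `model.graph.node`, and that `oxe` is the ONNX execution module alias already used in this file:

from qonnx.util.basic import get_preferred_qonnx_opset

execution_context = model.make_empty_exec_context()
opset_imports = model.get_opset_imports()  # maps domain -> opset version

# prefer the version the model declares for the node's domain,
# otherwise fall back to the preferred QONNX opset
if n.domain in opset_imports:
    opset_version = opset_imports[n.domain]
else:
    opset_version = get_preferred_qonnx_opset()

oxe.execute_node(n, execution_context, model.graph, opset_version)

The same pattern appears in streamline/reorder.py below, where the version is taken directly from the Conv node's domain.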
4 changes: 3 additions & 1 deletion src/finn/transformation/streamline/reorder.py
@@ -270,7 +270,9 @@ def apply(self, model):
exec_ctx = model.make_empty_exec_context()
exec_ctx[conv_in_name] = conv_in_const
# execute the conv node only
execute_node(conv_node, exec_ctx, model.graph)
opset_imports = model.get_opset_imports()
opset_version = opset_imports[conv_node.domain]
execute_node(conv_node, exec_ctx, model.graph, opset_version)
# retrieve the conv output
Anew = exec_ctx[end_name]
