Layer h_quantize: FixedPointQuantizer is not supported by hls4ml.
AnouarITI commented
Summary
I downloaded the latest version of hls4ml, v0.8.1, and trained a CNN model using the HGQ QAT method. However, when I try to convert the model to a proxy model, I get the error: Layer h_quantize: FixedPointQuantizer is not supported by hls4ml.
Code to reproduce
from datetime import datetime

import numpy as np
import tensorflow as tf
import tensorboard
from tensorflow import keras
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.losses import SparseCategoricalCrossentropy
import h5py as h5

from HGQ import trace_minmax, to_proxy_model
from HGQ.layers import (HQuantize, HDense, HActivation, PMaxPooling2D,
                        HConv2DBatchNorm, PFlatten, PAveragePooling2D)

print(tf.__version__)
print(tensorboard.__version__)

# Architecture used during training (the trained weights are loaded below).
beta = 3e-8
model = keras.Sequential([
    HQuantize(beta=beta),
    HConv2DBatchNorm(32, (5, 5), activation='relu', beta=beta),
    PMaxPooling2D(2, strides=(2, 2)),
    HConv2DBatchNorm(32, (5, 5), activation='relu', beta=beta),
    PMaxPooling2D(2, strides=(2, 2)),
    HConv2DBatchNorm(16, (3, 3), activation='relu', beta=beta),
    PAveragePooling2D(2, strides=(4, 4)),
    PFlatten(),
    HDense(64, activation='relu', beta=beta),
    HDense(32, activation='relu', beta=beta),
    HDense(7, beta=beta),
])

# Load the trained HGQ model (replaces the freshly built one above).
model: keras.Model = load_model('HGQ_CNN_3e6.h5')

# Load and reshape the test set.
f_test = h5.File('../test_set.h5', 'r')
X_test = f_test['X'][:]
Y_test = f_test['Y'][:]
X_test = X_test.reshape(X_test.shape[0], 32, 32, 2)

score = model.evaluate(X_test, np.argmax(Y_test, axis=1), verbose=0, batch_size=4096)
print(score[1] * 100)  # ~56% accuracy

# Record activation ranges, then convert to a proxy model.
trace_minmax(model, X_test)
proxy = to_proxy_model(model)  # the error is raised here
Saved model
The trained model is attached: HGQ_CNN_3e6.zip
Error
Cell In[8], line 1
----> 1 proxy = to_proxy_model(model)
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:368, in to_proxy_model(model, aggressive, accum_fp_max_offset, unary_lut_max_table_size)
366 if accum_fp_max_offset is not None and accum_fp_max_offset < 0:
367 warn('You are using a negative value for bias_accum_bits. Please make sure you know what you are doing.')
--> 368 proxy = convert_model(model, layer_xformer=ProxyLayerXFormer('WRAP' if aggressive else 'SAT').__call__)
369 if unary_lut_max_table_size > 0:
370 proxy = convert_model(proxy, layer_xformer=partial(xfr_to_unary_lut, max_table_size=unary_lut_max_table_size))
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:214, in convert_model(model, layer_xformer)
212 if len(inputs) == 1:
213 inputs = inputs[0]
--> 214 outs = apply_layer(model, inputs, layer_xformer=layer_xformer)
215 return keras.Model(inputs, outs)
File /usr/lib/python3.10/functools.py:889, in singledispatch.<locals>.wrapper(*args, **kw)
885 if not args:
886 raise TypeError(f'{funcname} requires at least '
887 '1 positional argument')
--> 889 return dispatch(args[0].__class__)(*args, **kw)
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:192, in _(layer, inp_tensors, namer, layer_xformer)
190 satisfied = {node: tensor for node, tensor in zip(input_nodes, inp_tensors)}
191 get_out_tensor = partial(_get_out_tensor, dependencies_dict, satisfied, namer=namer, layer_xformer=layer_xformer)
--> 192 outs = [get_out_tensor(output_node) for output_node in output_nodes]
193 return outs[0] if len(outs) == 1 else outs
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:192, in <listcomp>(.0)
190 satisfied = {node: tensor for node, tensor in zip(input_nodes, inp_tensors)}
191 get_out_tensor = partial(_get_out_tensor, dependencies_dict, satisfied, namer=namer, layer_xformer=layer_xformer)
--> 192 outs = [get_out_tensor(output_node) for output_node in output_nodes]
193 return outs[0] if len(outs) == 1 else outs
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:125, in _get_out_tensor(dependencies_dict, satisfied, node, namer, layer_xformer)
123 inps.append(satisfied[req])
124 else:
--> 125 inp = _get_out_tensor(dependencies_dict, satisfied, req, namer, layer_xformer)
126 inps.append(inp)
127 if len(inps) == 1:
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:125, in _get_out_tensor(dependencies_dict, satisfied, node, namer, layer_xformer)
123 inps.append(satisfied[req])
124 else:
--> 125 inp = _get_out_tensor(dependencies_dict, satisfied, req, namer, layer_xformer)
126 inps.append(inp)
127 if len(inps) == 1:
[... skipping similar frames: _get_out_tensor at line 125 (7 times)]
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:125, in _get_out_tensor(dependencies_dict, satisfied, node, namer, layer_xformer)
123 inps.append(satisfied[req])
124 else:
--> 125 inp = _get_out_tensor(dependencies_dict, satisfied, req, namer, layer_xformer)
126 inps.append(inp)
127 if len(inps) == 1:
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:130, in _get_out_tensor(dependencies_dict, satisfied, node, namer, layer_xformer)
127 if len(inps) == 1:
128 inps = inps[0]
--> 130 satisfied[node] = apply_layer(layer, inps, namer=namer, layer_xformer=layer_xformer)
131 return satisfied[node]
File /usr/lib/python3.10/functools.py:889, in singledispatch.<locals>.wrapper(*args, **kw)
885 if not args:
886 raise TypeError(f'{funcname} requires at least '
887 '1 positional argument')
--> 889 return dispatch(args[0].__class__)(*args, **kw)
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:157, in apply_layer(layer, inp_tensors, namer, layer_xformer)
155 assert n < 1024, f'layer_transformer does not converge for layer (name: {layer.name if layer is not None else None}).'
156 layer = layer_xf
--> 157 layer_xf = layer_xformer(layer)
158 n += 1
160 if layer_xf is not None:
File /usr/lib/python3.10/functools.py:926, in singledispatchmethod.__get__.<locals>._method(*args, **kwargs)
924 def _method(*args, **kwargs):
925 method = self.dispatcher.dispatch(args[0].__class__)
--> 926 return method.__get__(obj, cls)(*args, **kwargs)
File /usr/local/lib/python3.10/dist-packages/HGQ/proxy/convert.py:286, in ProxyLayerXFormer.__call__(self, layer)
284 return layer
285 cls_name = layer.__class__.__name__
--> 286 assert cls_name in layer_handlers, f'Layer {layer.name}: {cls_name} is not supported by hls4ml.'
287 return layer
AssertionError: Layer h_quantize: FixedPointQuantizer is not supported by hls4ml.
calad0i commented
Which version of hls4ml are you using? This is probably because you are using a version from before commit 7f2850263f4a2638c0145c29a7381efe4685ba5c. If you installed hls4ml from PyPI, please install from the GitHub main branch instead: pip install 'git+https://github.com/fastmachinelearning/hls4ml'.
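A quick way to confirm which build is installed (a minimal sketch; per the comment above, the PyPI 0.8.1 release predates that commit):

import hls4ml

# Print the installed hls4ml version. Seeing the plain PyPI release
# string "0.8.1" here suggests the FixedPointQuantizer support is
# missing; a Git install from main typically reports a dev version.
print(hls4ml.__version__)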
AnouarITI commented
Thank you. It is generating the HLS project now!
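For reference, the follow-up step that generates the HLS project likely looks something like this (a minimal sketch, not from the thread; output_dir and backend are placeholder values to adapt to your setup):

import hls4ml

# Convert the proxy model into an hls4ml project. 'proxy' is the model
# returned by to_proxy_model above.
hls_model = hls4ml.converters.convert_from_keras_model(
    proxy,
    output_dir='hls_prj',  # hypothetical project directory
    backend='Vivado',      # assumption: adjust to your toolchain
)
hls_model.compile()  # writes the HLS project and builds the C simulation library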