Set training mode for the encoder and decoder when converting to TFLite.
Also add selected TF ops to the TFLite converter.

PiperOrigin-RevId: 557520277
commit 9e45e2b6e9
parent ee217ceb67
@@ -231,8 +231,8 @@ class FaceStylizer(object):
     # Create an end-to-end model by concatenating encoder and decoder
     inputs = tf.keras.Input(shape=(256, 256, 3))
-    x = self._encoder(inputs)
-    x = self._decoder({'inputs': x + self.w_avg})
+    x = self._encoder(inputs, training=True)
+    x = self._decoder({'inputs': x + self.w_avg}, training=True)
     x = x['image'][-1]
     # Scale the data range from [-1, 1] to [0, 1] to support running inference
     # on both CPU and GPU.
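
Note on the hunk above: in Keras, a training flag passed when a sub-model is called inside a functional graph is recorded in that graph, so training=True keeps training-mode behavior (e.g. BatchNormalization using batch statistics) when the concatenated model is later traced for conversion. Below is a minimal, self-contained sketch of that pattern only; the encoder/decoder layers are placeholders, not the stylizer's real sub-models.

import tensorflow as tf

# Placeholder sub-models; the real encoder/decoder come from the stylizer.
encoder = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, padding='same'),
    tf.keras.layers.BatchNormalization(),
])
decoder = tf.keras.Sequential([
    tf.keras.layers.Conv2DTranspose(3, 3, padding='same', activation='tanh'),
])

inputs = tf.keras.Input(shape=(256, 256, 3))
# training=True at call time fixes training-mode behavior into the graph.
x = encoder(inputs, training=True)
x = decoder(x, training=True)
# Scale the data range from [-1, 1] to [0, 1], as in the comment above.
outputs = (x + 1.0) / 2.0
model = tf.keras.Model(inputs=inputs, outputs=outputs)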
@@ -241,6 +241,10 @@ class FaceStylizer(object):
     face_stylizer_model_buffer = model_util.convert_to_tflite(
         model=model,
+        supported_ops=[
+            tf.lite.OpsSet.TFLITE_BUILTINS,
+            tf.lite.OpsSet.SELECT_TF_OPS,
+        ],
         preprocess=self._preprocessor,
     )
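
The supported_ops list is presumably forwarded to the TensorFlow Lite converter's target_spec. As a rough sketch of what that enables at the plain TF API level (the helper name below is illustrative, not MediaPipe's model_util.convert_to_tflite):

import tensorflow as tf

def convert_keras_model_with_select_tf_ops(model: tf.keras.Model) -> bytes:
  """Converts a Keras model, allowing TF kernels for ops without builtins."""
  converter = tf.lite.TFLiteConverter.from_keras_model(model)
  converter.target_spec.supported_ops = [
      tf.lite.OpsSet.TFLITE_BUILTINS,  # prefer native TFLite kernels
      tf.lite.OpsSet.SELECT_TF_OPS,    # fall back to TensorFlow ops (Flex)
  ]
  return converter.convert()

Enabling SELECT_TF_OPS lets conversion succeed for ops that have no TFLite builtin kernel, at the cost of shipping the Flex delegate with the runtime.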