VeriSilicon/tflite-vx-delegate

sync tf lite models for test

hibillchou opened this issue · 2 comments

@hibillchou

Could you explain how to generate this?

  • verification Python script
import os
import sys
import numpy as np
import tensorflow as tf
import tflite_runtime.interpreter as tflite

def flatten_result_mspe_check(predict, golden, threshold=1e-2, show_detail_result=False):
    """Compare a prediction against a golden reference using MSPE.

    Flattens both inputs and accumulates the mean squared percentage
    error.  A golden element of zero is scored as ``predict/mean(golden)``
    (and skipped entirely when the golden mean is also zero) to avoid a
    division by zero.

    Args:
        predict: array-like prediction values (any shape; flattened).
        golden: array-like reference values; flattened size must match.
        threshold: maximum MSPE accepted as a pass.
        show_detail_result: when True, print the indices of the top-5
            values of each array and the computed MSPE for debugging.

    Returns:
        True when mspe < threshold (PASS), False otherwise (FAIL).

    Raises:
        ValueError: if the flattened sizes of predict and golden differ.
    """
    pd = np.asarray(predict).flatten()
    gt = np.asarray(golden).flatten()
    if pd.size != gt.size:
        # Size mismatch means the two runs produced incompatible outputs;
        # raise with the details instead of printing + bare Exception().
        raise ValueError(
            f'The output between prediction{pd.size} and golden{gt.size} is not match.')
    gt_mean = gt.mean()
    mspe = 0.0
    for p, g in zip(pd, gt):
        if g == 0:
            if gt_mean == 0:
                continue  # all-zero golden: nothing meaningful to compare
            mspe += p / gt_mean
        else:
            mspe += np.square((p - g) / g)
    mspe = mspe / len(pd)
    if show_detail_result:
        # Indices of the 5 largest values of each array, plus the metric.
        print((-pd).argsort()[:5])
        print((-gt).argsort()[:5])
        print(mspe)
    return bool(mspe < threshold)

if __name__ == '__main__':

    # Path to the .tflite model under test, given on the command line.
    model = sys.argv[1]

    # Increase the recursion limit: large graphs can overflow the default
    # limit inside the interpreter wrappers.
    sys.setrecursionlimit(9000)
    print(sys.getrecursionlimit())

    # Delegate-backed interpreter: run the model through the delegate so
    # its outputs can be diffed against the plain CPU run.
    gpuInterpreter = tflite.Interpreter(model)
    gpuDelegate = tflite.load_delegate('libnnapi.so')
    #gpuDelegate = tflite.load_delegate('libvx_delegate.so')
    gpuInterpreter._interpreter.ModifyGraphWithDelegate(gpuDelegate._get_native_delegate_pointer())
    gpuInterpreter.allocate_tensors()

    # CPU reference interpreter (no delegate).
    cpuInterpreter = tflite.Interpreter(model)
    cpuInterpreter.allocate_tensors()

    input_details = gpuInterpreter.get_input_details()
    output_details = gpuInterpreter.get_output_details()

    # Open the log once, instead of re-opening (and leaking) a fresh file
    # handle on every single print call.
    with open("output.txt", "a") as log:
        print(model, file=log)

        print('==================Input==================', file=log)
        for detail in input_details:
            # Feed identical random data to both interpreters.
            input_data = np.array(np.random.random_sample(detail['shape']), detail['dtype'])
            print(input_data, file=log)
            gpuInterpreter.set_tensor(detail['index'], input_data)
            cpuInterpreter.set_tensor(detail['index'], input_data)
        print('========================================', file=log)

        gpuInterpreter.invoke()
        cpuInterpreter.invoke()

        print('==================Output==================', file=log)
        # The function `get_tensor()` returns a copy of the tensor data.
        # Use `tensor()` in order to get a pointer to the tensor.
        isValid = True
        for detail in output_details:
            output_data = gpuInterpreter.get_tensor(detail['index'])
            compared_output_data = cpuInterpreter.get_tensor(detail['index'])

            # NOTE(review): the MSPE check runs element-by-element (each
            # call compares one scalar pair); comparing whole tensors per
            # output would likely be the intent — confirm before changing.
            for gpu_data, cpu_data in zip(output_data.flatten(), compared_output_data.flatten()):
                print('gpu_data:', gpu_data, file=log)
                print('cpu_data:', cpu_data, file=log)
                isValid = flatten_result_mspe_check(gpu_data, cpu_data, threshold=1e-3, show_detail_result=True)
                if not isValid:
                    print("FAILED")
                    print("FAILED", file=log)
                    break

        if isValid:
            print("PASS")
            print("PASS", file=log)
        print('==========================================', file=log)
# Build recipe: regenerate the TF Lite test-model zip from a TensorFlow
# source checkout using the generate_examples tool.
cd <work dir>/tensorflow
# bazelisk (installed via npm) picks the bazel version the checkout expects.
apt-get install npm
npm install -g @bazel/bazelisk
# Build the test-model generator.
bazel build //tensorflow/lite/testing:generate_examples
# Python build prerequisites for the pip-package target.
sudo apt install python3-dev python3-pip
apt install python3-dev python3-pip
pip install -U pip numpy wheel
bazel build //tensorflow/tools/pip_package:build_pip_package
# NOTE(review): duplicated command below — likely a paste artifact.
bazel build //tensorflow/tools/pip_package:build_pip_package
# toco is the converter binary generate_examples invokes via --toco.
bazel build //tensorflow/lite/toco
# Emit the "stack" test set as stack.zip into <des dir>.
./bazel-bin/tensorflow/lite/testing/generate_examples --test_sets stack --toco ./bazel-bin/tensorflow/lite/toco/toco --zip_to_output stack.zip <des dir>