ModelTC / NNLQP

Geek Repo:Geek Repo

Github PK Tool:Github PK Tool

Runtime Onnx Model Error

karthickai opened this issue · comments

Thanks for the dataset. None of the onnx models in your dataset works on onnx-runtime. I also tried converting them to PyTorch, which did not work either. For example, I got the following error while trying to run this model: dataset/multi_platform/onnx/nnmeter_alexnet/nnmeter_alexnet_transform_0122.onnx
Got this error
return np.asarray(data, dtype=storage_np_dtype).astype(np_dtype).reshape(dims)
ValueError: cannot reshape array of size 0 into shape (90,3,3,3)

Our latency predictor only requires the structures and attributes of onnx models.
The weights are not required.
For saving space, the uploaded onnx models do not contain weights.
You can try to fill onnx models with random weights and infer them by onnxruntime, as follows,

import onnx
import numpy as np
import onnxruntime as ort


def fill_onnx_weight(G):
    """Populate every weight tensor of an ONNX model with random data.

    Covers both graph initializers and tensors stored as attributes of
    Constant nodes (conv weights are sometimes carried that way).
    """
    for initializer in G.graph.initializer:
        fill_a_tensor(initializer)

    for node in G.graph.node:
        # conv weight may be a Constant node instead of an initializer
        if node.op_type != "Constant":
            continue
        for attr in node.attribute:
            if attr.type == onnx.AttributeProto.AttributeType.TENSOR:
                fill_a_tensor(attr.t)


def fill_a_tensor(T, min_len=10):
    """Fill TensorProto ``T`` with random values if it carries no data.

    The dataset's models are weight-stripped: tensors keep their ``dims``
    and ``data_type`` but have empty payloads.  This fills the appropriate
    proto field for the tensor's dtype so onnxruntime can load the model.
    ``min_len`` is unused; it is kept for interface compatibility.
    """

    def stat_size(T):
        # Number of elements already stored, whichever proto field holds them.
        L = 0
        L = max(L, len(T.float_data))
        L = max(L, len(T.double_data))
        L = max(L, len(T.int32_data))
        L = max(L, len(T.int64_data))
        L = max(L, len(T.uint64_data))
        # raw_data is bytes; assume 4-byte elements as a size lower bound.
        L = max(L, len(T.raw_data) // 4)
        return L

    # Skip tensors that already carry data, e.g. shape constants that
    # overlap with initializers; anything with real size 0 gets filled.
    if stat_size(T) > 0:
        return

    # np.prod([]) is a float 1.0 for a rank-0 tensor -- cast to int so the
    # np.random.rand(length) calls below accept it.
    length = int(np.prod(T.dims))

    if T.data_type == onnx.TensorProto.FLOAT:
        T.float_data[:] = np.random.rand(length) - 0.5

    elif T.data_type == onnx.TensorProto.DOUBLE:
        T.double_data[:] = np.random.rand(length) - 0.5

    elif T.data_type == onnx.TensorProto.COMPLEX64:
        # Complex values are stored as interleaved (real, imag) pairs.
        T.float_data[:] = np.random.rand(length * 2) - 0.5

    elif T.data_type == onnx.TensorProto.COMPLEX128:
        T.double_data[:] = np.random.rand(length * 2) - 0.5

    elif T.data_type == onnx.TensorProto.INT64:
        T.int64_data[:] = (np.random.rand(length) * 127).astype(np.int64) - 64

    elif T.data_type in (onnx.TensorProto.UINT32,
                         onnx.TensorProto.UINT64):
        T.uint64_data[:] = (np.random.rand(length) * 127).astype(np.uint64)

    elif T.data_type in (onnx.TensorProto.UINT8,
                         onnx.TensorProto.INT8,
                         onnx.TensorProto.UINT16,
                         onnx.TensorProto.INT16,
                         onnx.TensorProto.INT32,
                         onnx.TensorProto.BOOL,
                         onnx.TensorProto.FLOAT16,
                         onnx.TensorProto.BFLOAT16):
        # All of these are packed into the int32_data field by the proto spec.
        T.int32_data[:] = (np.random.rand(length) * 127).astype(np.int32) - 64

    else:
        # onnx.TensorProto.UNDEFINED and onnx.TensorProto.STRING
        # cannot be filled with numeric randoms; leave them empty.
        pass


def pass_remove_ceil_mode_for_MaxPool(G):
    """Strip the ``ceil_mode`` attribute from every *Pool node in ``G``.

    Some onnxruntime versions reject pooling nodes that carry ceil_mode;
    removing the attribute falls back to the operator's default behavior.
    """
    for node in G.graph.node:
        if not node.op_type.endswith("Pool"):
            continue
        # Iterate over a snapshot: removing from the attribute container
        # while iterating it would skip the element after the removed one.
        for attr in list(node.attribute):
            if attr.name == "ceil_mode":
                print("remove attribute ceil_mode for {}: {}".format(node.op_type, node.name))
                node.attribute.remove(attr)


if __name__ == "__main__":
    # Path to a weight-stripped ONNX model from the NNLQP dataset.
    onnx_path = "dataset/multi_platform/onnx/nnmeter_alexnet/nnmeter_alexnet_transform_0122.onnx"
    G = onnx.load(onnx_path)
    # Fill the stripped weights with random values so the model can run.
    fill_onnx_weight(G)
    # Drop ceil_mode attributes that some onnxruntime versions reject.
    pass_remove_ceil_mode_for_MaxPool(G)
    # onnx.save(G, onnx_path.replace(".onnx", "_new.onnx"))

    # Build the session from the in-memory model bytes (no need to save).
    sess = ort.InferenceSession(G.SerializeToString())
    sess.set_providers(['CPUExecutionProvider'])
    input_name = sess.get_inputs()[0].name
    input_shape = sess.get_inputs()[0].shape
    output_name = sess.get_outputs()[0].name
    # NOTE(review): assumes input_shape contains only concrete int dims --
    # symbolic dims (strings/None) would break np.random.rand; confirm.
    data = (np.random.rand(*input_shape) - 0.5).astype(np.float32)

    print("run with input {}, shape={}".format(input_name, data.shape))
    outputs = sess.run([output_name], {input_name : data})
    for idx, o in enumerate(outputs):
        print("output {} shape: {}".format(idx, o.shape))