Time benchmark script
dnth opened this issue · comments
Include the inference-time benchmark script from this notebook: https://github.com/openvinotoolkit/openvino_notebooks/blob/main/notebooks/102-pytorch-to-openvino/102-pytorch-onnx-to-openvino.ipynb
# Number of inference iterations to average over.
num_images = 100

# Benchmark the original PyTorch model on CPU.
# Convert the input once, outside the timed loop, so the measurement
# reflects model inference only (the OpenVINO runs below also receive
# pre-prepared inputs).
input_tensor = torch.as_tensor(input_image).float()

# no_grad() disables autograd bookkeeping so timing is pure inference cost.
with torch.no_grad():
    start = time.perf_counter()
    for _ in range(num_images):
        model(input_tensor)
    end = time.perf_counter()

time_torch = end - start
print(
    f"PyTorch model on CPU: {time_torch/num_images:.3f} seconds per image, "
    f"FPS: {num_images/time_torch:.2f}"
)
# Benchmark the ONNX model executed by OpenVINO Runtime on CPU.
compiled_model_onnx = core.compile_model(model=model_onnx, device_name="CPU")

start = time.perf_counter()
for _ in range(num_images):
    # The compiled model is called with a list of inputs; a single
    # normalized image tensor here.
    compiled_model_onnx([normalized_input_image])
end = time.perf_counter()

time_onnx = end - start
print(
    f"ONNX model in OpenVINO Runtime/CPU: {time_onnx/num_images:.3f} "
    f"seconds per image, FPS: {num_images/time_onnx:.2f}"
)
# Benchmark the OpenVINO IR model executed by OpenVINO Runtime on CPU.
compiled_model_ir = core.compile_model(model=model_ir, device_name="CPU")

start = time.perf_counter()
for _ in range(num_images):
    compiled_model_ir([input_image])
end = time.perf_counter()

time_ir = end - start
print(
    f"OpenVINO IR model in OpenVINO Runtime/CPU: {time_ir/num_images:.3f} "
    f"seconds per image, FPS: {num_images/time_ir:.2f}"
)
# Repeat both OpenVINO benchmarks on GPU, if one is available.
if "GPU" in core.available_devices:
    # ONNX model on GPU.
    compiled_model_onnx_gpu = core.compile_model(model=model_onnx, device_name="GPU")

    start = time.perf_counter()
    for _ in range(num_images):
        # NOTE(review): the CPU ONNX run above uses normalized_input_image;
        # feed the same input here so the CPU/GPU comparison is like-for-like.
        compiled_model_onnx_gpu([normalized_input_image])
    end = time.perf_counter()

    time_onnx_gpu = end - start
    print(
        f"ONNX model in OpenVINO/GPU: {time_onnx_gpu/num_images:.3f} "
        f"seconds per image, FPS: {num_images/time_onnx_gpu:.2f}"
    )

    # IR model on GPU.
    compiled_model_ir_gpu = core.compile_model(model=model_ir, device_name="GPU")

    start = time.perf_counter()
    for _ in range(num_images):
        compiled_model_ir_gpu([input_image])
    end = time.perf_counter()

    time_ir_gpu = end - start
    print(
        f"IR model in OpenVINO/GPU: {time_ir_gpu/num_images:.3f} "
        f"seconds per image, FPS: {num_images/time_ir_gpu:.2f}"
    )