Skip to content
Snippets Groups Projects

Infrastructure for Machine Learning inference with ONNX Runtime

Merged Xiangyang Ju requested to merge xju/athena:mr_onnx_onetool into main
Files
37
# Copyright (C) 2002-2024 CERN for the benefit of the ATLAS collaboration
from AthenaConfiguration.AthConfigFlags import AthConfigFlags
from AthenaConfiguration.Enums import FlagEnum
class OnnxRuntimeType(FlagEnum):
    """Execution providers available to ONNX Runtime inference in Athena.

    Only the providers actually wired up in the C++ tooling are enabled;
    the commented-out entries list providers ONNX Runtime supports that
    could be added here later.
    """
    CPU = 'CPU'
    CUDA = 'CUDA'
    # Possible future backends — uncomment each one only once it is
    # actually implemented on the C++ side.
    # DML = 'DML'
    # DNNL = 'DNNL'
    # NUPHAR = 'NUPHAR'
    # OPENVINO = 'OPENVINO'
    # ROCM = 'ROCM'
    # TENSORRT = 'TENSORRT'
    # VITISAI = 'VITISAI'
    # VULKAN = 'VULKAN'
def createOnnxRuntimeFlags():
    """Build the configuration-flag container for the AthOnnx domain.

    Returns an ``AthConfigFlags`` instance holding a single flag,
    ``AthOnnx.ExecutionProvider``, which selects the ONNX Runtime
    execution provider (default: ``OnnxRuntimeType.CPU``).
    """
    onnx_flags = AthConfigFlags()
    onnx_flags.addFlag(
        "AthOnnx.ExecutionProvider",
        OnnxRuntimeType.CPU,
        type=OnnxRuntimeType,
    )
    return onnx_flags
if __name__ == "__main__":
    # Smoke test: build the AthOnnx flag container and print its contents.
    createOnnxRuntimeFlags().dump()
Loading