Failed to optimize Whisper tiny by gpu (CUDA)
XciciciX opened this issue · 0 comments
Describe the bug
On Windows, I tried to use CUDA to optimize whisper tiny in fp32 and fp16; both failed to produce the model at the final prepost step. The error is onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for DecoderMaskedMultiHeadAttention(1) node with name 'Attention_0'
To Reproduce
olive run --config whisper_gpu_fp32.json
Expected behavior
Get the final optimized whisper ONNX model successfully through GPU on Windows, and run the test inference script successfully.
Olive config
{
"input_model": {
"type": "PyTorchModel",
"config": {
"model_script": "code/user_script.py",
"script_dir": "code",
"hf_config": {
"model_class": "WhisperForConditionalGeneration",
"model_name": "openai/whisper-tiny.en",
"components": [
{
"name": "encoder_decoder_init",
"io_config": "get_encdec_io_config",
"component_func": "get_encoder_decoder_init",
"dummy_inputs_func": "encoder_decoder_init_dummy_inputs"
},
{
"name": "decoder",
"io_config": "get_dec_io_config",
"component_func": "get_decoder",
"dummy_inputs_func": "decoder_dummy_inputs"
}
],
"from_pretrained_args": {
"attn_implementation": "eager"
}
}
}
},
"systems": {
"local_system": {
"type": "LocalSystem",
"config": {
"accelerators": [
{
"device": "gpu",
"execution_providers": [
"CUDAExecutionProvider"
]
}
]
}
}
},
"evaluators": {
"common_evaluator": {
"metrics": [
{
"name": "latency",
"type": "latency",
"sub_types": [
{
"name": "avg",
"priority": 1
}
],
"user_config": {
"user_script": "code/user_script.py",
"script_dir": "code",
"data_dir": "data",
"dataloader_func": "whisper_dataloader",
"func_kwargs": {
"dataloader_func": {
"model_name": "openai/whisper-tiny.en",
"use_audio_decoder": true
}
}
}
}
]
}
},
"passes": {
"conversion": {
"type": "OnnxConversion",
"config": {
"target_opset": 17
}
},
"transformers_optimization": {
"type": "OrtTransformersOptimization",
"config": {
"optimization_options": {
"use_multi_head_attention": true
},
"use_gpu": true
}
},
"insert_beam_search": {
"type": "InsertBeamSearch",
"config": {
"use_forced_decoder_ids": false,
"use_logits_processor": false,
"fp16": false
}
},
"prepost": {
"type": "AppendPrePostProcessingOps",
"config": {
"tool_command": "whisper",
"tool_command_args": {
"model_name": "openai/whisper-tiny.en",
"use_audio_decoder": true
},
"target_opset": 17
}
}
},
"engine": {
"log_severity_level": 0,
"host": "local_system",
"target": "local_system",
"evaluator": "common_evaluator",
"evaluate_input_model": false,
"clean_cache": false,
"cache_dir": "cache",
"output_dir": "models",
"output_name": "whisper_gpu_fp32"
}
}
Other information
- OS: Windows
- Olive version: 0.5.2
- ONNXRuntime package and version: onnxruntime-gpu: 1.18.0
Additional context
Failed to run Olive on gpu-cuda. Traceback (most recent call last): File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\engine\engine.py", line 337, in run_accelerator output_footprint = self.run_no_search( ^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\engine\engine.py", line 429, in run_no_search should_prune, signal, model_ids = self._run_passes( ^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\engine\engine.py", line 844, in _run_passes signal = self._evaluate_model(model_config, model_id, data_root, evaluator_config, accelerator_spec) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\engine\engine.py", line 1042, in _evaluate_model signal = self.target.evaluate_model(model_config, data_root, metrics, accelerator_spec) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\systems\local.py", line 47, in evaluate_model return evaluator.evaluate(model, data_root, metrics, device=device, execution_providers=execution_providers) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\evaluator\olive_evaluator.py", line 205, in evaluate metrics_res[metric.name] = self._evaluate_latency( ^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\evaluator\olive_evaluator.py", line 123, in _evaluate_latency latencies = self._evaluate_raw_latency( ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\evaluator\olive_evaluator.py", line 762, in _evaluate_raw_latency return self._evaluate_onnx_latency(model, metric, dataloader, post_func, device, execution_providers) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\evaluator\olive_evaluator.py", line 535, in _evaluate_onnx_latency session, inference_settings = OnnxEvaluator.get_session_wrapper( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\evaluator\olive_evaluator.py", line 430, in get_session_wrapper session = model.prepare_session( ^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\model\handler\onnx.py", line 113, in prepare_session return get_ort_inference_session( ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\olive\common\ort_inference.py", line 121, in get_ort_inference_session session = ort.InferenceSession( ^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\onnxruntime\capi\onnxruntime_inference_collection.py", line 419, in __init__ self._create_inference_session(providers, provider_options, disabled_optimizers) File "C:\Users\AMD\miniconda3\envs\test\Lib\site-packages\onnxruntime\capi\onnxruntime_inference_collection.py", line 483, in _create_inference_session sess.initialize_session(providers, provider_options, disabled_optimizers) onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for DecoderMaskedMultiHeadAttention(1) node with name 'Attention_0'