dlxj committed
Commit: f5d2dd3
Parent(s): 7965430
update nemo==2.8.0.rc0
This view is limited to 50 files because the commit contains too many changes.
- nemo/README.md +26 -0
- nemo/{collections/diffusion/models → agents}/__init__.py +1 -1
- nemo/{collections/diffusion/data → agents/voice_agent}/__init__.py +1 -1
- nemo/{export → agents/voice_agent/pipecat}/__init__.py +4 -6
- nemo/{collections/diffusion/encoders → agents/voice_agent/pipecat/frames}/__init__.py +1 -1
- nemo/{collections/nlp/data/entity_linking/__init__.py → agents/voice_agent/pipecat/frames/frames.py} +13 -2
- nemo/{collections/diffusion → agents/voice_agent/pipecat/processors}/__init__.py +1 -1
- nemo/agents/voice_agent/pipecat/processors/frameworks/__init__.py +13 -0
- nemo/agents/voice_agent/pipecat/processors/frameworks/rtvi.py +72 -0
- nemo/agents/voice_agent/pipecat/services/__init__.py +13 -0
- nemo/{collections/multimodal/speech_llm → agents/voice_agent/pipecat/services/nemo}/__init__.py +6 -2
- nemo/agents/voice_agent/pipecat/services/nemo/audio_logger.py +844 -0
- nemo/agents/voice_agent/pipecat/services/nemo/diar.py +360 -0
- nemo/agents/voice_agent/pipecat/services/nemo/llm.py +760 -0
- nemo/agents/voice_agent/pipecat/services/nemo/streaming_asr.py +319 -0
- nemo/agents/voice_agent/pipecat/services/nemo/streaming_diar.py +212 -0
- nemo/agents/voice_agent/pipecat/services/nemo/stt.py +316 -0
- nemo/agents/voice_agent/pipecat/services/nemo/tts.py +892 -0
- nemo/agents/voice_agent/pipecat/services/nemo/turn_taking.py +441 -0
- nemo/agents/voice_agent/pipecat/services/nemo/utils.py +197 -0
- nemo/agents/voice_agent/pipecat/transports/__init__.py +13 -0
- nemo/agents/voice_agent/pipecat/transports/base_input.py +58 -0
- nemo/{collections/nlp/data/language_modeling/megatron/length_distribution_type.py → agents/voice_agent/pipecat/transports/base_transport.py} +5 -6
- nemo/agents/voice_agent/pipecat/transports/network/__init__.py +13 -0
- nemo/agents/voice_agent/pipecat/transports/network/websocket_server.py +304 -0
- nemo/agents/voice_agent/pipecat/utils/__init__.py +13 -0
- nemo/agents/voice_agent/pipecat/utils/text/__init__.py +13 -0
- nemo/agents/voice_agent/pipecat/utils/text/simple_text_aggregator.py +238 -0
- nemo/{collections/vlm/clip/data → agents/voice_agent/utils}/__init__.py +1 -2
- nemo/agents/voice_agent/utils/config_manager.py +312 -0
- nemo/agents/voice_agent/utils/tool_calling/__init__.py +13 -0
- nemo/agents/voice_agent/utils/tool_calling/basic_tools.py +72 -0
- nemo/agents/voice_agent/utils/tool_calling/mixins.py +104 -0
- nemo/collections/asr/README.md +37 -0
- nemo/collections/asr/data/audio_to_diar_label.py +40 -857
- nemo/collections/asr/data/audio_to_diar_label_lhotse.py +38 -6
- nemo/collections/asr/data/audio_to_eou_label_lhotse.py +524 -0
- nemo/collections/asr/data/audio_to_label.py +3 -1
- nemo/collections/asr/data/audio_to_text.py +3 -18
- nemo/collections/asr/data/audio_to_text_dali.py +11 -6
- nemo/collections/asr/data/audio_to_text_dataset.py +27 -13
- nemo/collections/asr/data/audio_to_text_lhotse.py +23 -2
- nemo/collections/asr/data/audio_to_text_lhotse_prompt.py +177 -0
- nemo/collections/asr/data/audio_to_text_lhotse_prompted.py +138 -5
- nemo/collections/asr/data/audio_to_text_lhotse_speaker.py +97 -0
- nemo/collections/asr/data/data_simulation.py +123 -157
- nemo/collections/asr/data/ssl_dataset.py +27 -35
- nemo/collections/asr/data/text_to_text.py +6 -4
- nemo/collections/asr/inference/__init__.py +13 -0
- nemo/collections/asr/inference/factory/__init__.py +13 -0
nemo/README.md
ADDED
@@ -0,0 +1,26 @@
+NeMo (**Ne**ural **Mo**dules) is a toolkit for creating AI applications built around **neural modules**, conceptual blocks of neural networks that take *typed* inputs and produce *typed* outputs.
+
+## **collections/**
+* **ASR** - Collection of modules and models for building speech recognition networks.
+* **TTS** - Collection of modules and models for building speech synthesis networks.
+* **Audio** - Collection of modules and models for building audio processing networks.
+* **SpeechLM2** - Collection of modules and models for building multimodal LLM.
+
+## **core/**
+Provides fundamental APIs and utilities for NeMo modules, including:
+- **Classes** - Base classes for datasets, models, and losses.
+- **Config** - Configuration management utilities.
+- **Neural Types** - Typed inputs/outputs for module interaction.
+- **Optim** - Optimizers and learning rate schedulers.
+
+## **lightning/**
+Integration with PyTorch Lightning for training and distributed execution:
+- **Strategies & Plugins** - Custom Lightning strategies.
+- **Fabric** - Lightweight wrapper for model training.
+- **Checkpointing & Logging** - Utilities for managing model states.
+
+## **utils/**
+General utilities for debugging, distributed training, logging, and model management:
+- **callbacks/** - Hooks for training processes.
+- **loggers/** - Logging utilities for different backends.
+- **debugging & profiling** - Performance monitoring tools.
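Editor's note: the typed inputs/outputs mentioned in this README are expressed through NeMo's neural types. A minimal illustrative sketch follows; the class name and axis layout are assumptions for illustration and are not part of this commit.

from nemo.core.neural_types import AudioSignal, LengthsType, NeuralType

class MyAudioEncoder:
    # Illustrative: a module declares the neural types of its inputs,
    # here a batch of audio signals plus their lengths.
    @property
    def input_types(self):
        return {
            "audio_signal": NeuralType(('B', 'T'), AudioSignal()),
            "length": NeuralType(('B',), LengthsType()),
        }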
nemo/{collections/diffusion/models → agents}/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
nemo/{collections/diffusion/data → agents/voice_agent}/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
nemo/{export → agents/voice_agent/pipecat}/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-
-use_tensorrt = True
 try:
-
-except
-
+    import pipecat
+except ImportError:
+    raise ImportError("pipecat is not installed. Please install it with `pip install pipecat-ai`.")
nemo/{collections/diffusion/encoders → agents/voice_agent/pipecat/frames}/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
nemo/{collections/nlp/data/entity_linking/__init__.py → agents/voice_agent/pipecat/frames/frames.py}
RENAMED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,4 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-
+
+from dataclasses import dataclass
+import numpy as np
+from pipecat.frames.frames import DataFrame
+
+
+@dataclass
+class DiarResultFrame(DataFrame):
+    """Diarization frame."""
+
+    diar_result: np.ndarray | int
+    stream_id: str = "default"
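Editor's note: DiarResultFrame behaves like any other pipecat data frame. A minimal, illustrative construction is shown below; the probability values are made up and the snippet assumes pipecat's frame dataclasses accept keyword construction, as the field defaults above suggest.

import numpy as np

# Illustrative only: wrap a per-speaker probability vector in a DiarResultFrame.
frame = DiarResultFrame(diar_result=np.array([0.92, 0.08]), stream_id="default")
print(frame.diar_result, frame.stream_id)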
nemo/{collections/diffusion → agents/voice_agent/pipecat/processors}/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
nemo/agents/voice_agent/pipecat/processors/frameworks/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
nemo/agents/voice_agent/pipecat/processors/frameworks/rtvi.py
ADDED
@@ -0,0 +1,72 @@
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from loguru import logger
from pipecat.frames.frames import Frame, LLMFullResponseEndFrame, LLMFullResponseStartFrame, TTSTextFrame
from pipecat.observers.base_observer import FramePushed
from pipecat.processors.frameworks.rtvi import (
    RTVIBotLLMStartedMessage,
    RTVIBotLLMStoppedMessage,
    RTVIBotTranscriptionMessage,
    RTVIBotTTSTextMessage,
)
from pipecat.processors.frameworks.rtvi import RTVIObserver as _RTVIObserver
from pipecat.processors.frameworks.rtvi import RTVIProcessor, RTVITextMessageData
from pipecat.transports.base_output import BaseOutputTransport


class RTVIObserver(_RTVIObserver):
    """
    An observer that processes RTVI frames and pushes them to the transport.
    """

    def __init__(self, rtvi: RTVIProcessor, *args, **kwargs):
        super().__init__(rtvi, *args, **kwargs)

    async def on_push_frame(self, data: FramePushed):
        """Process a frame being pushed through the pipeline.

        Args:
            data: Frame push event data containing source, frame, direction, and timestamp.
        """
        src = data.source
        frame: Frame = data.frame

        if frame.id in self._frames_seen:
            return

        if not self._params.bot_llm_enabled:
            if isinstance(frame, LLMFullResponseStartFrame):
                await self.send_rtvi_message(RTVIBotLLMStartedMessage())
                self._frames_seen.add(frame.id)
            elif isinstance(frame, LLMFullResponseEndFrame):
                await self.send_rtvi_message(RTVIBotLLMStoppedMessage())
                self._frames_seen.add(frame.id)
            elif isinstance(frame, TTSTextFrame) and isinstance(src, BaseOutputTransport):
                message = RTVIBotTTSTextMessage(data=RTVITextMessageData(text=frame.text))
                await self.send_rtvi_message(message)
                await self._push_bot_transcription(frame.text)
                self._frames_seen.add(frame.id)
            else:
                await super().on_push_frame(data)
        else:
            await super().on_push_frame(data)

    async def _push_bot_transcription(self, text: str):
        """Push accumulated bot transcription as a message."""
        if len(text.strip()) > 0:
            message = RTVIBotTranscriptionMessage(data=RTVITextMessageData(text=text))
            logger.debug(f"Pushing bot transcription: `{text}`")
            await self.send_rtvi_message(message)
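Editor's note: this subclass is meant to stand in for pipecat's stock RTVIObserver. A minimal wiring sketch follows; the empty RTVIConfig and the commented task hookup are assumptions for illustration and not part of this commit.

from pipecat.processors.frameworks.rtvi import RTVIConfig, RTVIProcessor

# Build the RTVI processor the observer reports through, then attach the observer.
rtvi = RTVIProcessor(config=RTVIConfig(config=[]))
observer = RTVIObserver(rtvi)
# The observer would then be handed to the pipeline task (e.g. as an observers=[...] argument),
# with the exact wiring depending on the pipecat version in use.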
nemo/agents/voice_agent/pipecat/services/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
nemo/{collections/multimodal/speech_llm → agents/voice_agent/pipecat/services/nemo}/__init__.py
RENAMED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,4 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from
+from .diar import NemoDiarService
+from .llm import HuggingFaceLLMService
+from .stt import NemoSTTService
+from .tts import NeMoFastPitchHiFiGANTTSService
+from .turn_taking import NeMoTurnTakingService
nemo/agents/voice_agent/pipecat/services/nemo/audio_logger.py
ADDED
@@ -0,0 +1,844 @@
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import threading
import wave
from datetime import datetime
from pathlib import Path
from typing import Optional, Union

import librosa
import numpy as np
from loguru import logger
from pipecat.frames.frames import TranscriptionFrame
from pipecat.observers.base_observer import BaseObserver, FramePushed


class AudioLogger:
    """
    Utility class for logging audio data and transcriptions during voice agent interactions.

    This logger saves:
    - Audio files in WAV format
    - Transcriptions with metadata in JSON format
    - Session information and metadata

    File structure:
        log_dir/
        ├── session_YYYYMMDD_HHMMSS/
        │   ├── user/
        │   │   ├── 00001_HHMMSS.wav
        │   │   ├── 00001_HHMMSS.json
        │   │   ├── 00002_HHMMSS.wav
        │   │   └── 00002_HHMMSS.json
        │   ├── agent/
        │   │   ├── 00001_HHMMSS.wav
        │   │   ├── 00001_HHMMSS.json
        │   └── session_metadata.json

    Args:
        log_dir: Base directory for storing logs (default: "./audio_logs")
        session_id: Optional custom session ID. If None, auto-generated from timestamp
        enabled: Whether logging is enabled (default: True)

    # 12/19/2025 Note: Stereo conversation recording is implemented,
    # but -0.8 seconds offset needs to be applied to make the session sound synced.
    """

    def __init__(
        self,
        log_dir: Union[str, Path] = "./audio_logs",
        session_id: Optional[str] = None,
        enabled: bool = True,
        user_audio_sample_rate: int = 16000,
        pre_roll_time_sec: float = 0.8,
        round_precision: int = 2,
    ):
        self.enabled = enabled
        if not self.enabled:
            logger.info("[AudioLogger] AudioLogger is disabled")
            return

        self.log_dir = Path(log_dir)

        # Generate session ID if not provided
        self.session_start_time = datetime.now()
        if session_id is None:
            session_id = f"session_{self.session_start_time.strftime('%Y%m%d_%H%M%S')}"
        self.first_audio_timestamp = None
        self.session_id = session_id
        self.session_dir = self.log_dir / session_id

        # Create directories
        self.user_dir = self.session_dir / "user"
        self.agent_dir = self.session_dir / "agent"

        self.user_dir.mkdir(parents=True, exist_ok=True)
        self.agent_dir.mkdir(parents=True, exist_ok=True)

        # Counters for file naming (thread-safe)
        self._user_counter = 0
        self._agent_counter = 0
        self._turn_index = 0  # Turn index for conversation turns
        self._current_speaker = None  # Track current speaker for turn transitions
        self._agent_turn_start_time = None  # Captured when BotStartedSpeakingFrame is received
        self._lock = threading.Lock()
        self.staged_metadata = None
        self._staged_audio_data = None
        self._pre_roll_time_sec = pre_roll_time_sec
        self._round_precision = round_precision

        self.turn_audio_buffer = []
        self.continuous_user_audio_buffer = []
        self.turn_transcription_buffer = []

        # Stereo conversation recording (left=agent, right=user)
        self._stereo_conversation_filename = "conversation_stereo.wav"
        self._stereo_conversation_file = self.session_dir / self._stereo_conversation_filename
        self._stereo_sample_rate = user_audio_sample_rate  # Use user audio sample rate (downsample agent audio)
        self._stereo_audio_buffer_left: list = []  # Agent audio (left channel)
        self._stereo_audio_buffer_right: list = []  # User audio (right channel)

        # Session metadata
        # agent_entries is a list of lists: each sublist contains segments for one turn
        # e.g., [[seg1, seg2, seg3], [seg4, seg5], ...] where each [] is a turn
        self.session_metadata = {
            "session_id": session_id,
            "start_time": self.session_start_time.isoformat(),
            "user_entries": [],
            "agent_entries": [],  # List of turns, each turn is a list of segments
        }

        logger.info(f"[AudioLogger] AudioLogger initialized: {self.session_dir}")

    def append_continuous_user_audio(self, audio_data: bytes):
        """
        Append audio data to the continuous user audio buffer for stereo conversation.

        This method should be called for EVERY audio frame received from the user,
        regardless of VAD state, to record the complete conversation audio.

        Args:
            audio_data: Raw audio data as bytes
        """
        if not self.enabled:
            return

        self.continuous_user_audio_buffer.append(audio_data)

    def _resample_audio(
        self,
        audio_data: Union[bytes, np.ndarray],
        orig_sr: int,
        target_sr: int,
    ) -> np.ndarray:
        """
        Resample audio data to a target sample rate using librosa.

        Args:
            audio_data: Audio data as bytes (int16) or numpy array
            orig_sr: Original sample rate
            target_sr: Target sample rate

        Returns:
            Resampled audio as numpy array (float32)
        """
        # Convert bytes to numpy array if needed
        if isinstance(audio_data, bytes):
            audio_array = np.frombuffer(audio_data, dtype=np.int16).astype(np.float32) / 32768.0
        elif audio_data.dtype == np.int16:
            audio_array = audio_data.astype(np.float32) / 32768.0
        else:
            audio_array = audio_data.astype(np.float32)

        # Resample if needed
        if orig_sr != target_sr:
            audio_array = librosa.resample(audio_array, orig_sr=orig_sr, target_sr=target_sr)

        return audio_array

    def _append_to_stereo_conversation(
        self,
        audio_data: Union[bytes, np.ndarray],
        channel: str,
        start_time: float,
        sample_rate: int,
    ):
        """
        Append audio to the stereo conversation buffer at the correct time position.

        Args:
            audio_data: Audio data as bytes or numpy array
            channel: "left" for agent, "right" for user
            start_time: Start time in seconds from session start
            sample_rate: Sample rate of the input audio
        """
        if not self.enabled:
            return

        try:
            # Resample to stereo sample rate if needed
            audio_float = self._resample_audio(audio_data, sample_rate, self._stereo_sample_rate)

            # Calculate the sample position for this audio
            start_sample = int(start_time * self._stereo_sample_rate)

            # Get the appropriate buffer
            if channel == "left":
                buffer = self._stereo_audio_buffer_left
            else:
                buffer = self._stereo_audio_buffer_right

            # Extend buffer with zeros if needed to reach start position
            current_length = len(buffer)
            if start_sample > current_length:
                buffer.extend([0.0] * (start_sample - current_length))

            # Append or overwrite audio samples
            for i, sample in enumerate(audio_float):
                pos = start_sample + i
                if pos < len(buffer):
                    # Mix with existing audio (in case of overlap)
                    buffer[pos] = np.clip(buffer[pos] + sample, -1.0, 1.0)
                else:
                    buffer.append(sample)

            logger.debug(
                f"[AudioLogger] Appended {len(audio_float)} samples to {channel} channel "
                f"at position {start_sample} (buffer now {len(buffer)} samples)"
            )

        except Exception as e:
            logger.error(f"[AudioLogger] Error appending to stereo conversation: {e}")

    def save_stereo_conversation(self):
        """
        Save the stereo conversation buffer to a WAV file.
        Left channel = Agent, Right channel = User.

        User audio comes from continuous_user_audio_buffer (not affected by VAD).
        """
        if not self.enabled:
            return

        if not self._stereo_audio_buffer_left and not self.continuous_user_audio_buffer:
            logger.warning("[AudioLogger] No stereo conversation audio to save")
            return

        try:
            # Build right channel (user) from continuous buffer
            # This is raw bytes at user sample rate, no resampling needed since stereo uses user sample rate
            if self.continuous_user_audio_buffer:
                continuous_audio_bytes = b"".join(self.continuous_user_audio_buffer)
                right_array = np.frombuffer(continuous_audio_bytes, dtype=np.int16).astype(np.float32) / 32768.0
            else:
                right_array = np.array([], dtype=np.float32)

            left_array = np.array(self._stereo_audio_buffer_left, dtype=np.float32)

            # Pad the shorter buffer with zeros
            max_length = max(len(left_array), len(right_array))

            # Pad to same length
            if len(left_array) < max_length:
                left_array = np.pad(left_array, (0, max_length - len(left_array)))
            if len(right_array) < max_length:
                right_array = np.pad(right_array, (0, max_length - len(right_array)))

            # Create stereo array (interleaved: L, R, L, R, ...)
            stereo_array = np.column_stack((left_array, right_array))

            # Convert to int16
            stereo_int16 = (stereo_array * 32767).astype(np.int16)

            # Save as WAV
            with wave.open(str(self._stereo_conversation_file), 'wb') as wav_file:  # type: ignore[union-attr]
                wav_file.setnchannels(2)  # Stereo
                wav_file.setsampwidth(2)  # 16-bit
                wav_file.setframerate(self._stereo_sample_rate)
                wav_file.writeframes(stereo_int16.tobytes())

            duration_sec = max_length / self._stereo_sample_rate
            logger.info(
                f"[AudioLogger] Saved stereo conversation: {self._stereo_conversation_file} "
                f"({duration_sec:.2f} seconds, {max_length} samples)"
            )

        except Exception as e:
            logger.error(f"[AudioLogger] Error saving stereo conversation: {e}")

    def get_time_from_start_of_session(self, timestamp: datetime = None) -> float:
        """Get the time from the start of the session to the given datetime string."""
        # get the time difference in seconds.
        if self.first_audio_timestamp is None:
            raise ValueError("First audio timestamp is not set. Aborting time calculation.")
        time_diff = (timestamp if timestamp else datetime.now()) - self.first_audio_timestamp
        return time_diff.total_seconds()

    def _get_next_counter(self, speaker: str) -> int:
        """Get the next counter value for a speaker in a thread-safe manner."""
        with self._lock:
            if speaker == "user":
                self._user_counter += 1
                return self._user_counter
            else:
                self._agent_counter += 1
                return self._agent_counter

    def increment_turn_index(self, speaker: str = None) -> int:
        """
        Increment the turn index if the speaker has changed.

        Args:
            speaker: "user" or "agent". If provided, only increments
                if this is different from the current speaker.
                If None, always increments.

        Returns:
            The current turn index after any increment.
        """
        with self._lock:
            if speaker is None:
                # Always increment if no speaker specified
                self._turn_index += 1
                logger.debug(f"[AudioLogger] Turn index incremented to {self._turn_index}")
            elif speaker != self._current_speaker:
                # Only increment if speaker changed
                self._current_speaker = speaker
                self._turn_index += 1
                # Reset agent turn start time when speaker changes
                if speaker == "agent":
                    self._agent_turn_start_time = None
                logger.debug(
                    f"[AudioLogger] Speaker changed to {speaker}, turn index incremented to {self._turn_index}"
                )
            # else: same speaker, no increment
            return self._turn_index

    def set_agent_turn_start_time(self):
        """
        Set the start time for the current agent turn.

        This should be called when BotStartedSpeakingFrame is received,
        which indicates the audio is actually starting to play (not just generated).
        This provides more accurate timing than capturing time during TTS generation.
        """
        if not self.enabled:
            return

        # Only set if not already set for this turn
        if self._agent_turn_start_time is None:
            self._agent_turn_start_time = self.get_time_from_start_of_session()
            logger.debug(f"[AudioLogger] Agent turn start time set to {self._agent_turn_start_time:.3f}s")

    def _save_audio_wav(
        self,
        audio_data: Union[bytes, np.ndarray],
        file_path: Path,
        sample_rate: int,
        num_channels: int = 1,
    ):
        """
        Save audio data to a WAV file.

        Args:
            audio_data: Audio data as bytes or numpy array
            file_path: Path to save the WAV file
            sample_rate: Audio sample rate in Hz
            num_channels: Number of audio channels (default: 1)
        """
        try:
            # Convert audio data to bytes if it's a numpy array
            if isinstance(audio_data, np.ndarray):
                if audio_data.dtype in [np.float32, np.float64]:
                    # Convert float [-1, 1] to int16 [-32768, 32767]
                    audio_data = np.clip(audio_data, -1.0, 1.0)
                    audio_data = (audio_data * 32767).astype(np.int16)
                elif audio_data.dtype != np.int16:
                    audio_data = audio_data.astype(np.int16)
                audio_bytes = audio_data.tobytes()
            else:
                audio_bytes = audio_data

            # Write WAV file
            with wave.open(str(file_path), 'wb') as wav_file:  # type: ignore[union-attr]
                wav_file.setnchannels(num_channels)
                wav_file.setsampwidth(2)  # 16-bit audio
                wav_file.setframerate(sample_rate)
                wav_file.writeframes(audio_bytes)

            logger.debug(f"[AudioLogger] Saved audio to {file_path}")
        except Exception as e:
            logger.error(f"[AudioLogger] Error saving audio to {file_path}: {e}")
            raise

    def _save_metadata_json(self, metadata: dict, file_path: Path):
        """Save metadata to a JSON file."""
        try:
            with open(file_path, 'w', encoding='utf-8') as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)
            logger.debug(f"[AudioLogger] Saved metadata to {file_path}")
        except Exception as e:
            logger.error(f"[AudioLogger] Error saving metadata to {file_path}: {e}")
            raise

    def clear_user_audio_buffer(self):
        """
        Clear the user audio buffer if the user stopped speaking detected by VAD.
        """
        # Clear turn buffers if logging wasn't completed (e.g., no final transcription)
        if len(self.turn_audio_buffer) > 0 or len(self.turn_transcription_buffer) > 0:
            logger.debug(
                "[AudioLogger] Clearing turn audio and transcription buffers due to VAD user stopped speaking"
            )
            self.turn_audio_buffer = []
            self.turn_transcription_buffer = []

    def stage_user_audio(
        self,
        timestamp_now: datetime,
        transcription: str,
        sample_rate: int = 16000,
        num_channels: int = 1,
        is_first_frame: bool = False,
        is_backchannel: bool = False,
        additional_metadata: Optional[dict] = None,
    ) -> Optional[dict]:
        """
        Stage user audio metadata and transcription (from STT).
        This data will be saved when the turn is complete by `save_user_audio` method.
        Audio data is retrieved from continuous_user_audio_buffer based on timestamps.

        Args:
            timestamp_now: Timestamp when the audio was received
            transcription: Transcribed text
            sample_rate: Audio sample rate in Hz (default: 16000)
            num_channels: Number of audio channels (default: 1)
            is_first_frame: Whether this is the first frame of a turn (default: False)
            is_backchannel: Whether this is a backchannel utterance (default: False)
            additional_metadata: Additional metadata to include

        Returns:
            Dictionary with logged file paths, or None if logging is disabled
        """
        if not self.enabled:
            return None

        try:
            # Get counter and generate filenames
            counter = self._get_next_counter("user")
            # timestamp_now = datetime.now()
            base_name = f"{counter:05d}_{timestamp_now.strftime('%H%M%S')}"

            audio_file = self.user_dir / f"{base_name}.wav"
            metadata_file = self.user_dir / f"{base_name}.json"

            if is_first_frame or self.staged_metadata is None or "start_time" not in self.staged_metadata:
                raw_start_time = self.get_time_from_start_of_session(timestamp=timestamp_now)
                # Apply pre-roll: go back pre_roll_time_sec, but don't go before the last entry's end time
                pre_roll_start = raw_start_time - self._pre_roll_time_sec
                if self.session_metadata["user_entries"]:
                    last_entry_end_time = self.session_metadata["user_entries"][-1]["end_time"]
                    _start_time = max(pre_roll_start, last_entry_end_time)
                else:
                    # No previous entries, just ensure we don't go negative
                    _start_time = max(pre_roll_start, 0.0)
            else:
                # start_time is stored as float (seconds from session start), not ISO string
                _start_time = self.staged_metadata["start_time"]

            # Make end time into float (seconds from session start)
            _end_time = self.get_time_from_start_of_session(timestamp=datetime.now())
            audio_duration_sec = round(_end_time - _start_time, self._round_precision)

            # Prepare metadata (initialize if None to allow update)
            if self.staged_metadata is None:
                self.staged_metadata = {}
            self.staged_metadata.update(
                {
                    "base_name": base_name,
                    "counter": counter,
                    "turn_index": self._turn_index,
                    "speaker": "user",
                    "timestamp": timestamp_now.isoformat(),
                    "start_time": _start_time,
                    "end_time": _end_time,
                    "transcription": transcription,
                    "audio_file": audio_file.name,
                    "sample_rate": sample_rate,
                    "num_channels": num_channels,
                    "audio_duration_sec": audio_duration_sec,
                    "is_backchannel": is_backchannel,
                }
            )

            if additional_metadata:
                self.staged_metadata.update(additional_metadata)

            return {
                "audio_file": str(audio_file),
                "metadata_file": str(metadata_file),
                "counter": counter,
            }

        except Exception as e:
            logger.error(f"Error logging user audio: {e}")
            return None

    def stage_turn_audio_and_transcription(
        self,
        timestamp_now: datetime,
        is_first_frame: bool = False,
        additional_metadata: Optional[dict] = None,
    ):
        """
        Stage the complete turn audio and accumulated transcriptions.

        This method is called when a final transcription is received.
        It joins all accumulated audio and transcription chunks and stages them together.

        Args:
            timestamp_now: Timestamp when the audio was received
            is_first_frame: Whether this is the first frame of a turn (default: False)
            additional_metadata: Additional metadata to include (e.g., model, backend info)
        """
        if not self.turn_audio_buffer or not self.turn_transcription_buffer:
            logger.debug("[AudioLogger] No audio or transcription to stage")
            return

        try:
            complete_transcription = "".join(self.turn_transcription_buffer)

            logger.debug(
                f"[AudioLogger] Staging a turn with: {len(self.turn_audio_buffer)} audio chunks, "
                f"{len(self.turn_transcription_buffer)} transcription chunks"
            )

            metadata = {
                "num_transcription_chunks": len(self.turn_transcription_buffer),
                "num_audio_chunks": len(self.turn_audio_buffer),
            }
            if additional_metadata:
                metadata.update(additional_metadata)

            self.stage_user_audio(
                timestamp_now=timestamp_now,
                transcription=complete_transcription,
                sample_rate=self._stereo_sample_rate,
                num_channels=1,
                is_first_frame=is_first_frame,
                additional_metadata=metadata,
            )

            logger.info(
                f"[AudioLogger] Staged the audio and transcription for turn: '{complete_transcription[:50]}...'"
            )

        except Exception as e:
            logger.warning(f"[AudioLogger] Failed to stage user audio: {e}")

    def save_user_audio(self, is_backchannel: bool = False, float_divisor: float = 32768.0):
        """Save the user audio to the disk.

        Args:
            is_backchannel: Whether this audio is a backchannel utterance (default: False)
        """
        # Safety check: ensure staged metadata exists and has required fields
        if self.staged_metadata is None or "base_name" not in self.staged_metadata:
            # This is expected - multiple TranscriptionFrames may be pushed but only one has audio staged
            logger.debug("[AudioLogger] No staged metadata to save (this is normal for multiple frame pushes)")
            return

        try:
            # Add backchannel metadata (only set if not already True to preserve turn-taking detection)
            if is_backchannel or not self.staged_metadata.get("is_backchannel", False):
                self.staged_metadata["is_backchannel"] = is_backchannel

            audio_file = self.user_dir / f"{self.staged_metadata['base_name']}.wav"
            metadata_file = self.user_dir / f"{self.staged_metadata['base_name']}.json"

            # Get the audio data from continuous user audio buffer
            stt, end = self.staged_metadata["start_time"], self.staged_metadata["end_time"]
            continuous_audio_bytes = b"".join(self.continuous_user_audio_buffer)
            full_audio_array = np.frombuffer(continuous_audio_bytes, dtype=np.int16).astype(np.float32) / float_divisor
            start_idx = int(stt * self._stereo_sample_rate)
            end_idx = int(end * self._stereo_sample_rate)
            staged_audio_data = full_audio_array[start_idx:end_idx]

            self._save_audio_wav(
                audio_data=staged_audio_data,
                file_path=audio_file,
                sample_rate=self.staged_metadata["sample_rate"],
            )

            self._save_metadata_json(metadata=self.staged_metadata, file_path=metadata_file)
            backchannel_label = " [BACKCHANNEL]" if is_backchannel else ""
            transcription_preview = self.staged_metadata['transcription'][:50]
            ellipsis = '...' if len(self.staged_metadata['transcription']) > 50 else ''
            logger.info(
                f"[AudioLogger] Saved user audio #{self.staged_metadata['counter']}"
                f"{backchannel_label}: '{transcription_preview}{ellipsis}'"
            )

            # Note: User audio for stereo conversation is handled via continuous_user_audio_buffer
            # which is populated in append_continuous_user_audio() (not affected by VAD)

            # Update session metadata
            with self._lock:
                self.session_metadata["user_entries"].append(self.staged_metadata)
                self._save_session_metadata()

            self.clear_user_audio_buffer()

            # Clear staged data after successful save
            self.staged_metadata = None
            self._staged_audio_data = None
        except Exception as e:
            logger.error(f"[AudioLogger] Error saving user audio: {e}")
            raise

    def log_agent_audio(
        self,
        audio_data: Union[bytes, np.ndarray],
        text: str,
        sample_rate: int = 22050,
        num_channels: int = 1,
        additional_metadata: Optional[dict] = None,
        tts_generation_time: Optional[float] = None,
    ) -> Optional[dict]:
        """
        Log agent audio and text (from TTS).

        Args:
            audio_data: Generated audio data as bytes or numpy array
            text: Input text that was synthesized
            sample_rate: Audio sample rate in Hz (default: 22050)
            num_channels: Number of audio channels (default: 1)
            additional_metadata: Additional metadata to include
            tts_generation_time: Time when TTS generation started (seconds from session start).
                Used to calculate actual start_time for first segment of a turn.

        Returns:
            Dictionary with logged file paths, or None if logging is disabled
        """
        if not self.enabled:
            return None

        try:
            # Get counter and generate filenames
            counter = self._get_next_counter("agent")
            timestamp_now = datetime.now()
            base_name = f"{counter:05d}_{timestamp_now.strftime('%H%M%S')}"

            audio_file = self.agent_dir / f"{base_name}.wav"
            metadata_file = self.agent_dir / f"{base_name}.json"

            # Save audio
            self._save_audio_wav(audio_data, audio_file, sample_rate, num_channels)

            # Calculate audio duration
            audio_duration_sec = (
                len(audio_data) / (sample_rate * num_channels * 2)
                if isinstance(audio_data, bytes)
                else len(audio_data) / sample_rate
            )

            # Determine start_time based on previous segment in the same turn
            # If this is the first segment of the turn, use tts_generation_time
            # Otherwise, use the previous segment's end_time for sequential playback
            start_time = None
            with self._lock:
                agent_entries = self.session_metadata["agent_entries"]
                # agent_entries is a list of turns, each turn is a list of segments
                if agent_entries and agent_entries[-1]:  # If there's a current turn with segments
                    last_segment = agent_entries[-1][-1]  # Last segment of last turn
                    if last_segment["turn_index"] == self._turn_index:
                        # Same turn - start after previous segment ends
                        start_time = last_segment["end_time"]

            if start_time is None:
                # First segment of the turn - use agent_turn_start_time (from BotStartedSpeakingFrame)
                # This is more accurate than tts_generation_time as it reflects actual playback start
                if self._agent_turn_start_time is not None:
                    start_time = self._agent_turn_start_time
                elif tts_generation_time is not None:
                    # Fallback to tts_generation_time if agent_turn_start_time not set
                    start_time = tts_generation_time
                else:
                    start_time = self.get_time_from_start_of_session(timestamp=timestamp_now)

            end_time = start_time + audio_duration_sec

            # Prepare metadata
            # cutoff_time is None by default (no interruption)
            # It will be set by set_agent_cutoff_time() if TTS is interrupted
            metadata = {
                "base_name": base_name,
                "counter": counter,
                "turn_index": self._turn_index,
                "speaker": "agent",
                "timestamp": timestamp_now.isoformat(),
                "start_time": round(start_time, self._round_precision),
                "end_time": round(end_time, self._round_precision),
                "cutoff_time": None,  # None means not interrupted; float if interrupted
                "text": text,
                "audio_file": audio_file.name,
                "sample_rate": sample_rate,
                "num_channels": num_channels,
                "audio_duration_sec": round(audio_duration_sec, self._round_precision),
            }

            if additional_metadata:
                metadata.update(additional_metadata)

            # Save metadata
            self._save_metadata_json(metadata, metadata_file)

            # Append to stereo conversation (left channel = agent)
            self._append_to_stereo_conversation(
                audio_data=audio_data,
                channel="left",
                start_time=start_time,
                sample_rate=sample_rate,
            )

            # Update session metadata
            # agent_entries is a list of turns, each turn is a list of segments
            with self._lock:
                agent_entries = self.session_metadata["agent_entries"]
                # Check if we need to start a new turn or append to existing turn
                if not agent_entries or agent_entries[-1][-1]["turn_index"] != self._turn_index:
                    # Start a new turn (new sublist)
                    agent_entries.append([metadata])
                else:
                    # Append to current turn
                    agent_entries[-1].append(metadata)
                self._save_session_metadata()

            logger.info(f"[AudioLogger] Logged agent audio #{counter}: '{text[:50]}{'...' if len(text) > 50 else ''}'")

            return {
                "audio_file": str(audio_file),
                "metadata_file": str(metadata_file),
                "counter": counter,
            }

        except Exception as e:
            logger.error(f"[AudioLogger] Error logging agent audio: {e}")
            return None

    def set_agent_cutoff_time(self, cutoff_time: Optional[float] = None):
        """
        Set the cutoff time for the most recent agent audio entry.

        This method should be called when TTS is interrupted by user speech.
        The cutoff_time represents when the agent audio was actually cut off,
        which may be earlier than the natural end_time.

        Args:
            cutoff_time: The cutoff time in seconds from session start.
                If None, uses current time from session start.
        """
        if not self.enabled:
            return

        if cutoff_time is None:
            cutoff_time = self.get_time_from_start_of_session()

        with self._lock:
            agent_entries = self.session_metadata["agent_entries"]
            if not agent_entries or not agent_entries[-1]:
                logger.warning("[AudioLogger] No agent entries to set cutoff time")
                return

            # Get the current turn (last sublist) and update ALL segments in it
            current_turn = agent_entries[-1]
            turn_index = current_turn[0]["turn_index"]

            # Update cutoff_time for ALL segments in the current turn
            for segment in current_turn:
                segment["cutoff_time"] = cutoff_time
                # Also update individual JSON files
                try:
                    metadata_file = self.agent_dir / f"{segment['base_name']}.json"
                    self._save_metadata_json(segment, metadata_file)
                except Exception as e:
                    logger.error(f"[AudioLogger] Error updating agent cutoff time for segment: {e}")

            # Truncate the stereo buffer (left channel = agent) at the cutoff point
            cutoff_sample = int(cutoff_time * self._stereo_sample_rate)
            if cutoff_sample < len(self._stereo_audio_buffer_left):
                # Zero out agent audio after cutoff point
                for i in range(cutoff_sample, len(self._stereo_audio_buffer_left)):
                    self._stereo_audio_buffer_left[i] = 0.0
                logger.debug(
                    f"[AudioLogger] Truncated agent stereo buffer at sample {cutoff_sample} "
                    f"(cutoff_time={cutoff_time:.3f}s)"
                )

            logger.info(
                f"[AudioLogger] Set cutoff_time={cutoff_time:.3f}s for turn {turn_index} "
                f"({len(current_turn)} segments)"
            )

            # Save updated session metadata
            self._save_session_metadata()

    def _save_session_metadata(self):
        """Save the session metadata to disk."""
        if not self.enabled:
            return

        try:
            metadata_file = self.session_dir / "session_metadata.json"
            self.session_metadata["last_updated"] = datetime.now().isoformat()
            self._save_metadata_json(self.session_metadata, metadata_file)
        except Exception as e:
            logger.error(f"[AudioLogger] Error saving session metadata: {e}")

    def finalize_session(self):
        """Finalize the session and save final metadata."""
        if not self.enabled:
            return

        # Save stereo conversation before finalizing
        self.save_stereo_conversation()

        self.session_metadata["end_time"] = datetime.now().isoformat()
        self.session_metadata["total_user_entries"] = self._user_counter
        self.session_metadata["total_agent_segments"] = self._agent_counter
        self.session_metadata["total_agent_turns"] = len(self.session_metadata["agent_entries"])
        self._save_session_metadata()
        logger.info(
            f"[AudioLogger] Session finalized: {self.session_id} "
            f"(User: {self._user_counter}, Agent: {self._agent_counter} segments in "
            f"{len(self.session_metadata['agent_entries'])} turns)"
        )


class RTVIAudioLoggerObserver(BaseObserver):
    """Observer that triggers audio logging when TranscriptionFrame is pushed."""

    def __init__(self, audio_logger: AudioLogger):
        super().__init__()
        self._audio_logger = audio_logger

    async def on_push_frame(self, data: FramePushed):
        """Handle frame push events and save user audio on TranscriptionFrame."""
        frame = data.frame
        if isinstance(frame, TranscriptionFrame) and self._audio_logger:
            self._audio_logger.save_user_audio()
        # Call parent class's on_push_frame method
        await super().on_push_frame(data)
nemo/agents/voice_agent/pipecat/services/nemo/diar.py
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
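To show how the pieces above fit together, here is a minimal, hypothetical wiring sketch; the AudioLogger constructor arguments and the pipeline-task wiring are assumptions for illustration, not taken from this file.

# Hypothetical usage sketch; constructor arguments are illustrative assumptions.
audio_logger = AudioLogger(output_dir="./audio_logs", enabled=True)
observer = RTVIAudioLoggerObserver(audio_logger)
# Registering the observer on a pipecat pipeline task would make every pushed
# TranscriptionFrame trigger audio_logger.save_user_audio(), e.g.:
# task = PipelineTask(pipeline, observers=[observer])
# At the end of the conversation, flush the stereo WAV and session metadata:
audio_logger.finalize_session()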
nemo/agents/voice_agent/pipecat/services/nemo/diar.py
ADDED
@@ -0,0 +1,360 @@
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import asyncio
from typing import AsyncGenerator, Optional

import numpy as np
from loguru import logger
from pipecat.frames.frames import (
    CancelFrame,
    EndFrame,
    ErrorFrame,
    Frame,
    StartFrame,
    VADUserStartedSpeakingFrame,
    VADUserStoppedSpeakingFrame,
)
from pipecat.processors.frame_processor import FrameDirection
from pipecat.services.stt_service import STTService
from pipecat.transcriptions.language import Language
from pipecat.utils.time import time_now_iso8601
from pipecat.utils.tracing.service_decorators import traced_stt
from pydantic import BaseModel

from nemo.agents.voice_agent.pipecat.frames.frames import DiarResultFrame
from nemo.agents.voice_agent.pipecat.services.nemo.streaming_diar import DiarizationConfig, NeMoStreamingDiarService


class NeMoDiarInputParams(BaseModel):
    threshold: Optional[float] = (
        0.4  # threshold value used to determine if a speaker exists or not, setting it to a lower value will increase the sensitivity of the diarization model
    )
    language: Optional[Language] = Language.EN_US
    frame_len_in_secs: Optional[float] = 0.08  # 80ms for FastConformer model
    config_path: Optional[str] = None  # path to the Niva ASR config file
    raw_audio_frame_len_in_secs: Optional[float] = 0.016  # 16ms for websocket transport
    buffer_size: Optional[int] = (
        30  # number of audio frames to buffer, 1 frame is 16ms, streaming Sortformer was trained with 6*0.08=0.48s chunks
    )


class NemoDiarService(STTService):
    def __init__(
        self,
        *,
        model: Optional[str] = "",
        device: Optional[str] = "cuda:0",
        sample_rate: Optional[int] = 16000,
        params: Optional[NeMoDiarInputParams] = None,
        use_vad: bool = True,
        audio_passthrough: bool = True,
        backend: Optional[str] = "legacy",
        enabled: bool = True,
        **kwargs,
    ):
        super().__init__(audio_passthrough=audio_passthrough, **kwargs)

        self._enabled = enabled
        self._queue = asyncio.Queue()
        self._response_queue = asyncio.Queue()  # Add response queue
        self._processing_task = None  # Add processing task
        self._response_task = None  # Add response task
        self._device = device
        self._sample_rate = sample_rate
        self._audio_passthrough = audio_passthrough
        if not params:
            raise ValueError("params is required")
        params.buffer_size = params.frame_len_in_secs // params.raw_audio_frame_len_in_secs
        self._params = params
        self._model_name = model
        self._use_vad = use_vad
        self._backend = backend

        self._load_model()

        self._vad_user_speaking = False
        self._audio_buffer = []
        self._current_speaker_id = None
        self._processing_running = False

        if not self._use_vad:
            self._vad_user_speaking = True

    def _load_model(self):
        if not self._enabled or not self._model_name:
            self._model = None
            self._enabled = False
            return

        if self._backend == "legacy":
            cfg = DiarizationConfig()
            cfg.device = self._device
            self._model = NeMoStreamingDiarService(
                cfg, self._model_name, frame_len_in_secs=self._params.frame_len_in_secs, sample_rate=self.sample_rate
            )
        else:
            raise ValueError(f"Invalid backend: {self._backend}")
        logger.info(f"Diarization service initialized on device: {self._device}")

    def can_generate_metrics(self) -> bool:
        """Indicates whether this service can generate metrics.

        Returns:
            bool: True, as this service supports metric generation.
        """
        return True

    async def start(self, frame: StartFrame):
        """Handle service start."""
        await super().start(frame)

        # Initialize the model if not already done
        if not hasattr(self, "_model"):
            self._load_model()

        # Start background processing task
        if not self._processing_task:
            self._processing_task = self.create_task(self._processing_task_handler())

        # Start response handling task
        if not self._response_task:
            self._response_task = self.create_task(self._response_task_handler())

    async def stop(self, frame: EndFrame):
        """Handle service stop."""
        await super().stop(frame)
        await self._stop_tasks()

    async def cancel(self, frame: CancelFrame):
        """Handle service cancellation."""
        await super().cancel(frame)
        await self._stop_tasks()

    async def _stop_tasks(self):
        """Stop background processing tasks."""
        await self._queue.put(None)  # Signal to stop processing
        if self._processing_task:
            await self.cancel_task(self._processing_task)
            self._processing_task = None

        if self._response_task:
            await self.cancel_task(self._response_task)
            self._response_task = None

    def _diarization_processor(self):
        """Background processor that handles diarization calls."""
        try:
            while self._processing_running:
                try:
                    # Get audio from queue - blocking call that will be interrupted by cancellation
                    future = asyncio.run_coroutine_threadsafe(self._queue.get(), self.get_event_loop())
                    audio = future.result()

                    if audio is None:  # Stop signal
                        logger.debug("Received stop signal in background processor")
                        break

                    # Process diarization
                    diar_result = self._model.diarize(audio)

                    # Send result back to async loop
                    asyncio.run_coroutine_threadsafe(self._response_queue.put(diar_result), self.get_event_loop())

                except Exception as e:
                    logger.error(f"Error in background diarization processor: {e}")
                    # Send error back to async loop
                    asyncio.run_coroutine_threadsafe(self._response_queue.put(('error', e)), self.get_event_loop())

        except Exception as e:
            logger.error(f"Background diarization processor fatal error: {e}")
        finally:
            logger.debug("Background diarization processor stopped")

    async def _processing_task_handler(self):
        """Handler for background processing task."""
        try:
            self._processing_running = True
            logger.debug("Starting background processing task")
            await asyncio.to_thread(self._diarization_processor)
        except asyncio.CancelledError:
            logger.debug("Background processing task cancelled")
            self._processing_running = False
            raise
        finally:
            self._processing_running = False

    async def _handle_diarization_result(self, diar_result):
        """Handle diarization result from background processing."""
        try:
            if diar_result is None:
                return
            dominant_speaker_id = self._get_dominant_speaker_id(diar_result)
            # logger.debug(f"Dominant speaker ID: {dominant_speaker_id}")
            if dominant_speaker_id is not None and dominant_speaker_id != self._current_speaker_id:
                self._current_speaker_id = dominant_speaker_id
                logger.debug(f"Pushing DiarResultFrame with speaker {dominant_speaker_id}")
                await self.push_frame(DiarResultFrame(dominant_speaker_id, stream_id="default"))
        except Exception as e:
            logger.error(f"Error handling diarization result: {e}")
            await self.push_frame(
                ErrorFrame(
                    str(e),
                    time_now_iso8601(),
                )
            )

    async def _response_task_handler(self):
        """Handler for processing diarization results."""
        logger.debug("Response task handler started")
        try:
            while True:
                try:
                    result = await self._response_queue.get()

                    if isinstance(result, tuple) and result[0] == 'error':
                        # Handle error from background processing
                        error = result[1]
                        logger.error(f"Error in NeMo Diarization processing: {error}")
                        await self.push_frame(
                            ErrorFrame(
                                str(error),
                                time_now_iso8601(),
                            )
                        )
                    else:
                        # Handle successful diarization result
                        await self._handle_diarization_result(result)

                except Exception as e:
                    logger.error(f"Error in response task handler: {e}")
        except asyncio.CancelledError:
            logger.debug("Response task handler cancelled")
            raise

    async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
        """Process audio data and generate transcription frames.

        Args:
            audio: Raw audio bytes to transcribe

        Yields:
            Frame: Transcription frames containing the results
        """
        if self._vad_user_speaking and self._enabled:
            self._audio_buffer.append(audio)
            if len(self._audio_buffer) >= self._params.buffer_size:
                await self.start_ttfb_metrics()
                await self.start_processing_metrics()
                audio = b"".join(self._audio_buffer)
                self._audio_buffer = []
                # Queue audio for background processing
                await self._queue.put(audio)
        yield None

    @traced_stt
    async def _handle_transcription(self, transcript: str, is_final: bool, language: Optional[str] = None):
        """Handle a transcription result.

        Args:
            transcript: The transcribed text
            is_final: Whether this is a final transcription
            language: The language of the transcription
        """
        pass  # Base implementation - can be extended for specific handling needs

    async def set_language(self, language: Language):
        """Update the service's recognition language.

        Args:
            language: New language for recognition
        """
        if self._params:
            self._params.language = language
        else:
            self._params = NeMoDiarInputParams(language=language)

        logger.info(f"Switching STT language to: {language}")

    async def set_model(self, model: str):
        """Update the service's model.

        Args:
            model: New model name/path to use
        """
        await super().set_model(model)
        self._model_name = model
        self._load_model()

    async def process_frame(self, frame: Frame, direction: FrameDirection):
        """Process incoming frames and track the VAD speaking state.

        Args:
            frame: The incoming frame to process
            direction: The direction the frame is travelling in the pipeline
        """
        if not self._enabled:
            # if diarization is disabled, just pass the frame through
            await self.push_frame(frame, direction)
            return

        await super().process_frame(frame, direction)
        if isinstance(frame, VADUserStartedSpeakingFrame):
            self._vad_user_speaking = True
            self._audio_buffer = []
            logger.debug("VADUserStartedSpeakingFrame received")
        elif isinstance(frame, VADUserStoppedSpeakingFrame):
            self._vad_user_speaking = False
            logger.debug("VADUserStoppedSpeakingFrame received")
            self._current_speaker_id = None
            self._audio_buffer = []

    def reset(self):
        """Reset the diarization service."""
        self._current_speaker_id = None
        self._audio_buffer = []
        self._vad_user_speaking = False
        self._model.reset_state()

    def _get_dominant_speaker_id(self, spk_pred: np.ndarray):
        spk_pred = (spk_pred > self._params.threshold).astype(int)
        dominant_speaker_id = None
        if spk_pred.sum() > 0:
            # get the dominant speaker id
            # Filter to only keep frames that have any speaker probability > 0.0
            valid_frame_mask = spk_pred.sum(axis=1) > 0

            # Filter diar_result to only keep valid frames
            filtered_diar_result = spk_pred[valid_frame_mask]  # ndarray of shape [num_valid_frames, num_speakers]

            # Get the primary speaker for each valid frame
            primary_spk = np.argmax(filtered_diar_result, axis=1)  # ndarray of shape [num_valid_frames]
            # logger.debug(f"Primary speaker for valid frames: {primary_spk}")

            # count the number of different speakers in the primary speaker sequence
            num_speakers = len(np.unique(primary_spk))
            # logger.debug(f"Number of different speakers: {num_speakers}")

            # If there are multiple speakers, get the dominant one
            if num_speakers > 1:
                # Count occurrences of each speaker
                speaker_counts = np.bincount(primary_spk)
                dominant_speaker_id = np.argmax(speaker_counts)
            else:
                # Only one speaker, return that speaker ID
                dominant_speaker_id = primary_spk[0]
        return dominant_speaker_id
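To make the dominant-speaker selection in _get_dominant_speaker_id concrete, here is a small self-contained sketch (not part of the file) that applies the same thresholding and majority vote to a toy prediction matrix, using the 0.4 default threshold from NeMoDiarInputParams:

import numpy as np

# Toy frame-level speaker probabilities: 4 frames x 2 speakers.
spk_pred = np.array([[0.9, 0.1], [0.8, 0.3], [0.2, 0.7], [0.6, 0.2]])
binarized = (spk_pred > 0.4).astype(int)         # threshold, as in the service
valid = binarized[binarized.sum(axis=1) > 0]     # keep frames with at least one active speaker
primary = np.argmax(valid, axis=1)               # primary speaker per frame -> [0, 0, 1, 0]
dominant = int(np.argmax(np.bincount(primary)))  # majority vote -> speaker 0
print(dominant)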
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import asyncio
|
| 16 |
+
import os
|
| 17 |
+
import socket
|
| 18 |
+
import subprocess
|
| 19 |
+
import time
|
| 20 |
+
import uuid
|
| 21 |
+
from threading import Thread
|
| 22 |
+
from typing import AsyncGenerator, List, Mapping, Optional
|
| 23 |
+
|
| 24 |
+
import psutil
|
| 25 |
+
import requests
|
| 26 |
+
from jinja2.exceptions import TemplateError
|
| 27 |
+
from loguru import logger
|
| 28 |
+
from omegaconf import DictConfig, OmegaConf
|
| 29 |
+
from openai import APITimeoutError, AsyncStream, BadRequestError
|
| 30 |
+
from openai.types.chat import ChatCompletionChunk, ChatCompletionMessageParam
|
| 31 |
+
from pipecat.adapters.services.open_ai_adapter import OpenAILLMInvocationParams
|
| 32 |
+
from pipecat.frames.frames import (
|
| 33 |
+
CancelFrame,
|
| 34 |
+
EndFrame,
|
| 35 |
+
LLMFullResponseEndFrame,
|
| 36 |
+
LLMFullResponseStartFrame,
|
| 37 |
+
LLMTextFrame,
|
| 38 |
+
)
|
| 39 |
+
from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
|
| 40 |
+
from pipecat.services.openai.llm import OpenAILLMService
|
| 41 |
+
from transformers import AsyncTextIteratorStreamer, AutoModelForCausalLM, AutoTokenizer
|
| 42 |
+
from vllm.config import ModelConfig as vllmModelConfig
|
| 43 |
+
|
| 44 |
+
DEFAULT_GENERATION_KWARGS = {
|
| 45 |
+
"max_new_tokens": 256,
|
| 46 |
+
"temperature": 0.7,
|
| 47 |
+
"top_p": 0.9,
|
| 48 |
+
"do_sample": True,
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class LLMUtilsMixin:
|
| 53 |
+
"""Utils for local LLM services."""
|
| 54 |
+
|
| 55 |
+
def _maybe_add_user_message(self, messages: List[ChatCompletionMessageParam]) -> List[ChatCompletionMessageParam]:
|
| 56 |
+
"""
|
| 57 |
+
Some LLMs like "nvidia/Llama-3.1-Nemotron-Nano-8B-v1" requires a user turn after the system prompt,
|
| 58 |
+
this function is used to add a dummy user turn if the system prompt is followed by an assistant turn.
|
| 59 |
+
"""
|
| 60 |
+
if len(messages) > 1 and messages[0]["role"] == "system" and messages[1]["role"] == "assistant":
|
| 61 |
+
message = {"role": "user", "content": "Hi"}
|
| 62 |
+
messages.insert(1, message)
|
| 63 |
+
elif len(messages) == 1 and messages[0]["role"] == "system":
|
| 64 |
+
messages.append({"role": "user", "content": "Hi"})
|
| 65 |
+
return messages
|
| 66 |
+
|
| 67 |
+
def _maybe_merge_consecutive_user_turns(
|
| 68 |
+
self, messages: List[ChatCompletionMessageParam]
|
| 69 |
+
) -> List[ChatCompletionMessageParam]:
|
| 70 |
+
"""
|
| 71 |
+
Merge consecutive user turns into a single turn,
|
| 72 |
+
since some LLMs like "nvidia/Llama-3.1-Nemotron-Nano-8B-v1" do not support consecutive user turns.
|
| 73 |
+
"""
|
| 74 |
+
if not messages:
|
| 75 |
+
return messages
|
| 76 |
+
|
| 77 |
+
merged_messages = []
|
| 78 |
+
|
| 79 |
+
user_content = ""
|
| 80 |
+
for message in messages:
|
| 81 |
+
role = message["role"]
|
| 82 |
+
if role != "user":
|
| 83 |
+
# check if there's any preceeding user content, add them first
|
| 84 |
+
if user_content:
|
| 85 |
+
merged_messages.append({"role": "user", "content": user_content})
|
| 86 |
+
user_content = ""
|
| 87 |
+
merged_messages.append(message)
|
| 88 |
+
else:
|
| 89 |
+
if user_content:
|
| 90 |
+
user_content += "; " + message["content"]
|
| 91 |
+
else:
|
| 92 |
+
user_content = message["content"]
|
| 93 |
+
|
| 94 |
+
# add the last user content
|
| 95 |
+
if user_content:
|
| 96 |
+
merged_messages.append({"role": "user", "content": user_content})
|
| 97 |
+
|
| 98 |
+
return merged_messages
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class HuggingFaceLLMLocalService(LLMUtilsMixin):
|
| 102 |
+
"""
|
| 103 |
+
HuggingFace LLM local service.
|
| 104 |
+
"""
|
| 105 |
+
|
| 106 |
+
def __init__(
|
| 107 |
+
self,
|
| 108 |
+
model: str = "meta-llama/Meta-Llama-3-8B-Instruct",
|
| 109 |
+
device: str = "cuda:0",
|
| 110 |
+
dtype: str = "bfloat16",
|
| 111 |
+
thinking_budget: int = 0,
|
| 112 |
+
generation_kwargs: dict = None,
|
| 113 |
+
apply_chat_template_kwargs: dict = None,
|
| 114 |
+
):
|
| 115 |
+
self.device = device
|
| 116 |
+
self.dtype = dtype
|
| 117 |
+
self.thinking_budget = thinking_budget
|
| 118 |
+
self.tokenizer = AutoTokenizer.from_pretrained(model)
|
| 119 |
+
self.model = AutoModelForCausalLM.from_pretrained(
|
| 120 |
+
model, device_map=device, dtype=dtype, trust_remote_code=True
|
| 121 |
+
) # type: AutoModelForCausalLM
|
| 122 |
+
|
| 123 |
+
self.generation_kwargs = generation_kwargs if generation_kwargs else DEFAULT_GENERATION_KWARGS
|
| 124 |
+
logger.debug(f"LLM generation kwargs: {self.generation_kwargs}")
|
| 125 |
+
|
| 126 |
+
self.apply_chat_template_kwargs = apply_chat_template_kwargs if apply_chat_template_kwargs else {}
|
| 127 |
+
if "tokenize" in self.apply_chat_template_kwargs:
|
| 128 |
+
if self.apply_chat_template_kwargs["tokenize"] is not False:
|
| 129 |
+
logger.warning(
|
| 130 |
+
f"Found `tokenize=True` in apply_chat_template_kwargs={self.apply_chat_template_kwargs},"
|
| 131 |
+
"it will be ignored and forced to `False`"
|
| 132 |
+
)
|
| 133 |
+
self.apply_chat_template_kwargs.pop("tokenize")
|
| 134 |
+
|
| 135 |
+
logger.debug(f"LLM apply_chat_template kwargs: {self.apply_chat_template_kwargs}")
|
| 136 |
+
|
| 137 |
+
def _apply_chat_template(self, messages: List[ChatCompletionMessageParam]) -> str:
|
| 138 |
+
"""
|
| 139 |
+
Apply the chat template to the messages.
|
| 140 |
+
"""
|
| 141 |
+
return self.tokenizer.apply_chat_template(messages, tokenize=False, **self.apply_chat_template_kwargs)
|
| 142 |
+
|
| 143 |
+
def _get_prompt_from_messages(self, messages: List[ChatCompletionMessageParam]) -> str:
|
| 144 |
+
"""
|
| 145 |
+
Get the formatted prompt from the conversation history messages.
|
| 146 |
+
This function also tries to fix the messages if the LLM cannot handle consecutive turns of the same role,
|
| 147 |
+
or requires a user turn after the system prompt.
|
| 148 |
+
"""
|
| 149 |
+
try:
|
| 150 |
+
prompt = self._apply_chat_template(messages)
|
| 151 |
+
return prompt
|
| 152 |
+
except TemplateError as e:
|
| 153 |
+
logger.warning(f"Got TemplateError: {e}.")
|
| 154 |
+
|
| 155 |
+
logger.debug(f"Input LLM messages: {messages}")
|
| 156 |
+
if len(messages) > 1 and messages[0]["role"] == "system" and messages[1]["role"] == "assistant":
|
| 157 |
+
logger.warning("Trying to fix by adding dummy user message after system prompt...")
|
| 158 |
+
try:
|
| 159 |
+
messages = self._maybe_add_user_message(messages)
|
| 160 |
+
logger.debug(f"LLM messages after adding dummy user message: {messages}")
|
| 161 |
+
prompt = self._apply_chat_template(messages)
|
| 162 |
+
return prompt
|
| 163 |
+
except TemplateError as e:
|
| 164 |
+
logger.warning(f"Got TemplateError: {e}. Trying to fix by merging consecutive turns if possible.")
|
| 165 |
+
|
| 166 |
+
try:
|
| 167 |
+
new_messages = self._maybe_merge_consecutive_user_turns(messages)
|
| 168 |
+
logger.debug(f"LLM messages after merging consecutive user turns: {new_messages}")
|
| 169 |
+
prompt = self._apply_chat_template(new_messages)
|
| 170 |
+
# Update the messages in place if successful
|
| 171 |
+
messages.clear()
|
| 172 |
+
messages.extend(new_messages)
|
| 173 |
+
return prompt
|
| 174 |
+
except Exception as e:
|
| 175 |
+
logger.warning(f"Got Exception: {e}, messages: {messages}")
|
| 176 |
+
raise e
|
| 177 |
+
|
| 178 |
+
async def generate_stream(
|
| 179 |
+
self, messages: List[ChatCompletionMessageParam], **kwargs
|
| 180 |
+
) -> AsyncGenerator[ChatCompletionChunk, None]:
|
| 181 |
+
"""
|
| 182 |
+
Generate a stream of chat completion chunks from the messages.
|
| 183 |
+
"""
|
| 184 |
+
# Convert messages to prompt format
|
| 185 |
+
prompt = self._get_prompt_from_messages(messages)
|
| 186 |
+
|
| 187 |
+
logger.debug(f"LLM prompt: {prompt}")
|
| 188 |
+
|
| 189 |
+
inputs = self.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").to(self.device)
|
| 190 |
+
|
| 191 |
+
# Generate with streaming
|
| 192 |
+
streamer = AsyncTextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True)
|
| 193 |
+
generation_kwargs = {
|
| 194 |
+
**inputs,
|
| 195 |
+
"streamer": streamer,
|
| 196 |
+
**self.generation_kwargs,
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
# Start generation in background
|
| 200 |
+
thread = Thread(
|
| 201 |
+
target=self.model.generate,
|
| 202 |
+
kwargs=generation_kwargs,
|
| 203 |
+
)
|
| 204 |
+
thread.start()
|
| 205 |
+
|
| 206 |
+
# Stream the output
|
| 207 |
+
async for text in streamer:
|
| 208 |
+
# logger.debug(f"Streamer yielded text: {text}")
|
| 209 |
+
chunk = ChatCompletionChunk(
|
| 210 |
+
id="hf-" + str(uuid.uuid4()),
|
| 211 |
+
choices=[{"delta": {"content": text}, "finish_reason": None, "index": 0}],
|
| 212 |
+
created=int(time.time()),
|
| 213 |
+
model=self.model.config._name_or_path,
|
| 214 |
+
object="chat.completion.chunk",
|
| 215 |
+
)
|
| 216 |
+
yield chunk
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class HuggingFaceLLMService(OpenAILLMService):
|
| 220 |
+
"""
|
| 221 |
+
LLM service that hosts a HuggingFace model.
|
| 222 |
+
"""
|
| 223 |
+
|
| 224 |
+
def __init__(
|
| 225 |
+
self,
|
| 226 |
+
*,
|
| 227 |
+
model: str = "google/gemma-7b-it",
|
| 228 |
+
device: str = "cuda",
|
| 229 |
+
dtype: str = "bfloat16",
|
| 230 |
+
thinking_budget: int = 0,
|
| 231 |
+
generation_kwargs: dict = None,
|
| 232 |
+
apply_chat_template_kwargs: dict = None,
|
| 233 |
+
**kwargs,
|
| 234 |
+
):
|
| 235 |
+
self._model_name = model
|
| 236 |
+
self._device = device
|
| 237 |
+
self._dtype = dtype
|
| 238 |
+
self._thinking_budget = thinking_budget
|
| 239 |
+
self._generation_kwargs = generation_kwargs if generation_kwargs is not None else DEFAULT_GENERATION_KWARGS
|
| 240 |
+
self._apply_chat_template_kwargs = apply_chat_template_kwargs if apply_chat_template_kwargs is not None else {}
|
| 241 |
+
super().__init__(model=model, **kwargs)
|
| 242 |
+
|
| 243 |
+
def create_client(self, api_key=None, base_url=None, **kwargs):
|
| 244 |
+
"""
|
| 245 |
+
Create a HuggingFaceLLMLocalService client.
|
| 246 |
+
"""
|
| 247 |
+
return HuggingFaceLLMLocalService(
|
| 248 |
+
model=self._model_name,
|
| 249 |
+
device=self._device,
|
| 250 |
+
dtype=self._dtype,
|
| 251 |
+
thinking_budget=self._thinking_budget,
|
| 252 |
+
generation_kwargs=self._generation_kwargs,
|
| 253 |
+
apply_chat_template_kwargs=self._apply_chat_template_kwargs,
|
| 254 |
+
)
|
| 255 |
+
|
| 256 |
+
async def _process_context(self, context: OpenAILLMContext):
|
| 257 |
+
"""Process a context through the LLM and push text frames.
|
| 258 |
+
|
| 259 |
+
Args:
|
| 260 |
+
context (OpenAILLMContext): The context to process, containing messages
|
| 261 |
+
and other information needed for the LLM interaction.
|
| 262 |
+
"""
|
| 263 |
+
await self.push_frame(LLMFullResponseStartFrame())
|
| 264 |
+
cumulative_text = ""
|
| 265 |
+
try:
|
| 266 |
+
await self.start_ttfb_metrics()
|
| 267 |
+
messages = context.get_messages()
|
| 268 |
+
async for chunk in self._client.generate_stream(messages):
|
| 269 |
+
if chunk.choices[0].delta.content:
|
| 270 |
+
await self.stop_ttfb_metrics()
|
| 271 |
+
text = chunk.choices[0].delta.content
|
| 272 |
+
cumulative_text += text
|
| 273 |
+
frame = LLMTextFrame(text)
|
| 274 |
+
await self.push_frame(frame)
|
| 275 |
+
except Exception as e:
|
| 276 |
+
logger.error(f"Error in _process_context: {e}", exc_info=True)
|
| 277 |
+
raise
|
| 278 |
+
finally:
|
| 279 |
+
cumulative_text = " ".join(cumulative_text.split()).strip()
|
| 280 |
+
if not cumulative_text:
|
| 281 |
+
logger.warning(f"LLM response is empty for context: {context}")
|
| 282 |
+
await self.push_frame(LLMFullResponseEndFrame())
|
| 283 |
+
|
| 284 |
+
async def get_chat_completions(
|
| 285 |
+
self, params_from_context: OpenAILLMInvocationParams
|
| 286 |
+
) -> AsyncGenerator[ChatCompletionChunk, None]:
|
| 287 |
+
"""Create a streaming chat completion using HuggingFace model.
|
| 288 |
+
|
| 289 |
+
Args:
|
| 290 |
+
context (OpenAILLMContext): The context object containing tools configuration
|
| 291 |
+
and other settings for the chat completion.
|
| 292 |
+
messages (List[ChatCompletionMessageParam]): The list of messages comprising
|
| 293 |
+
the conversation history and current request.
|
| 294 |
+
|
| 295 |
+
Returns:
|
| 296 |
+
AsyncGenerator[ChatCompletionChunk]: A streaming response of chat completion
|
| 297 |
+
chunks that can be processed asynchronously.
|
| 298 |
+
"""
|
| 299 |
+
messages = params_from_context["messages"]
|
| 300 |
+
|
| 301 |
+
return self._client.generate_stream(messages)
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
class VLLMService(OpenAILLMService, LLMUtilsMixin):
|
| 305 |
+
"""
|
| 306 |
+
LLM service that hosts a vLLM server.
|
| 307 |
+
"""
|
| 308 |
+
|
| 309 |
+
def __init__(
|
| 310 |
+
self,
|
| 311 |
+
*,
|
| 312 |
+
model: str,
|
| 313 |
+
device: str = "cuda",
|
| 314 |
+
api_key="None",
|
| 315 |
+
base_url="http://localhost:8000/v1",
|
| 316 |
+
organization="None",
|
| 317 |
+
project="None",
|
| 318 |
+
default_headers: Optional[Mapping[str, str]] = None,
|
| 319 |
+
params: Optional[OpenAILLMService.InputParams] = None,
|
| 320 |
+
thinking_budget: int = 0,
|
| 321 |
+
start_vllm_on_init: bool = False,
|
| 322 |
+
vllm_server_params: Optional[str] = None,
|
| 323 |
+
vllm_server_max_wait_time: int = 3600, # 1 hour max wait time
|
| 324 |
+
vllm_server_check_interval: int = 5, # check server every 5 seconds
|
| 325 |
+
**kwargs,
|
| 326 |
+
):
|
| 327 |
+
self._device = device
|
| 328 |
+
self._vllm_server_max_wait_time = vllm_server_max_wait_time
|
| 329 |
+
self._vllm_server_check_interval = vllm_server_check_interval
|
| 330 |
+
if start_vllm_on_init:
|
| 331 |
+
base_url = self._start_vllm_server(model, vllm_server_params, base_url)
|
| 332 |
+
|
| 333 |
+
super().__init__(
|
| 334 |
+
model=model,
|
| 335 |
+
api_key=api_key,
|
| 336 |
+
base_url=base_url,
|
| 337 |
+
organization=organization,
|
| 338 |
+
project=project,
|
| 339 |
+
default_headers=default_headers,
|
| 340 |
+
params=params,
|
| 341 |
+
**kwargs,
|
| 342 |
+
)
|
| 343 |
+
self._thinking_budget = thinking_budget
|
| 344 |
+
self._vllm_server_params = vllm_server_params
|
| 345 |
+
self._start_vllm_on_init = start_vllm_on_init
|
| 346 |
+
|
| 347 |
+
# TODO: handle thinking budget
|
| 348 |
+
logger.info(
|
| 349 |
+
f"VLLMService initialized with model: {model}, api_key: {api_key}, base_url: {base_url},"
|
| 350 |
+
f"params: {params}, thinking_budget: {thinking_budget}"
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
def _start_vllm_server(
|
| 354 |
+
self, model: str, vllm_server_params: Optional[str] = None, base_url: Optional[str] = None
|
| 355 |
+
) -> str:
|
| 356 |
+
"""
|
| 357 |
+
Start a vllm server and return the base url.
|
| 358 |
+
"""
|
| 359 |
+
|
| 360 |
+
requested_port = None
|
| 361 |
+
# If base_url is provided, extract port from it
|
| 362 |
+
if base_url:
|
| 363 |
+
try:
|
| 364 |
+
# Extract port from base_url like "http://localhost:8003/v1"
|
| 365 |
+
from urllib.parse import urlparse
|
| 366 |
+
|
| 367 |
+
parsed_url = urlparse(base_url)
|
| 368 |
+
if parsed_url.port:
|
| 369 |
+
requested_port = parsed_url.port
|
| 370 |
+
except Exception as e:
|
| 371 |
+
logger.warning(
|
| 372 |
+
f"Could not parse port from base_url {base_url}: {e}, using port from vllm_server_params"
|
| 373 |
+
)
|
| 374 |
+
|
| 375 |
+
# Parse port from vllm_server_params, default to 8000
|
| 376 |
+
if vllm_server_params:
|
| 377 |
+
params_list = vllm_server_params.split()
|
| 378 |
+
for i, param in enumerate(params_list):
|
| 379 |
+
if param == "--port" and i + 1 < len(params_list):
|
| 380 |
+
try:
|
| 381 |
+
param_port = int(params_list[i + 1])
|
| 382 |
+
if requested_port is None:
|
| 383 |
+
requested_port = param_port
|
| 384 |
+
else:
|
| 385 |
+
if param_port != requested_port:
|
| 386 |
+
logger.warning(
|
| 387 |
+
f"Port {param_port} from vllm_server_params is different from base_url port"
|
| 388 |
+
f"{requested_port}, using new port {param_port}"
|
| 389 |
+
)
|
| 390 |
+
requested_port = param_port
|
| 391 |
+
break
|
| 392 |
+
except ValueError:
|
| 393 |
+
logger.warning(f"Invalid port number: {params_list[i + 1]}, using default 8000")
|
| 394 |
+
|
| 395 |
+
if requested_port is None:
|
| 396 |
+
# try to use default port
|
| 397 |
+
requested_port = 8000
|
| 398 |
+
|
| 399 |
+
def find_available_port(start_port: int) -> int:
|
| 400 |
+
"""Find an available port starting from start_port"""
|
| 401 |
+
for port in range(start_port, start_port + 100): # Try up to 100 ports
|
| 402 |
+
try:
|
| 403 |
+
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
| 404 |
+
s.bind(('localhost', port))
|
| 405 |
+
return port
|
| 406 |
+
except OSError:
|
| 407 |
+
continue
|
| 408 |
+
raise RuntimeError(f"Could not find an available port starting from {start_port}")
|
| 409 |
+
|
| 410 |
+
def get_pid_on_port(port: int) -> Optional[int]:
|
| 411 |
+
for conn in psutil.net_connections(kind="inet"):
|
| 412 |
+
if conn.laddr.port == port and conn.status == psutil.CONN_LISTEN:
|
| 413 |
+
return conn.pid
|
| 414 |
+
return None
|
| 415 |
+
|
| 416 |
+
def check_server_model(port: int, verbose: bool = False) -> tuple[bool, str]:
|
| 417 |
+
"""Check if server is running on port and return (is_running, model_name)"""
|
| 418 |
+
try:
|
| 419 |
+
response = requests.get(f"http://localhost:{port}/v1/models", timeout=5)
|
| 420 |
+
if response.status_code == 200:
|
| 421 |
+
# get the PID for the server process
|
| 422 |
+
pid = get_pid_on_port(port)
|
| 423 |
+
if pid is not None and verbose:
|
| 424 |
+
logger.warning(
|
| 425 |
+
f"Found vLLM server process (PID: {pid}) on port {port}, you can use `lsof -i :{port}`"
|
| 426 |
+
"to find the process and kill it if you want to start a new server."
|
| 427 |
+
)
|
| 428 |
+
models_data = response.json()
|
| 429 |
+
if "data" in models_data and models_data["data"]:
|
| 430 |
+
served_model = models_data["data"][0].get("id", "")
|
| 431 |
+
return True, served_model
|
| 432 |
+
return True, ""
|
| 433 |
+
return False, ""
|
| 434 |
+
except (requests.exceptions.RequestException, requests.exceptions.Timeout):
|
| 435 |
+
return False, ""
|
| 436 |
+
|
| 437 |
+
# First, check if vLLM server is already running on the requested port
|
| 438 |
+
is_running, served_model = check_server_model(requested_port, verbose=True)
|
| 439 |
+
if is_running:
|
| 440 |
+
if served_model == model:
|
| 441 |
+
final_base_url = f"http://localhost:{requested_port}/v1"
|
| 442 |
+
logger.info(f"vLLM server is already running at {final_base_url} with the correct model: {model}")
|
| 443 |
+
return final_base_url
|
| 444 |
+
else:
|
| 445 |
+
logger.warning(
|
| 446 |
+
f"vLLM server on port {requested_port} is serving model '{served_model}' but we need '{model}'."
|
| 447 |
+
"Finding new port..."
|
| 448 |
+
)
|
| 449 |
+
|
| 450 |
+
# Find an available port for our model
|
| 451 |
+
port = find_available_port(requested_port)
|
| 452 |
+
if port != requested_port:
|
| 453 |
+
logger.info(f"Using port {port} instead of requested port {requested_port}")
|
| 454 |
+
|
| 455 |
+
final_base_url = f"http://localhost:{port}/v1"
|
| 456 |
+
|
| 457 |
+
# Check if there's already a vLLM process running on the same port and model
|
| 458 |
+
for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
|
| 459 |
+
try:
|
| 460 |
+
if proc.info['cmdline'] and any('vllm' in arg and 'serve' in arg for arg in proc.info['cmdline']):
|
| 461 |
+
# Check if this process is using the same port and model
|
| 462 |
+
cmdline_str = ' '.join(proc.info['cmdline'])
|
| 463 |
+
if f"--port {port}" in cmdline_str:
|
| 464 |
+
# Extract the model from the command line
|
| 465 |
+
cmdline_parts = proc.info['cmdline']
|
| 466 |
+
model_index = -1
|
| 467 |
+
for i, arg in enumerate(cmdline_parts):
|
| 468 |
+
if arg == "serve" and i + 1 < len(cmdline_parts):
|
| 469 |
+
model_index = i + 1
|
| 470 |
+
break
|
| 471 |
+
|
| 472 |
+
if model_index != -1 and model_index < len(cmdline_parts):
|
| 473 |
+
running_model = cmdline_parts[model_index]
|
| 474 |
+
if running_model == model:
|
| 475 |
+
logger.info(
|
| 476 |
+
f"Found existing vLLM server process (PID: {proc.info['pid']}) on port {port}"
|
| 477 |
+
f"serving model {model}"
|
| 478 |
+
)
|
| 479 |
+
# Wait a bit and check if it's responding
|
| 480 |
+
time.sleep(2)
|
| 481 |
+
is_running, served_model = check_server_model(port)
|
| 482 |
+
if is_running and served_model == model:
|
| 483 |
+
logger.info(
|
| 484 |
+
f"Existing vLLM server is responding at {final_base_url} with correct model"
|
| 485 |
+
)
|
| 486 |
+
return final_base_url
|
| 487 |
+
else:
|
| 488 |
+
logger.warning(
|
| 489 |
+
f"Existing vLLM process found on port {port} but not responding correctly,"
|
| 490 |
+
"will start new server"
|
| 491 |
+
)
|
| 492 |
+
else:
|
| 493 |
+
logger.info(
|
| 494 |
+
f"Found vLLM process on port {port} but serving different model '{running_model}'"
|
| 495 |
+
f"(need '{model}'). Will start new server."
|
| 496 |
+
)
|
| 497 |
+
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
|
| 498 |
+
continue
|
| 499 |
+
|
| 500 |
+
# Build the command with the determined port
|
| 501 |
+
cmd_parts = ["vllm", "serve", model]
|
| 502 |
+
|
| 503 |
+
# Parse and modify vllm_server_params to use the correct port
|
| 504 |
+
if vllm_server_params:
|
| 505 |
+
# parse the vllm_server_params and add the port to the command
|
| 506 |
+
params_list = vllm_server_params.split()
|
| 507 |
+
modified_params = []
|
| 508 |
+
i = 0
|
| 509 |
+
while i < len(params_list):
|
| 510 |
+
if params_list[i] == "--port" and i + 1 < len(params_list):
|
| 511 |
+
# Replace the port with our determined port
|
| 512 |
+
modified_params.extend(["--port", str(port)])
|
| 513 |
+
i += 2 # Skip the original port value
|
| 514 |
+
else:
|
| 515 |
+
modified_params.append(params_list[i])
|
| 516 |
+
i += 1
|
| 517 |
+
cmd_parts.extend(modified_params)
|
| 518 |
+
else:
|
| 519 |
+
# Add port if vllm_server_params is not provided
|
| 520 |
+
cmd_parts.extend(["--port", str(port)])
|
| 521 |
+
|
| 522 |
+
logger.info(f"Starting vLLM server with command: {' '.join(cmd_parts)}")
|
| 523 |
+
logger.warning("It will take a while to download the model if it's not already downloaded.")
|
| 524 |
+
# Set up environment variables for device configuration
|
| 525 |
+
env = os.environ.copy()
|
| 526 |
+
if self._device and self._device != "cpu":
|
| 527 |
+
# Extract CUDA device number if it's in format "cuda:0", "cuda:1", etc.
|
| 528 |
+
if self._device.startswith("cuda:"):
|
| 529 |
+
device_id = self._device.split(":")[1]
|
| 530 |
+
env["CUDA_VISIBLE_DEVICES"] = device_id
|
| 531 |
+
logger.info(f"Setting CUDA_VISIBLE_DEVICES={device_id}")
|
| 532 |
+
elif self._device == "cuda":
|
| 533 |
+
# Use default CUDA device (don't set CUDA_VISIBLE_DEVICES)
|
| 534 |
+
logger.info("Using default CUDA device")
|
| 535 |
+
else:
|
| 536 |
+
# For other device strings, try to extract device number
|
| 537 |
+
logger.warning(f"Unknown device format: {self._device}, using as-is")
|
| 538 |
+
env["CUDA_VISIBLE_DEVICES"] = self._device
|
| 539 |
+
elif self._device == "cpu":
|
| 540 |
+
env["CUDA_VISIBLE_DEVICES"] = ""
|
| 541 |
+
logger.info("Setting CUDA_VISIBLE_DEVICES='' to use CPU")
|
| 542 |
+
|
| 543 |
+
try:
|
| 544 |
+
# Start the vLLM server process with environment variables
|
| 545 |
+
process = subprocess.Popen(
|
| 546 |
+
cmd_parts,
|
| 547 |
+
stdout=subprocess.PIPE,
|
| 548 |
+
stderr=subprocess.PIPE,
|
| 549 |
+
text=True,
|
| 550 |
+
env=env,
|
| 551 |
+
preexec_fn=os.setsid if os.name != 'nt' else None, # Create new process group
|
| 552 |
+
)
|
| 553 |
+
|
| 554 |
+
# Store the process for potential cleanup later
|
| 555 |
+
self._vllm_process = process
|
| 556 |
+
|
| 557 |
+
# Wait for server to start up
|
| 558 |
+
max_wait_time = self._vllm_server_max_wait_time
|
| 559 |
+
check_interval = self._vllm_server_check_interval
|
| 560 |
+
waited_time = 0
|
| 561 |
+
|
| 562 |
+
logger.info(f"Waiting for vLLM server to start on port {port}...")
|
| 563 |
+
while waited_time < max_wait_time:
|
| 564 |
+
is_running, served_model = check_server_model(port)
|
| 565 |
+
if is_running and served_model == model:
|
| 566 |
+
logger.info(f"vLLM server started successfully at {final_base_url} serving model: {model}")
|
| 567 |
+
return final_base_url
|
| 568 |
+
elif is_running and served_model != model:
|
| 569 |
+
logger.warning(
|
| 570 |
+
f"vLLM server started but serving wrong model '{served_model}' instead of '{model}'."
|
| 571 |
+
"Continuing to wait..."
|
| 572 |
+
)
|
| 573 |
+
|
| 574 |
+
# Check if process is still running
|
| 575 |
+
if process.poll() is not None:
|
| 576 |
+
# Process has terminated
|
| 577 |
+
stdout, stderr = process.communicate()
|
| 578 |
+
logger.error(f"vLLM server process terminated unexpectedly. stdout: {stdout}, stderr: {stderr}")
|
| 579 |
+
raise RuntimeError(f"Failed to start vLLM server: {stderr}")
|
| 580 |
+
|
| 581 |
+
time.sleep(check_interval)
|
| 582 |
+
waited_time += check_interval
|
| 583 |
+
logger.debug(f"Still waiting for vLLM server on port {port}... ({waited_time}s)")
|
| 584 |
+
|
| 585 |
+
# If we get here, server didn't start in time
|
| 586 |
+
logger.error(f"vLLM server failed to start within {max_wait_time} seconds on port {port}")
|
| 587 |
+
process.terminate()
|
| 588 |
+
raise RuntimeError(f"vLLM server failed to start within {max_wait_time} seconds on port {port}")
|
| 589 |
+
|
| 590 |
+
except FileNotFoundError:
|
| 591 |
+
logger.error("vLLM not found. Please install vLLM: pip install vllm")
|
| 592 |
+
raise RuntimeError("vLLM not found. Please install vLLM: pip install vllm")
|
| 593 |
+
except Exception as e:
|
| 594 |
+
logger.error(f"Failed to start vLLM server: {e}")
|
| 595 |
+
self._stop_vllm_server()
|
| 596 |
+
raise e
|
| 597 |
+
|
| 598 |
+
def _stop_vllm_server(self):
|
| 599 |
+
"""Stop the vLLM server process if it's running."""
|
| 600 |
+
if hasattr(self, '_vllm_process') and self._vllm_process:
|
| 601 |
+
logger.info(f"Stopping vLLM server process {self._vllm_process.pid}")
|
| 602 |
+
self._vllm_process.terminate()
|
| 603 |
+
|
| 604 |
+
async def stop(self, frame: EndFrame):
|
| 605 |
+
"""Stop the LLM service.
|
| 606 |
+
|
| 607 |
+
Args:
|
| 608 |
+
frame: The end frame.
|
| 609 |
+
"""
|
| 610 |
+
await super().stop(frame)
|
| 611 |
+
self._stop_vllm_server()
|
| 612 |
+
|
| 613 |
+
async def cancel(self, frame: CancelFrame):
|
| 614 |
+
"""Cancel the LLM service.
|
| 615 |
+
|
| 616 |
+
Args:
|
| 617 |
+
frame: The cancel frame.
|
| 618 |
+
"""
|
| 619 |
+
await super().cancel(frame)
|
| 620 |
+
self._stop_vllm_server()
|
| 621 |
+
|
| 622 |
+
async def get_chat_completions(
|
| 623 |
+
self, params_from_context: OpenAILLMInvocationParams
|
| 624 |
+
) -> AsyncStream[ChatCompletionChunk]:
|
| 625 |
+
"""Get streaming chat completions from OpenAI API.
|
| 626 |
+
|
| 627 |
+
Args:
|
| 628 |
+
context: The LLM context containing tools and configuration.
|
| 629 |
+
messages: List of chat completion messages to send.
|
| 630 |
+
|
| 631 |
+
Returns:
|
| 632 |
+
Async stream of chat completion chunks.
|
| 633 |
+
"""
|
| 634 |
+
|
| 635 |
+
params = self.build_chat_completion_params(params_from_context)
|
| 636 |
+
messages = params_from_context["messages"]
|
| 637 |
+
if self._retry_on_timeout:
|
| 638 |
+
try:
|
| 639 |
+
chunks = await asyncio.wait_for(
|
| 640 |
+
self._get_response_from_client(messages, params), timeout=self._retry_timeout_secs
|
| 641 |
+
)
|
| 642 |
+
return chunks
|
| 643 |
+
except (APITimeoutError, asyncio.TimeoutError):
|
| 644 |
+
# Retry, this time without a timeout so we get a response
|
| 645 |
+
logger.debug(f"{self}: Retrying chat completion due to timeout")
|
| 646 |
+
chunks = await self._get_response_from_client(messages, params)
|
| 647 |
+
return chunks
|
| 648 |
+
else:
|
| 649 |
+
chunks = await self._get_response_from_client(messages, params)
|
| 650 |
+
return chunks
|
| 651 |
+
|
| 652 |
+
async def _get_response_from_client(
|
| 653 |
+
self, messages: List[ChatCompletionMessageParam], params: dict
|
| 654 |
+
) -> AsyncStream[ChatCompletionChunk]:
|
| 655 |
+
"""Get a response from the client."""
|
| 656 |
+
|
| 657 |
+
try:
|
| 658 |
+
chunks = await self._client.chat.completions.create(**params)
|
| 659 |
+
except BadRequestError as e:
|
| 660 |
+
logger.error(f"Error in _get_response_from_client: {e}, trying to fix...")
|
| 661 |
+
logger.debug(f"LLM messages before fixing: {messages}")
|
| 662 |
+
messages = self._maybe_add_user_message(messages)
|
| 663 |
+
messages = self._maybe_merge_consecutive_user_turns(messages)
|
| 664 |
+
logger.debug(f"LLM messages after fixing: {messages}")
|
| 665 |
+
params["messages"] = messages
|
| 666 |
+
chunks = await self._client.chat.completions.create(**params)
|
| 667 |
+
|
| 668 |
+
return chunks
|


def get_llm_service_from_config(config: DictConfig) -> OpenAILLMService:
    """Get an LLM service from the configuration."""
    backend = config.type

    logger.info(f"Initializing LLM service from config: {config}")

    # If backend is "auto", try to detect the best backend
    if backend == "auto":
        model_name = config.get("model")
        if not model_name:
            raise ValueError("Model name is required for LLM")

        try:
            _ = vllmModelConfig(model_name, trust_remote_code=True)
            backend = "vllm"
            logger.info(f"Auto-detected vLLM as the best backend for model {model_name}")
        except Exception as e:
            logger.info(
                f"The LLM doesn't seem to be supported by vLLM yet (error: {e}), using HuggingFace as the backend "
                f"for model: {model_name}. If you are sure that the LLM is supported by vLLM, you can set `type: vllm` "
                "in the config file to force using vLLM."
            )
            backend = "hf"

    assert backend in [
        "hf",
        "vllm",
        "auto",
    ], f"Invalid backend: {backend}, only `hf`, `vllm`, and `auto` are supported."

    if backend == "hf":
        llm_model = config.model
        llm_device = config.device
        llm_dtype = config.dtype
        llm_generation_kwargs = config.get("generation_kwargs", {})
        if llm_generation_kwargs is not None:
            llm_generation_kwargs = OmegaConf.to_container(llm_generation_kwargs, resolve=True)
        llm_apply_chat_template_kwargs = config.get("apply_chat_template_kwargs", None)
        if llm_apply_chat_template_kwargs is not None:
            llm_apply_chat_template_kwargs = OmegaConf.to_container(llm_apply_chat_template_kwargs, resolve=True)
        llm_thinking_budget = config.get("thinking_budget", 0)
        return HuggingFaceLLMService(
            model=llm_model,
            device=llm_device,
            dtype=llm_dtype,
            generation_kwargs=llm_generation_kwargs,
            apply_chat_template_kwargs=llm_apply_chat_template_kwargs,
            thinking_budget=llm_thinking_budget,
        )
    elif backend == "vllm":
        llm_model = config.get("model", "vllm_server")
        llm_api_key = config.get("api_key", "None")
        llm_base_url = config.get("base_url", "http://localhost:8000/v1")
        llm_organization = config.get("organization", "None")
        llm_project = config.get("project", "None")
        llm_default_headers = config.get("default_headers", None)
        llm_params = config.get("vllm_generation_params", None)
        llm_dtype = config.dtype
        vllm_server_params = config.get("vllm_server_params", None)
        if vllm_server_params is not None:
            if "dtype" not in vllm_server_params:
                vllm_server_params = f"--dtype {llm_dtype} {vllm_server_params}"
                logger.info(f"Adding dtype {llm_dtype} to vllm_server_params: {vllm_server_params}")
        if llm_params is not None:
            # cast into OpenAILLMService.InputParams object
            llm_params = OmegaConf.to_container(llm_params, resolve=True)
            extra = llm_params.get("extra", None)
            # ensure extra is a dictionary
            if extra is None:
                llm_params["extra"] = {}
            elif not isinstance(extra, dict):
                raise ValueError(f"extra must be a dictionary, got {type(extra)}")
            llm_params = OpenAILLMService.InputParams(**llm_params)
        else:
            llm_params = OpenAILLMService.InputParams()
        llm_thinking_budget = config.get("thinking_budget", 0)
        return VLLMService(
            model=llm_model,
            api_key=llm_api_key,
            base_url=llm_base_url,
            organization=llm_organization,
            project=llm_project,
            default_headers=llm_default_headers,
            params=llm_params,
            thinking_budget=llm_thinking_budget,
            start_vllm_on_init=config.get("start_vllm_on_init", False),
            vllm_server_params=vllm_server_params,
        )
    else:
        raise ValueError(f"Invalid LLM backend: {backend}")
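For reference, a minimal usage sketch for `get_llm_service_from_config`. The keys mirror the accesses above; the concrete values (model id, dtype, generation kwargs) are illustrative assumptions, not defaults shipped with the voice agent:

from omegaconf import OmegaConf

# Hypothetical config; keys mirror what get_llm_service_from_config reads.
llm_config = OmegaConf.create(
    {
        "type": "hf",  # one of "hf", "vllm", "auto"
        "model": "meta-llama/Llama-3.1-8B-Instruct",  # illustrative model id
        "device": "cuda",
        "dtype": "bfloat16",
        "generation_kwargs": {"max_new_tokens": 256},
        "thinking_budget": 0,
    }
)
llm_service = get_llm_service_from_config(llm_config)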
nemo/agents/voice_agent/pipecat/services/nemo/streaming_asr.py
ADDED
|
@@ -0,0 +1,319 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# NOTE: This file will be deprecated in the future, as the new inference pipeline will replace it.
|
| 15 |
+
|
| 16 |
+
import math
|
| 17 |
+
import time
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
from typing import List, Optional
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import torch
|
| 23 |
+
from omegaconf import open_dict
|
| 24 |
+
|
| 25 |
+
import nemo.collections.asr as nemo_asr
|
| 26 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.utils import CacheFeatureBufferer
|
| 27 |
+
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
|
| 28 |
+
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@dataclass
|
| 32 |
+
class ASRResult:
|
| 33 |
+
text: str
|
| 34 |
+
is_final: bool
|
| 35 |
+
eou_prob: Optional[float] = None
|
| 36 |
+
eob_prob: Optional[float] = None
|
| 37 |
+
eou_latency: Optional[float] = None
|
| 38 |
+
eob_latency: Optional[float] = None
|
| 39 |
+
processing_time: Optional[float] = None
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class NemoStreamingASRService:
|
| 43 |
+
def __init__(
|
| 44 |
+
self,
|
| 45 |
+
model: str = "nvidia/parakeet_realtime_eou_120m-v1",
|
| 46 |
+
att_context_size: List[int] = [70, 1],
|
| 47 |
+
device: str = "cuda",
|
| 48 |
+
eou_string: str = "<EOU>",
|
| 49 |
+
eob_string: str = "<EOB>",
|
| 50 |
+
decoder_type: Optional[str] = None,
|
| 51 |
+
chunk_size: int = -1,
|
| 52 |
+
shift_size: int = -1,
|
| 53 |
+
left_chunks: int = 2,
|
| 54 |
+
sample_rate: int = 16000,
|
| 55 |
+
frame_len_in_secs: float = 0.08,
|
| 56 |
+
use_amp: bool = False,
|
| 57 |
+
chunk_size_in_secs: float = 0.08,
|
| 58 |
+
):
|
| 59 |
+
self.model = model
|
| 60 |
+
self.eou_string = eou_string
|
| 61 |
+
self.eob_string = eob_string
|
| 62 |
+
self.device = device
|
| 63 |
+
self.att_context_size = att_context_size
|
| 64 |
+
self.decoder_type = decoder_type
|
| 65 |
+
self.chunk_size = chunk_size
|
| 66 |
+
self.shift_size = shift_size
|
| 67 |
+
self.left_chunks = left_chunks
|
| 68 |
+
self.asr_model = self._load_model(model)
|
| 69 |
+
self.tokenizer: SentencePieceTokenizer = self.asr_model.tokenizer
|
| 70 |
+
self.use_amp = use_amp
|
| 71 |
+
self.pad_and_drop_preencoded = False
|
| 72 |
+
self.blank_id = self.get_blank_id()
|
| 73 |
+
self.chunk_size_in_secs = chunk_size_in_secs
|
| 74 |
+
|
| 75 |
+
assert len(self.att_context_size) == 2, "Att context size must be a list of two integers"
|
| 76 |
+
assert (
|
| 77 |
+
self.att_context_size[0] >= 0
|
| 78 |
+
), f"Left att context size must be greater than 0: {self.att_context_size[0]}"
|
| 79 |
+
assert (
|
| 80 |
+
self.att_context_size[1] >= 0
|
| 81 |
+
), f"Right att context size must be greater than 0: {self.att_context_size[1]}"
|
| 82 |
+
|
| 83 |
+
window_stride_in_secs = self.asr_model.cfg.preprocessor.window_stride
|
| 84 |
+
model_stride = self.asr_model.cfg.encoder.subsampling_factor
|
| 85 |
+
self.model_chunk_size = self.asr_model.encoder.streaming_cfg.chunk_size
|
| 86 |
+
if isinstance(self.model_chunk_size, list):
|
| 87 |
+
self.model_chunk_size = self.model_chunk_size[1]
|
| 88 |
+
self.pre_encode_cache_size = self.asr_model.encoder.streaming_cfg.pre_encode_cache_size
|
| 89 |
+
if isinstance(self.pre_encode_cache_size, list):
|
| 90 |
+
self.pre_encode_cache_size = self.pre_encode_cache_size[1]
|
| 91 |
+
self.pre_encode_cache_size_in_secs = self.pre_encode_cache_size * window_stride_in_secs
|
| 92 |
+
|
| 93 |
+
self.tokens_per_frame = math.ceil(np.trunc(self.chunk_size_in_secs / window_stride_in_secs) / model_stride)
|
| 94 |
+
# overwrite the encoder streaming params with proper shift size for cache aware streaming
|
| 95 |
+
self.asr_model.encoder.setup_streaming_params(
|
| 96 |
+
chunk_size=self.model_chunk_size // model_stride, shift_size=self.tokens_per_frame
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
model_chunk_size_in_secs = self.model_chunk_size * window_stride_in_secs
|
| 100 |
+
|
| 101 |
+
self.buffer_size_in_secs = self.pre_encode_cache_size_in_secs + model_chunk_size_in_secs
|
| 102 |
+
|
| 103 |
+
self._audio_buffer = CacheFeatureBufferer(
|
| 104 |
+
sample_rate=sample_rate,
|
| 105 |
+
buffer_size_in_secs=self.buffer_size_in_secs,
|
| 106 |
+
chunk_size_in_secs=self.chunk_size_in_secs,
|
| 107 |
+
preprocessor_cfg=self.asr_model.cfg.preprocessor,
|
| 108 |
+
device=self.device,
|
| 109 |
+
)
|
| 110 |
+
self._reset_cache()
|
| 111 |
+
self._previous_hypotheses = self._get_blank_hypothesis()
|
| 112 |
+
self._last_transcript_timestamp = time.time()
|
| 113 |
+
print(f"NemoStreamingASRService initialized with model `{model}` on device `{self.device}`")
|
| 114 |
+
|
| 115 |
+
def _reset_cache(self):
|
| 116 |
+
(
|
| 117 |
+
self._cache_last_channel, # [17, B, 70, 512]
|
| 118 |
+
self._cache_last_time, # [17, B, 512, 8]
|
| 119 |
+
self._cache_last_channel_len, # B
|
| 120 |
+
) = self.asr_model.encoder.get_initial_cache_state(
|
| 121 |
+
1
|
| 122 |
+
) # batch size is 1
|
| 123 |
+
|
| 124 |
+
def _get_blank_hypothesis(self) -> List[Hypothesis]:
|
| 125 |
+
blank_hypothesis = Hypothesis(score=0.0, y_sequence=[], dec_state=None, timestamp=[], last_token=None)
|
| 126 |
+
return [blank_hypothesis]
|
| 127 |
+
|
| 128 |
+
@property
|
| 129 |
+
def drop_extra_pre_encoded(self):
|
| 130 |
+
return self.asr_model.encoder.streaming_cfg.drop_extra_pre_encoded
|
| 131 |
+
|
| 132 |
+
def get_blank_id(self):
|
| 133 |
+
return len(self.tokenizer.vocab)
|
| 134 |
+
|
| 135 |
+
def get_text_from_tokens(self, tokens: List[int]) -> str:
|
| 136 |
+
sep = "\u2581" # '▁'
|
| 137 |
+
tokens = [int(t) for t in tokens if t != self.blank_id]
|
| 138 |
+
if tokens:
|
| 139 |
+
pieces = self.tokenizer.ids_to_tokens(tokens)
|
| 140 |
+
text = "".join([p.replace(sep, ' ') if p.startswith(sep) else p for p in pieces])
|
| 141 |
+
else:
|
| 142 |
+
text = ""
|
| 143 |
+
return text
|
| 144 |
+
|
| 145 |
+
def _load_model(self, model: str):
|
| 146 |
+
if model.endswith(".nemo"):
|
| 147 |
+
asr_model = nemo_asr.models.ASRModel.restore_from(model, map_location=torch.device(self.device))
|
| 148 |
+
else:
|
| 149 |
+
asr_model = nemo_asr.models.ASRModel.from_pretrained(model, map_location=torch.device(self.device))
|
| 150 |
+
|
| 151 |
+
if self.decoder_type is not None and hasattr(asr_model, "cur_decoder"):
|
| 152 |
+
asr_model.change_decoding_strategy(decoder_type=self.decoder_type)
|
| 153 |
+
elif isinstance(asr_model, nemo_asr.models.EncDecCTCModel):
|
| 154 |
+
self.decoder_type = "ctc"
|
| 155 |
+
elif isinstance(asr_model, nemo_asr.models.EncDecRNNTModel):
|
| 156 |
+
self.decoder_type = "rnnt"
|
| 157 |
+
else:
|
| 158 |
+
raise ValueError("Decoder type not supported for this model.")
|
| 159 |
+
|
| 160 |
+
if self.att_context_size is not None:
|
| 161 |
+
if hasattr(asr_model.encoder, "set_default_att_context_size"):
|
| 162 |
+
asr_model.encoder.set_default_att_context_size(att_context_size=self.att_context_size)
|
| 163 |
+
else:
|
| 164 |
+
raise ValueError("Model does not support multiple lookaheads.")
|
| 165 |
+
else:
|
| 166 |
+
self.att_context_size = asr_model.cfg.encoder.att_context_size
|
| 167 |
+
|
| 168 |
+
decoding_cfg = asr_model.cfg.decoding
|
| 169 |
+
with open_dict(decoding_cfg):
|
| 170 |
+
decoding_cfg.strategy = "greedy"
|
| 171 |
+
decoding_cfg.compute_timestamps = False
|
| 172 |
+
decoding_cfg.preserve_alignments = True
|
| 173 |
+
if hasattr(asr_model, 'joint'): # if an RNNT model
|
| 174 |
+
decoding_cfg.greedy.max_symbols = 10
|
| 175 |
+
decoding_cfg.fused_batch_size = -1
|
| 176 |
+
asr_model.change_decoding_strategy(decoding_cfg)
|
| 177 |
+
|
| 178 |
+
if hasattr(asr_model.encoder, "set_default_att_context_size"):
|
| 179 |
+
asr_model.encoder.set_default_att_context_size(att_context_size=self.att_context_size)
|
| 180 |
+
|
| 181 |
+
# chunk_size is set automatically for models trained for streaming.
|
| 182 |
+
# For models trained for offline mode with full context, we need to pass the chunk_size explicitly.
|
| 183 |
+
if self.chunk_size > 0:
|
| 184 |
+
if self.shift_size < 0:
|
| 185 |
+
shift_size = self.chunk_size
|
| 186 |
+
else:
|
| 187 |
+
shift_size = self.shift_size
|
| 188 |
+
asr_model.encoder.setup_streaming_params(
|
| 189 |
+
chunk_size=self.chunk_size, left_chunks=self.left_chunks, shift_size=shift_size
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
asr_model.eval()
|
| 193 |
+
return asr_model
|
| 194 |
+
|
| 195 |
+
def _get_best_hypothesis(self, encoded, encoded_len, partial_hypotheses=None):
|
| 196 |
+
if self.decoder_type == "ctc":
|
| 197 |
+
best_hyp = self.asr_model.decoding.ctc_decoder_predictions_tensor(
|
| 198 |
+
encoded,
|
| 199 |
+
encoded_len,
|
| 200 |
+
return_hypotheses=True,
|
| 201 |
+
)
|
| 202 |
+
elif self.decoder_type == "rnnt":
|
| 203 |
+
best_hyp = self.asr_model.decoding.rnnt_decoder_predictions_tensor(
|
| 204 |
+
encoded, encoded_len, return_hypotheses=True, partial_hypotheses=partial_hypotheses
|
| 205 |
+
)
|
| 206 |
+
else:
|
| 207 |
+
raise ValueError("Decoder type not supported for this model.")
|
| 208 |
+
return best_hyp
|
| 209 |
+
|
| 210 |
+
def _get_tokens_and_probs_from_alignments(self, alignments):
|
| 211 |
+
tokens = []
|
| 212 |
+
probs = []
|
| 213 |
+
if self.decoder_type == "ctc":
|
| 214 |
+
all_logits = alignments[0]
|
| 215 |
+
all_tokens = alignments[1]
|
| 216 |
+
for i in range(len(all_tokens)):
|
| 217 |
+
token_id = int(all_tokens[i])
|
| 218 |
+
if token_id != self.blank_id:
|
| 219 |
+
tokens.append(token_id)
|
| 220 |
+
logits = all_logits[i] # shape (vocab_size,)
|
| 221 |
+
probs_i = torch.softmax(logits, dim=-1)[token_id].item()
|
| 222 |
+
probs.append(probs_i)
|
| 223 |
+
elif self.decoder_type == "rnnt":
|
| 224 |
+
for t in range(len(alignments)):
|
| 225 |
+
for u in range(len(alignments[t])):
|
| 226 |
+
logits, token_id = alignments[t][u] # (logits, token_id)
|
| 227 |
+
token_id = int(token_id)
|
| 228 |
+
if token_id != self.blank_id:
|
| 229 |
+
tokens.append(token_id)
|
| 230 |
+
probs_i = torch.softmax(logits, dim=-1)[token_id].item()
|
| 231 |
+
probs.append(probs_i)
|
| 232 |
+
else:
|
| 233 |
+
raise ValueError("Decoder type not supported for this model.")
|
| 234 |
+
|
| 235 |
+
return tokens, probs
|
| 236 |
+
|
| 237 |
+
def transcribe(self, audio: bytes, stream_id: str = "default") -> ASRResult:
|
| 238 |
+
start_time = time.time()
|
| 239 |
+
|
| 240 |
+
# Convert bytes to numpy array
|
| 241 |
+
audio_array = np.frombuffer(audio, dtype=np.int16).astype(np.float32) / 32768.0
|
| 242 |
+
|
| 243 |
+
self._audio_buffer.update(audio_array)
|
| 244 |
+
|
| 245 |
+
features = self._audio_buffer.get_feature_buffer()
|
| 246 |
+
feature_lengths = torch.tensor([features.shape[1]], device=self.device)
|
| 247 |
+
features = features.unsqueeze(0) # Add batch dimension
|
| 248 |
+
|
| 249 |
+
with torch.no_grad():
|
| 250 |
+
(
|
| 251 |
+
encoded,
|
| 252 |
+
encoded_len,
|
| 253 |
+
cache_last_channel,
|
| 254 |
+
cache_last_time,
|
| 255 |
+
cache_last_channel_len,
|
| 256 |
+
) = self.asr_model.encoder.cache_aware_stream_step(
|
| 257 |
+
processed_signal=features,
|
| 258 |
+
processed_signal_length=feature_lengths,
|
| 259 |
+
cache_last_channel=self._cache_last_channel,
|
| 260 |
+
cache_last_time=self._cache_last_time,
|
| 261 |
+
cache_last_channel_len=self._cache_last_channel_len,
|
| 262 |
+
keep_all_outputs=False,
|
| 263 |
+
drop_extra_pre_encoded=self.drop_extra_pre_encoded,
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
best_hyp = self._get_best_hypothesis(encoded, encoded_len, partial_hypotheses=self._previous_hypotheses)
|
| 267 |
+
|
| 268 |
+
self._previous_hypotheses = best_hyp
|
| 269 |
+
self._cache_last_channel = cache_last_channel
|
| 270 |
+
self._cache_last_time = cache_last_time
|
| 271 |
+
self._cache_last_channel_len = cache_last_channel_len
|
| 272 |
+
|
| 273 |
+
tokens, probs = self._get_tokens_and_probs_from_alignments(best_hyp[0].alignments)
|
| 274 |
+
|
| 275 |
+
text = self.get_text_from_tokens(tokens)
|
| 276 |
+
|
| 277 |
+
is_final = False
|
| 278 |
+
eou_latency = None
|
| 279 |
+
eob_latency = None
|
| 280 |
+
eou_prob = None
|
| 281 |
+
eob_prob = None
|
| 282 |
+
current_timestamp = time.time()
|
| 283 |
+
if self.eou_string in text or self.eob_string in text:
|
| 284 |
+
is_final = True
|
| 285 |
+
if self.eou_string in text:
|
| 286 |
+
eou_latency = (
|
| 287 |
+
current_timestamp - self._last_transcript_timestamp if text.strip() == self.eou_string else 0.0
|
| 288 |
+
)
|
| 289 |
+
eou_prob = self.get_eou_probability(tokens, probs, self.eou_string)
|
| 290 |
+
if self.eob_string in text:
|
| 291 |
+
eob_latency = (
|
| 292 |
+
current_timestamp - self._last_transcript_timestamp if text.strip() == self.eob_string else 0.0
|
| 293 |
+
)
|
| 294 |
+
eob_prob = self.get_eou_probability(tokens, probs, self.eob_string)
|
| 295 |
+
self.reset_state(stream_id=stream_id)
|
| 296 |
+
if text.strip():
|
| 297 |
+
self._last_transcript_timestamp = current_timestamp
|
| 298 |
+
|
| 299 |
+
processing_time = time.time() - start_time
|
| 300 |
+
return ASRResult(
|
| 301 |
+
text=text,
|
| 302 |
+
is_final=is_final,
|
| 303 |
+
eou_latency=eou_latency,
|
| 304 |
+
eob_latency=eob_latency,
|
| 305 |
+
eou_prob=eou_prob,
|
| 306 |
+
eob_prob=eob_prob,
|
| 307 |
+
processing_time=processing_time,
|
| 308 |
+
)
|
| 309 |
+
|
| 310 |
+
def reset_state(self, stream_id: str = "default"):
|
| 311 |
+
self._audio_buffer.reset()
|
| 312 |
+
self._reset_cache()
|
| 313 |
+
self._previous_hypotheses = self._get_blank_hypothesis()
|
| 314 |
+
self._last_transcript_timestamp = time.time()
|
| 315 |
+
|
| 316 |
+
def get_eou_probability(self, tokens: List[int], probs: List[float], eou_string: str = "<EOU>") -> float:
|
| 317 |
+
text_tokens = self.tokenizer.ids_to_tokens(tokens)
|
| 318 |
+
eou_index = text_tokens.index(eou_string)
|
| 319 |
+
return probs[eou_index]
|
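A rough driver sketch for `NemoStreamingASRService` (assumed setup: 16 kHz, 16-bit mono PCM fed in 80 ms chunks to match `chunk_size_in_secs=0.08`; synthetic audio stands in for a microphone, and this is not code taken from the repository):

import numpy as np

asr = NemoStreamingASRService()  # loads nvidia/parakeet_realtime_eou_120m-v1 by default
sample_rate = 16000
chunk_samples = int(0.08 * sample_rate)  # 80 ms per chunk, as configured above

pcm = (np.random.randn(sample_rate * 2) * 1000).astype(np.int16)  # stand-in for mic audio
for start in range(0, len(pcm) - chunk_samples + 1, chunk_samples):
    result = asr.transcribe(pcm[start : start + chunk_samples].tobytes())
    if result.is_final:
        # transcribe() resets its internal state once <EOU>/<EOB> is detected
        print(f"final: {result.text} (EOU prob: {result.eou_prob})")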
nemo/agents/voice_agent/pipecat/services/nemo/streaming_diar.py
ADDED
|
@@ -0,0 +1,212 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# NOTE: This file will be deprecated in the future, as the new inference pipeline will replace it.
|
| 15 |
+
|
| 16 |
+
from dataclasses import dataclass
|
| 17 |
+
from typing import Tuple
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from torch import Tensor
|
| 22 |
+
|
| 23 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.utils import CacheFeatureBufferer
|
| 24 |
+
from nemo.collections.asr.models import SortformerEncLabelModel
|
| 25 |
+
|
| 26 |
+
from nemo.collections.asr.modules.sortformer_modules import StreamingSortformerState
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
|
| 30 |
+
class DiarizationConfig:
|
| 31 |
+
"""Diarization configuration parameters for inference."""
|
| 32 |
+
|
| 33 |
+
model_path: str = "nvidia/diar_streaming_sortformer_4spk-v2"
|
| 34 |
+
device: str = "cuda"
|
| 35 |
+
|
| 36 |
+
log: bool = False # If True, log will be printed
|
| 37 |
+
max_num_speakers: int = 4
|
| 38 |
+
spkcache_len: int = 188
|
| 39 |
+
spkcache_refresh_rate: int = 144
|
| 40 |
+
fifo_len: int = 188
|
| 41 |
+
chunk_len: int = 6
|
| 42 |
+
chunk_left_context: int = 1
|
| 43 |
+
chunk_right_context: int = 7
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class NeMoStreamingDiarService:
|
| 47 |
+
def __init__(
|
| 48 |
+
self,
|
| 49 |
+
cfg: DiarizationConfig,
|
| 50 |
+
model: str,
|
| 51 |
+
frame_len_in_secs: float = 0.08,
|
| 52 |
+
sample_rate: int = 16000,
|
| 53 |
+
left_offset: int = 8,
|
| 54 |
+
right_offset: int = 8,
|
| 55 |
+
use_amp: bool = False,
|
| 56 |
+
compute_dtype: torch.dtype = torch.float32,
|
| 57 |
+
):
|
| 58 |
+
self.model = model
|
| 59 |
+
self.cfg = cfg
|
| 60 |
+
self.cfg.model_path = model
|
| 61 |
+
self.diarizer = self.build_diarizer()
|
| 62 |
+
self.device = cfg.device
|
| 63 |
+
self.use_amp = use_amp
|
| 64 |
+
self.compute_dtype = compute_dtype
|
| 65 |
+
self.frame_len_in_secs = frame_len_in_secs
|
| 66 |
+
self.left_offset = left_offset
|
| 67 |
+
self.right_offset = right_offset
|
| 68 |
+
self.chunk_size = self.cfg.chunk_len
|
| 69 |
+
self.buffer_size_in_secs = (
|
| 70 |
+
self.cfg.chunk_len * self.frame_len_in_secs + (self.left_offset + self.right_offset) * 0.01
|
| 71 |
+
)
|
| 72 |
+
self.max_num_speakers = self.cfg.max_num_speakers
|
| 73 |
+
|
| 74 |
+
self.feature_bufferer = CacheFeatureBufferer(
|
| 75 |
+
sample_rate=sample_rate,
|
| 76 |
+
buffer_size_in_secs=self.buffer_size_in_secs,
|
| 77 |
+
chunk_size_in_secs=self.cfg.chunk_len * self.frame_len_in_secs,
|
| 78 |
+
preprocessor_cfg=self.diarizer.cfg.preprocessor,
|
| 79 |
+
device=self.device,
|
| 80 |
+
)
|
| 81 |
+
self.streaming_state = self.init_streaming_state(batch_size=1)
|
| 82 |
+
self.total_preds = torch.zeros((1, 0, self.max_num_speakers), device=self.diarizer.device)
|
| 83 |
+
|
| 84 |
+
print(f"NeMoStreamingDiarService initialized with model `{model}` on device `{self.device}`")
|
| 85 |
+
|
| 86 |
+
def build_diarizer(self):
|
| 87 |
+
if self.cfg.model_path.endswith(".nemo"):
|
| 88 |
+
diar_model = SortformerEncLabelModel.restore_from(self.cfg.model_path, map_location=self.cfg.device)
|
| 89 |
+
else:
|
| 90 |
+
diar_model = SortformerEncLabelModel.from_pretrained(self.cfg.model_path, map_location=self.cfg.device)
|
| 91 |
+
|
| 92 |
+
# Streaming mode setup
|
| 93 |
+
diar_model.sortformer_modules.chunk_len = self.cfg.chunk_len
|
| 94 |
+
diar_model.sortformer_modules.spkcache_len = self.cfg.spkcache_len
|
| 95 |
+
diar_model.sortformer_modules.chunk_left_context = self.cfg.chunk_left_context
|
| 96 |
+
diar_model.sortformer_modules.chunk_right_context = self.cfg.chunk_right_context
|
| 97 |
+
diar_model.sortformer_modules.fifo_len = self.cfg.fifo_len
|
| 98 |
+
diar_model.sortformer_modules.log = self.cfg.log
|
| 99 |
+
diar_model.sortformer_modules.spkcache_refresh_rate = self.cfg.spkcache_refresh_rate
|
| 100 |
+
diar_model.eval()
|
| 101 |
+
|
| 102 |
+
return diar_model
|
| 103 |
+
|
| 104 |
+
def print_diar_result(self, diar_result: np.ndarray):
|
| 105 |
+
for t in range(diar_result.shape[0]):
|
| 106 |
+
spk_probs = ""
|
| 107 |
+
for s in range(diar_result.shape[1]):
|
| 108 |
+
spk_probs += f"{diar_result[t, s]:.2f} "
|
| 109 |
+
print(f"Time {t}: {spk_probs}")
|
| 110 |
+
|
| 111 |
+
def diarize(self, audio: bytes, stream_id: str = "default") -> str:
|
| 112 |
+
|
| 113 |
+
audio_array = np.frombuffer(audio, dtype=np.int16).astype(np.float32) / 32768.0
|
| 114 |
+
|
| 115 |
+
self.feature_bufferer.update(audio_array)
|
| 116 |
+
|
| 117 |
+
features = self.feature_bufferer.get_feature_buffer()
|
| 118 |
+
feature_buffers = features.unsqueeze(0) # add batch dimension
|
| 119 |
+
feature_buffers = feature_buffers.transpose(1, 2) # [batch, feature, time] -> [batch, time, feature]
|
| 120 |
+
feature_buffer_lens = torch.tensor([feature_buffers.shape[1]], device=self.device)
|
| 121 |
+
self.streaming_state, chunk_preds = self.stream_step(
|
| 122 |
+
processed_signal=feature_buffers,
|
| 123 |
+
processed_signal_length=feature_buffer_lens,
|
| 124 |
+
streaming_state=self.streaming_state,
|
| 125 |
+
total_preds=self.total_preds,
|
| 126 |
+
left_offset=self.left_offset,
|
| 127 |
+
right_offset=self.right_offset,
|
| 128 |
+
)
|
| 129 |
+
self.total_preds = chunk_preds
|
| 130 |
+
diar_result = chunk_preds[:, -self.chunk_size :, :].clone().cpu().numpy()
|
| 131 |
+
return diar_result[0]  # numpy array of shape [chunk_len, max_num_speakers], i.e. [6, 4] with the defaults
|
| 132 |
+
|
| 133 |
+
def reset_state(self, stream_id: str = "default"):
|
| 134 |
+
self.feature_bufferer.reset()
|
| 135 |
+
self.streaming_state = self.init_streaming_state(batch_size=1)
|
| 136 |
+
self.total_preds = torch.zeros((1, 0, self.max_num_speakers), device=self.diarizer.device)
|
| 137 |
+
|
| 138 |
+
def init_streaming_state(self, batch_size: int = 1) -> StreamingSortformerState:
|
| 139 |
+
"""
|
| 140 |
+
Initialize the streaming state for the diarization model.
|
| 141 |
+
|
| 142 |
+
Args:
|
| 143 |
+
batch_size: The batch size to use.
|
| 144 |
+
|
| 145 |
+
Returns:
|
| 146 |
+
SortformerStreamingState: The initialized streaming state.
|
| 147 |
+
"""
|
| 148 |
+
# Use the model's init_streaming_state method but convert to SortformerStreamingState format
|
| 149 |
+
nemo_state = self.diarizer.sortformer_modules.init_streaming_state(
|
| 150 |
+
batch_size=batch_size, async_streaming=self.diarizer.async_streaming, device=self.device
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
return nemo_state
|
| 154 |
+
|
| 155 |
+
def stream_step(
|
| 156 |
+
self,
|
| 157 |
+
processed_signal: Tensor,
|
| 158 |
+
processed_signal_length: Tensor,
|
| 159 |
+
streaming_state: StreamingSortformerState,
|
| 160 |
+
total_preds: Tensor,
|
| 161 |
+
left_offset: int = 0,
|
| 162 |
+
right_offset: int = 0,
|
| 163 |
+
) -> Tuple[StreamingSortformerState, Tensor]:
|
| 164 |
+
"""
|
| 165 |
+
Execute a single streaming step for diarization.
|
| 166 |
+
|
| 167 |
+
Args:
|
| 168 |
+
processed_signal: The processed audio signal.
|
| 169 |
+
processed_signal_length: The length of the processed signal.
|
| 170 |
+
streaming_state: The current streaming state.
|
| 171 |
+
total_preds: The total predictions so far.
|
| 172 |
+
left_offset: The left offset for the current chunk.
|
| 173 |
+
right_offset: The right offset for the current chunk.
|
| 174 |
+
|
| 175 |
+
Returns:
|
| 176 |
+
Tuple[SortformerStreamingState, Tensor]: The updated streaming state and predictions.
|
| 177 |
+
"""
|
| 178 |
+
# Move tensors to correct device
|
| 179 |
+
if processed_signal.device != self.device:
|
| 180 |
+
processed_signal = processed_signal.to(self.device)
|
| 181 |
+
|
| 182 |
+
if processed_signal_length.device != self.device:
|
| 183 |
+
processed_signal_length = processed_signal_length.to(self.device)
|
| 184 |
+
|
| 185 |
+
if total_preds is not None and total_preds.device != self.device:
|
| 186 |
+
total_preds = total_preds.to(self.device)
|
| 187 |
+
|
| 188 |
+
with (
|
| 189 |
+
torch.amp.autocast(device_type=self.device, dtype=self.compute_dtype, enabled=self.use_amp),
|
| 190 |
+
torch.inference_mode(),
|
| 191 |
+
torch.no_grad(),
|
| 192 |
+
):
|
| 193 |
+
try:
|
| 194 |
+
# Call the model's forward_streaming_step method
|
| 195 |
+
streaming_state, diar_pred_out_stream = self.diarizer.forward_streaming_step(
|
| 196 |
+
processed_signal=processed_signal,
|
| 197 |
+
processed_signal_length=processed_signal_length,
|
| 198 |
+
streaming_state=streaming_state,
|
| 199 |
+
total_preds=total_preds,
|
| 200 |
+
left_offset=left_offset,
|
| 201 |
+
right_offset=right_offset,
|
| 202 |
+
)
|
| 203 |
+
except Exception as e:
|
| 204 |
+
print(f"Error in diarizer streaming step: {e}")
|
| 205 |
+
# print the stack trace
|
| 206 |
+
import traceback
|
| 207 |
+
|
| 208 |
+
traceback.print_exc()
|
| 209 |
+
# Return the existing state and preds if there's an error
|
| 210 |
+
return streaming_state, total_preds
|
| 211 |
+
|
| 212 |
+
return streaming_state, diar_pred_out_stream
|
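`diarize()` returns a `[chunk_len, max_num_speakers]` array of per-frame speaker activity probabilities (6 x 4 with the defaults in `DiarizationConfig`). A small post-processing sketch; the 0.5 threshold is an assumption for illustration, not a value taken from the repository:

import numpy as np


def active_speakers(chunk_probs: np.ndarray, threshold: float = 0.5) -> list:
    # chunk_probs: [num_frames, num_speakers] sigmoid outputs from diarize()
    active = []
    for frame_probs in chunk_probs:
        # keep the indices of speakers whose probability clears the threshold
        active.append([spk for spk, p in enumerate(frame_probs) if p >= threshold])
    return active  # e.g. [[0], [0], [0, 1], ...] -> speakers active per 80 ms frame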
nemo/agents/voice_agent/pipecat/services/nemo/stt.py
ADDED
|
@@ -0,0 +1,316 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import asyncio
|
| 16 |
+
from datetime import datetime
|
| 17 |
+
from typing import AsyncGenerator, List, Optional
|
| 18 |
+
|
| 19 |
+
from loguru import logger
|
| 20 |
+
from pipecat.frames.frames import (
|
| 21 |
+
CancelFrame,
|
| 22 |
+
EndFrame,
|
| 23 |
+
ErrorFrame,
|
| 24 |
+
Frame,
|
| 25 |
+
InterimTranscriptionFrame,
|
| 26 |
+
StartFrame,
|
| 27 |
+
TranscriptionFrame,
|
| 28 |
+
VADUserStartedSpeakingFrame,
|
| 29 |
+
VADUserStoppedSpeakingFrame,
|
| 30 |
+
)
|
| 31 |
+
from pipecat.processors.frame_processor import FrameDirection
|
| 32 |
+
from pipecat.services.stt_service import STTService
|
| 33 |
+
from pipecat.transcriptions.language import Language
|
| 34 |
+
from pipecat.utils.time import time_now_iso8601
|
| 35 |
+
from pipecat.utils.tracing.service_decorators import traced_stt
|
| 36 |
+
from pydantic import BaseModel
|
| 37 |
+
|
| 38 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.audio_logger import AudioLogger
|
| 39 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.streaming_asr import NemoStreamingASRService
|
| 40 |
+
|
| 41 |
+
ASR_EOU_MODELS = ["nvidia/parakeet_realtime_eou_120m-v1"]
|
| 42 |
+
|
| 43 |
+
try:
|
| 44 |
+
# disable nemo logging
|
| 45 |
+
from nemo.utils import logging
|
| 46 |
+
|
| 47 |
+
level = logging.getEffectiveLevel()
|
| 48 |
+
logging.setLevel(logging.CRITICAL)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
except ModuleNotFoundError as e:
|
| 52 |
+
logger.error(f"Exception: {e}")
|
| 53 |
+
logger.error('In order to use NVIDIA NeMo STT, you need to `pip install "nemo_toolkit[all]"`.')
|
| 54 |
+
raise Exception(f"Missing module: {e}")
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class NeMoSTTInputParams(BaseModel):
|
| 58 |
+
"""Input parameters for NeMo STT service."""
|
| 59 |
+
|
| 60 |
+
language: Optional[Language] = Language.EN_US
|
| 61 |
+
att_context_size: Optional[List] = [70, 1]
|
| 62 |
+
frame_len_in_secs: Optional[float] = 0.08 # 80ms for FastConformer model
|
| 63 |
+
config_path: Optional[str] = None # path to the Niva ASR config file
|
| 64 |
+
raw_audio_frame_len_in_secs: Optional[float] = 0.016 # 16ms for websocket transport
|
| 65 |
+
buffer_size: Optional[int] = 5 # number of audio frames to buffer, 1 frame is 16ms
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class NemoSTTService(STTService):
|
| 69 |
+
"""NeMo Speech-to-Text service for Pipecat integration."""
|
| 70 |
+
|
| 71 |
+
def __init__(
|
| 72 |
+
self,
|
| 73 |
+
*,
|
| 74 |
+
model: Optional[str] = "nnvidia/parakeet_realtime_eou_120m-v1",
|
| 75 |
+
device: Optional[str] = "cuda:0",
|
| 76 |
+
sample_rate: Optional[int] = 16000,
|
| 77 |
+
params: Optional[NeMoSTTInputParams] = None,
|
| 78 |
+
has_turn_taking: Optional[bool] = None, # if None, it will be set by the model name
|
| 79 |
+
backend: Optional[str] = "legacy",
|
| 80 |
+
decoder_type: Optional[str] = "rnnt",
|
| 81 |
+
audio_logger: Optional[AudioLogger] = None,
|
| 82 |
+
**kwargs,
|
| 83 |
+
):
|
| 84 |
+
super().__init__(**kwargs)
|
| 85 |
+
self._queue = asyncio.Queue()
|
| 86 |
+
self._sample_rate = sample_rate
|
| 87 |
+
self._params = params or NeMoSTTInputParams()
|
| 88 |
+
self._model_name = model
|
| 89 |
+
if has_turn_taking is None:
|
| 90 |
+
has_turn_taking = model in ASR_EOU_MODELS
|
| 91 |
+
logger.info(f"Setting has_turn_taking to `{has_turn_taking}` based on model name: `{model}`")
|
| 92 |
+
self._has_turn_taking = has_turn_taking
|
| 93 |
+
self._backend = backend
|
| 94 |
+
self._decoder_type = decoder_type
|
| 95 |
+
self._audio_logger = audio_logger
|
| 96 |
+
self._is_vad_active = False
|
| 97 |
+
logger.info(f"NeMoSTTInputParams: {self._params}")
|
| 98 |
+
|
| 99 |
+
self._device = device
|
| 100 |
+
|
| 101 |
+
self._load_model()
|
| 102 |
+
|
| 103 |
+
self.audio_buffer = []
|
| 104 |
+
self.user_is_speaking = False
|
| 105 |
+
|
| 106 |
+
def _load_model(self):
|
| 107 |
+
if self._backend == "legacy":
|
| 108 |
+
self._model = NemoStreamingASRService(
|
| 109 |
+
self._model_name,
|
| 110 |
+
self._params.att_context_size,
|
| 111 |
+
device=self._device,
|
| 112 |
+
decoder_type=self._decoder_type,
|
| 113 |
+
frame_len_in_secs=self._params.frame_len_in_secs,
|
| 114 |
+
)
|
| 115 |
+
else:
|
| 116 |
+
raise ValueError(f"Invalid ASR backend: {self._backend}")
|
| 117 |
+
|
| 118 |
+
def can_generate_metrics(self) -> bool:
|
| 119 |
+
"""Indicates whether this service can generate metrics.
|
| 120 |
+
|
| 121 |
+
Returns:
|
| 122 |
+
bool: True, as this service supports metric generation.
|
| 123 |
+
"""
|
| 124 |
+
return True
|
| 125 |
+
|
| 126 |
+
async def start(self, frame: StartFrame):
|
| 127 |
+
"""Handle service start.
|
| 128 |
+
|
| 129 |
+
Args:
|
| 130 |
+
frame: StartFrame containing initial configuration
|
| 131 |
+
"""
|
| 132 |
+
await super().start(frame)
|
| 133 |
+
|
| 134 |
+
# Initialize the model if not already done
|
| 135 |
+
if not hasattr(self, "_model"):
|
| 136 |
+
self._load_model()
|
| 137 |
+
|
| 138 |
+
async def stop(self, frame: EndFrame):
|
| 139 |
+
"""Handle service stop.
|
| 140 |
+
|
| 141 |
+
Args:
|
| 142 |
+
frame: EndFrame that triggered this method
|
| 143 |
+
"""
|
| 144 |
+
await super().stop(frame)
|
| 145 |
+
# Clear any internal state if needed
|
| 146 |
+
await self._queue.put(None) # Signal to stop processing
|
| 147 |
+
|
| 148 |
+
async def cancel(self, frame: CancelFrame):
|
| 149 |
+
"""Handle service cancellation.
|
| 150 |
+
|
| 151 |
+
Args:
|
| 152 |
+
frame: CancelFrame that triggered this method
|
| 153 |
+
"""
|
| 154 |
+
await super().cancel(frame)
|
| 155 |
+
# Clear any internal state
|
| 156 |
+
await self._queue.put(None) # Signal to stop processing
|
| 157 |
+
self._queue = asyncio.Queue() # Reset the queue
|
| 158 |
+
|
| 159 |
+
async def run_stt(self, audio: bytes) -> AsyncGenerator[Frame, None]:
|
| 160 |
+
"""Process audio data and generate transcription frames.
|
| 161 |
+
|
| 162 |
+
Args:
|
| 163 |
+
audio: Raw audio bytes to transcribe
|
| 164 |
+
|
| 165 |
+
Yields:
|
| 166 |
+
Frame: Transcription frames containing the results
|
| 167 |
+
"""
|
| 168 |
+
timestamp_now = datetime.now()
|
| 169 |
+
await self.start_ttfb_metrics()
|
| 170 |
+
await self.start_processing_metrics()
|
| 171 |
+
if self._audio_logger is not None and self._audio_logger.first_audio_timestamp is None:
|
| 172 |
+
self._audio_logger.first_audio_timestamp = timestamp_now
|
| 173 |
+
|
| 174 |
+
try:
|
| 175 |
+
is_final = False
|
| 176 |
+
user_has_finished = False
|
| 177 |
+
transcription = None
|
| 178 |
+
self.audio_buffer.append(audio)
|
| 179 |
+
if len(self.audio_buffer) >= self._params.buffer_size:
|
| 180 |
+
audio = b"".join(self.audio_buffer)
|
| 181 |
+
self.audio_buffer = []
|
| 182 |
+
|
| 183 |
+
# Append to continuous user audio buffer for stereo conversation recording
|
| 184 |
+
if self._audio_logger is not None:
|
| 185 |
+
self._audio_logger.append_continuous_user_audio(audio)
|
| 186 |
+
|
| 187 |
+
asr_result = self._model.transcribe(audio)
|
| 188 |
+
transcription = asr_result.text
|
| 189 |
+
is_final = asr_result.is_final
|
| 190 |
+
if self._audio_logger is not None:
|
| 191 |
+
if self._is_vad_active:
|
| 192 |
+
is_first_frame = False
|
| 193 |
+
self._audio_logger.turn_audio_buffer.append(audio)
|
| 194 |
+
# Accumulate transcriptions for turn-based logging
|
| 195 |
+
if transcription:
|
| 196 |
+
self._audio_logger.turn_transcription_buffer.append(transcription)
|
| 197 |
+
self._audio_logger.stage_turn_audio_and_transcription(
|
| 198 |
+
timestamp_now=timestamp_now,
|
| 199 |
+
is_first_frame=is_first_frame,
|
| 200 |
+
additional_metadata={
|
| 201 |
+
"model": self._model_name,
|
| 202 |
+
"backend": self._backend,
|
| 203 |
+
},
|
| 204 |
+
)
|
| 205 |
+
eou_latency = asr_result.eou_latency
|
| 206 |
+
eob_latency = asr_result.eob_latency
|
| 207 |
+
eou_prob = asr_result.eou_prob
|
| 208 |
+
eob_prob = asr_result.eob_prob
|
| 209 |
+
if eou_latency is not None:
|
| 210 |
+
logger.debug(
|
| 211 |
+
f"EOU latency: {eou_latency: .4f} seconds. EOU probability: {eou_prob: .2f}."
|
| 212 |
+
f"Processing time: {asr_result.processing_time: .4f} seconds."
|
| 213 |
+
)
|
| 214 |
+
user_has_finished = True
|
| 215 |
+
if eob_latency is not None:
|
| 216 |
+
logger.debug(
|
| 217 |
+
f"EOB latency: {eob_latency: .4f} seconds. EOB probability: {eob_prob: .2f}."
|
| 218 |
+
f"Processing time: {asr_result.processing_time: .4f} seconds."
|
| 219 |
+
)
|
| 220 |
+
user_has_finished = True
|
| 221 |
+
await self.stop_ttfb_metrics()
|
| 222 |
+
await self.stop_processing_metrics()
|
| 223 |
+
|
| 224 |
+
if transcription:
|
| 225 |
+
logger.debug(f"Transcription (is_final={is_final}): `{transcription}`")
|
| 226 |
+
self.user_is_speaking = not user_has_finished
|
| 227 |
+
|
| 228 |
+
# Get the language from params or default to EN_US
|
| 229 |
+
language = self._params.language if self._params else Language.EN_US
|
| 230 |
+
|
| 231 |
+
# Create and push the transcription frame
|
| 232 |
+
if self._has_turn_taking:
|
| 233 |
+
# if turn taking is enabled, we push interim transcription frames
|
| 234 |
+
# and let the turn taking service handle the final transcription
|
| 235 |
+
frame_type = InterimTranscriptionFrame
|
| 236 |
+
else:
|
| 237 |
+
# otherwise, we use the is_final flag to determine the frame type
|
| 238 |
+
frame_type = TranscriptionFrame if is_final else InterimTranscriptionFrame
|
| 239 |
+
await self.push_frame(
|
| 240 |
+
frame_type(
|
| 241 |
+
transcription,
|
| 242 |
+
"", # No speaker ID in this implementation
|
| 243 |
+
time_now_iso8601(),
|
| 244 |
+
language,
|
| 245 |
+
result={"text": transcription},
|
| 246 |
+
)
|
| 247 |
+
)
|
| 248 |
+
|
| 249 |
+
# Handle the transcription
|
| 250 |
+
await self._handle_transcription(
|
| 251 |
+
transcript=transcription,
|
| 252 |
+
is_final=is_final,
|
| 253 |
+
language=language,
|
| 254 |
+
)
|
| 255 |
+
|
| 256 |
+
yield None
|
| 257 |
+
|
| 258 |
+
except Exception as e:
|
| 259 |
+
logger.error(f"Error in NeMo STT processing: {e}")
|
| 260 |
+
await self.push_frame(
|
| 261 |
+
ErrorFrame(
|
| 262 |
+
str(e),
|
| 263 |
+
time_now_iso8601(),
|
| 264 |
+
)
|
| 265 |
+
)
|
| 266 |
+
yield None
|
| 267 |
+
|
| 268 |
+
@traced_stt
|
| 269 |
+
async def _handle_transcription(self, transcript: str, is_final: bool, language: Optional[str] = None):
|
| 270 |
+
"""Handle a transcription result.
|
| 271 |
+
|
| 272 |
+
Args:
|
| 273 |
+
transcript: The transcribed text
|
| 274 |
+
is_final: Whether this is a final transcription
|
| 275 |
+
language: The language of the transcription
|
| 276 |
+
"""
|
| 277 |
+
pass # Base implementation - can be extended for specific handling needs
|
| 278 |
+
|
| 279 |
+
async def set_language(self, language: Language):
|
| 280 |
+
"""Update the service's recognition language.
|
| 281 |
+
|
| 282 |
+
Args:
|
| 283 |
+
language: New language for recognition
|
| 284 |
+
"""
|
| 285 |
+
if self._params:
|
| 286 |
+
self._params.language = language
|
| 287 |
+
else:
|
| 288 |
+
self._params = NeMoSTTInputParams(language=language)
|
| 289 |
+
|
| 290 |
+
logger.info(f"Switching STT language to: {language}")
|
| 291 |
+
|
| 292 |
+
async def set_model(self, model: str):
|
| 293 |
+
"""Update the service's model.
|
| 294 |
+
|
| 295 |
+
Args:
|
| 296 |
+
model: New model name/path to use
|
| 297 |
+
"""
|
| 298 |
+
await super().set_model(model)
|
| 299 |
+
self._model_name = model
|
| 300 |
+
self._load_model()
|
| 301 |
+
|
| 302 |
+
async def process_frame(self, frame: Frame, direction: FrameDirection):
|
| 303 |
+
"""Process incoming frames and handle VAD events."""
|
| 304 |
+
if isinstance(frame, VADUserStoppedSpeakingFrame) and isinstance(self._model, NemoStreamingASRService):
|
| 305 |
+
# manually reset the state of the model when end of utterance is detected by VAD
|
| 306 |
+
logger.debug("Resetting state of the model due to VADUserStoppedSpeakingFrame")
|
| 307 |
+
if self.user_is_speaking:
|
| 308 |
+
logger.debug(
|
| 309 |
+
"[EOU missing] STT failed to detect end of utterance before VAD detected user stopped speaking"
|
| 310 |
+
)
|
| 311 |
+
self._model.reset_state()
|
| 312 |
+
self._is_vad_active = False
|
| 313 |
+
elif isinstance(frame, VADUserStartedSpeakingFrame):
|
| 314 |
+
self._is_vad_active = True
|
| 315 |
+
|
| 316 |
+
await super().process_frame(frame, direction)
|
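With the defaults in `NeMoSTTInputParams`, `run_stt` accumulates `buffer_size` raw transport frames before calling the streaming ASR model, which lines up with the model's 80 ms frame length. A quick check of that arithmetic (values copied from the defaults above):

raw_frame_secs = 0.016   # raw_audio_frame_len_in_secs: 16 ms per websocket audio frame
buffer_size = 5          # frames accumulated before each transcribe() call
frame_len_secs = 0.08    # frame_len_in_secs expected by the streaming ASR model

assert abs(buffer_size * raw_frame_secs - frame_len_secs) < 1e-9  # 5 x 16 ms == 80 ms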
nemo/agents/voice_agent/pipecat/services/nemo/tts.py
ADDED
|
@@ -0,0 +1,892 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import asyncio
|
| 16 |
+
import inspect
|
| 17 |
+
import uuid
|
| 18 |
+
from collections.abc import AsyncGenerator
|
| 19 |
+
from datetime import datetime
|
| 20 |
+
from typing import Iterator, List, Optional
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import torch
|
| 24 |
+
from loguru import logger
|
| 25 |
+
from omegaconf import DictConfig, OmegaConf
|
| 26 |
+
from pipecat.frames.frames import (
|
| 27 |
+
CancelFrame,
|
| 28 |
+
EndFrame,
|
| 29 |
+
ErrorFrame,
|
| 30 |
+
Frame,
|
| 31 |
+
LLMTextFrame,
|
| 32 |
+
StartFrame,
|
| 33 |
+
TTSAudioRawFrame,
|
| 34 |
+
TTSStartedFrame,
|
| 35 |
+
TTSStoppedFrame,
|
| 36 |
+
)
|
| 37 |
+
from pipecat.services.llm_service import FunctionCallParams
|
| 38 |
+
from pipecat.services.tts_service import TTSService
|
| 39 |
+
|
| 40 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.audio_logger import AudioLogger
|
| 41 |
+
from nemo.agents.voice_agent.pipecat.utils.text.simple_text_aggregator import SimpleSegmentedTextAggregator
|
| 42 |
+
from nemo.agents.voice_agent.utils.tool_calling.mixins import ToolCallingMixin
|
| 43 |
+
from nemo.collections.tts.models import FastPitchModel, HifiGanModel
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class BaseNemoTTSService(TTSService, ToolCallingMixin):
|
| 47 |
+
"""Text-to-Speech service using Nemo TTS models.
|
| 48 |
+
|
| 49 |
+
This service works with any TTS model that exposes a generate(text) method
|
| 50 |
+
that returns audio data. The TTS generation runs in a dedicated background thread to
|
| 51 |
+
avoid blocking the main asyncio event loop, following the same pattern as NemoDiarService.
|
| 52 |
+
|
| 53 |
+
Args:
|
| 54 |
+
model: TTS model instance with a generate(text) method
|
| 55 |
+
sample_rate: Audio sample rate in Hz (defaults to 22050)
|
| 56 |
+
**kwargs: Additional arguments passed to TTSService
|
| 57 |
+
"""
|
| 58 |
+
|
| 59 |
+
def __init__(
|
| 60 |
+
self,
|
| 61 |
+
*,
|
| 62 |
+
model,
|
| 63 |
+
device: str = "cuda",
|
| 64 |
+
sample_rate: int = 22050,
|
| 65 |
+
think_tokens: Optional[List[str]] = None,
|
| 66 |
+
audio_logger: Optional[AudioLogger] = None,
|
| 67 |
+
        ignore_strings: Optional[List[str]] = None,
        **kwargs,
    ):
        super().__init__(sample_rate=sample_rate, **kwargs)
        logger.info(f"Initializing TTS service with model: {model} and device: {device}")
        self._model_name = model
        self._device = device
        self._model = self._setup_model()
        self._think_tokens = think_tokens
        self._audio_logger = audio_logger
        if think_tokens is not None:
            assert (
                isinstance(think_tokens, list) and len(think_tokens) == 2
            ), f"think_tokens must be a list of two strings, but got type {type(think_tokens)}: {think_tokens}"
        self._ignore_strings = set(ignore_strings) if ignore_strings is not None else None
        # Background processing infrastructure - no response handler needed
        self._tts_queue = asyncio.Queue()
        self._processing_task = None
        self._processing_running = False

        # Track pending requests with their response queues
        self._pending_requests = {}
        self._have_seen_think_tokens = False

    def reset(self):
        """Reset the TTS service."""
        self._text_aggregator.reset()

    def setup_tool_calling(self):
        """
        Setup the tool calling mixin by registering all available tools.
        """
        pass  # No tools by default

    def _setup_model(self):
        raise NotImplementedError("Subclass must implement _setup_model")

    def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
        raise NotImplementedError("Subclass must implement _generate_audio")

    def can_generate_metrics(self) -> bool:
        """Whether the TTS service can generate metrics."""
        return True

    async def start(self, frame: StartFrame):
        """Handle service start."""
        await super().start(frame)

        # Initialize the model if not already done
        if not hasattr(self, "_model") or self._model is None:
            self._model = self._setup_model()

        # Only start the background processing task - no response handler needed
        if not self._processing_task:
            self._processing_task = self.create_task(self._processing_task_handler())

    async def stop(self, frame: EndFrame):
        """Handle service stop."""
        await super().stop(frame)
        await self._stop_tasks()

    async def cancel(self, frame: CancelFrame):
        """Handle service cancellation."""
        await super().cancel(frame)
        await self._stop_tasks()

    async def _stop_tasks(self):
        """Stop background processing tasks."""
        self._processing_running = False
        await self._tts_queue.put(None)  # Signal to stop processing

        if self._processing_task:
            await self.cancel_task(self._processing_task)
            self._processing_task = None

    def _tts_processor(self):
        """Background processor that handles TTS generation calls."""
        try:
            while self._processing_running:
                try:
                    future = asyncio.run_coroutine_threadsafe(self._tts_queue.get(), self.get_event_loop())
                    request = future.result()

                    if request is None:  # Stop signal
                        logger.debug("Received stop signal in TTS background processor")
                        break

                    text, request_id = request
                    logger.debug(f"Processing TTS request for text: [{text}]")

                    # Get the response queue for this request
                    response_queue = None
                    future = asyncio.run_coroutine_threadsafe(
                        self._get_response_queue(request_id), self.get_event_loop()
                    )
                    response_queue = future.result()

                    if response_queue is None:
                        logger.warning(f"No response queue found for request {request_id}")
                        continue

                    # Process TTS generation
                    try:
                        audio_result = self._generate_audio(text)

                        # Send the result directly to the waiting request
                        asyncio.run_coroutine_threadsafe(
                            response_queue.put(('success', audio_result)), self.get_event_loop()
                        )
                    except Exception as e:
                        logger.error(f"Error in TTS generation: {e}")
                        # Send the error directly to the waiting request
                        asyncio.run_coroutine_threadsafe(response_queue.put(('error', e)), self.get_event_loop())

                except Exception as e:
                    logger.error(f"Error in background TTS processor: {e}")

        except Exception as e:
            logger.error(f"Background TTS processor fatal error: {e}")
        finally:
            logger.debug("Background TTS processor stopped")

    async def _get_response_queue(self, request_id: str):
        """Get the response queue for a specific request."""
        return self._pending_requests.get(request_id)

    async def _processing_task_handler(self):
        """Handler for the background processing task."""
        try:
            self._processing_running = True
            logger.debug("Starting background TTS processing task")
            await asyncio.to_thread(self._tts_processor)
        except asyncio.CancelledError:
            logger.debug("Background TTS processing task cancelled")
            self._processing_running = False
            raise
        finally:
            self._processing_running = False

    def _handle_think_tokens(self, text: str) -> Optional[str]:
        """
        Handle the thinking tokens for TTS.
        If the thinking tokens are not provided, return the text as is.
        Otherwise:
            If both thinking tokens appear in the text, return the text after the end-of-thinking token.
            If the LLM is thinking, return None.
            If the LLM is done thinking, return the text after the end-of-thinking token.
            If the LLM starts thinking, return the text before the start-of-thinking token.
            If the LLM is not thinking, return the text as is.
        """
        if not self._think_tokens or not text:
            return text
        elif self._think_tokens[0] in text and self._think_tokens[1] in text:
            # LLM finishes thinking in one chunk or outputs dummy thinking tokens
            logger.debug(f"LLM finishes thinking: {text}")
            idx = text.index(self._think_tokens[1])
            # only return the text after the end of thinking tokens
            text = text[idx + len(self._think_tokens[1]) :]
            self._have_seen_think_tokens = False
            logger.debug(f"Returning text after thinking: {text}")
            return text
        elif self._have_seen_think_tokens:
            # LLM is thinking
            if self._think_tokens[1] not in text:
                logger.debug(f"LLM is still thinking: {text}")
                # LLM is still thinking
                return None
            else:
                # LLM is done thinking
                logger.debug(f"LLM is done thinking: {text}")
                idx = text.index(self._think_tokens[1])
                # only return the text after the end of thinking tokens
                text = text[idx + len(self._think_tokens[1]) :]
                self._have_seen_think_tokens = False
                logger.debug(f"Returning text after thinking: {text}")
                return text
        elif self._think_tokens[0] in text:
            # LLM now starts thinking
            logger.debug(f"LLM starts thinking: {text}")
            self._have_seen_think_tokens = True
            # return text before the start of thinking tokens
            idx = text.index(self._think_tokens[0])
            text = text[:idx]
            logger.debug(f"Returning text before thinking: {text}")
            return text
        else:
            # LLM is not thinking
            return text

    def _drop_special_tokens(self, text: str) -> Optional[str]:
        """
        Drop the special tokens from the text.
        """
        if self._ignore_strings is None:
            return text
        for ignore_string in self._ignore_strings:
            if ignore_string in text:
                logger.debug(f"Dropping string `{ignore_string}` from text: `{text}`")
                text = text.replace(ignore_string, "")
        return text

    async def run_tts(self, text: str) -> AsyncGenerator[Frame, None]:
        """Generate speech from text using the NeMo TTS model."""

        if self._think_tokens is not None:
            text = self._handle_think_tokens(text)

        if not text:
            yield None
            return

        if self._ignore_strings is not None:
            text = self._drop_special_tokens(text)

        logger.debug(f"{self}: Generating TTS [{text}]")

        try:
            await self.start_ttfb_metrics()
            yield TTSStartedFrame()

            # Increment turn index at the start of agent speaking (only if speaker changed)
            if self._audio_logger is not None:
                self._audio_logger.increment_turn_index(speaker="agent")

            # Generate a unique request ID
            request_id = str(uuid.uuid4())

            # Create a response queue for this specific request
            request_queue = asyncio.Queue()
            self._pending_requests[request_id] = request_queue

            try:
                # Queue the TTS request for background processing
                await self._tts_queue.put((text, request_id))

                # Wait for the result directly from our request queue
                result = await request_queue.get()
                status, data = result

                if status == 'error':
                    logger.error(f"{self} TTS generation error: {data}")
                    yield ErrorFrame(error=f"TTS generation error: {str(data)}")
                    return

                audio_result = data
                if audio_result is None:
                    logger.error(f"{self} TTS model returned None for text: [{text}]")
                    yield ErrorFrame(error="TTS generation failed - no audio returned")
                    return

                await self.start_tts_usage_metrics(text)

                # Collect all audio for logging
                all_audio_bytes = b""
                # Capture the start time when TTS begins (not when it ends)
                if self._audio_logger is not None and self._audio_logger.first_audio_timestamp is None:
                    self._audio_logger.first_audio_timestamp = datetime.now()

                # Process the audio result (same as before)
                if (
                    inspect.isgenerator(audio_result)
                    or hasattr(audio_result, '__iter__')
                    and hasattr(audio_result, '__next__')
                ):
                    # Handle generator case
                    first_chunk = True
                    for audio_chunk in audio_result:
                        if first_chunk:
                            await self.stop_ttfb_metrics()
                            first_chunk = False
                            # Capture start time on first chunk
                            if self._audio_logger is not None:
                                tts_start_time = self._audio_logger.get_time_from_start_of_session()

                        if audio_chunk is None:
                            break

                        audio_bytes = self._convert_to_bytes(audio_chunk)
                        all_audio_bytes += audio_bytes
                        chunk_size = self.chunk_size
                        for i in range(0, len(audio_bytes), chunk_size):
                            audio_chunk_bytes = audio_bytes[i : i + chunk_size]
                            if not audio_chunk_bytes:
                                break

                            frame = TTSAudioRawFrame(
                                audio=audio_chunk_bytes, sample_rate=self.sample_rate, num_channels=1
                            )
                            yield frame
                else:
                    # Handle single result case
                    await self.stop_ttfb_metrics()
                    # Capture start time for single result
                    if self._audio_logger is not None:
                        tts_start_time = self._audio_logger.get_time_from_start_of_session()
                    audio_bytes = self._convert_to_bytes(audio_result)
                    all_audio_bytes = audio_bytes

                    chunk_size = self.chunk_size
                    for i in range(0, len(audio_bytes), chunk_size):
                        chunk = audio_bytes[i : i + chunk_size]
                        if not chunk:
                            break

                        frame = TTSAudioRawFrame(audio=chunk, sample_rate=self.sample_rate, num_channels=1)
                        yield frame

                # Log the complete audio if a logger is available
                if self._audio_logger is not None and all_audio_bytes:
                    try:
                        self._audio_logger.log_agent_audio(
                            audio_data=all_audio_bytes,
                            text=text,
                            sample_rate=self.sample_rate,
                            num_channels=1,
                            additional_metadata={
                                "model": self._model_name,
                            },
                            tts_generation_time=tts_start_time,
                        )
                    except Exception as e:
                        logger.warning(f"Failed to log agent audio: {e}")

                yield TTSStoppedFrame()

            finally:
                # Clean up the pending request
                if request_id in self._pending_requests:
                    del self._pending_requests[request_id]

        except Exception as e:
            logger.exception(f"{self} error generating TTS: {e}")
            error_message = f"TTS generation error: {str(e)}"
            yield ErrorFrame(error=error_message)

    def _convert_to_bytes(self, audio_data) -> bytes:
        """Convert various audio data formats to bytes."""
        if isinstance(audio_data, (bytes, bytearray)):
            return bytes(audio_data)

        if isinstance(audio_data, np.ndarray):
            # Ensure it's in the right format (16-bit PCM)
            if audio_data.dtype in [np.float32, np.float64]:
                # Convert float [-1, 1] to int16 [-32768, 32767]
                audio_data = np.clip(audio_data, -1.0, 1.0)  # Ensure values are in range
                audio_data = (audio_data * 32767).astype(np.int16)
            elif audio_data.dtype != np.int16:
                # Convert other integer types to int16
                audio_data = audio_data.astype(np.int16)
            return audio_data.tobytes()
        elif hasattr(audio_data, 'tobytes'):
            return audio_data.tobytes()
        else:
            return bytes(audio_data)


class NeMoFastPitchHiFiGANTTSService(BaseNemoTTSService):
    """Text-to-Speech service using the NeMo FastPitch and HiFiGAN models.

    More info: https://huggingface.co/nvidia/tts_en_fastpitch

    Args:
        fastpitch_model: FastPitch model name
        hifigan_model: HiFiGAN model name
        device: Device to run on (default: 'cuda')
        **kwargs: Additional arguments passed to BaseNemoTTSService
    """

    def __init__(
        self,
        fastpitch_model: str = "nvidia/tts_en_fastpitch",
        hifigan_model: str = "nvidia/tts_hifigan",
        device: str = "cuda",
        **kwargs,
    ):
        model_name = f"{fastpitch_model}+{hifigan_model}"
        self._fastpitch_model_name = fastpitch_model
        self._hifigan_model_name = hifigan_model
        super().__init__(model=model_name, device=device, **kwargs)
        self.setup_tool_calling()

    def _setup_model(self):
        logger.info(
            f"Loading FastPitch model={self._fastpitch_model_name} and HiFiGAN model={self._hifigan_model_name}"
        )
        self._fastpitch_model = self._setup_fastpitch_model(self._fastpitch_model_name)
        self._hifigan_model = self._setup_hifigan_model(self._hifigan_model_name)
        return self._fastpitch_model, self._hifigan_model

    def _setup_fastpitch_model(self, model_name: str):
        if model_name.endswith(".nemo"):
            fastpitch_model = FastPitchModel.restore_from(model_name, map_location=torch.device(self._device))
        else:
            fastpitch_model = FastPitchModel.from_pretrained(model_name, map_location=torch.device(self._device))
        fastpitch_model.eval()
        return fastpitch_model

    def _setup_hifigan_model(self, model_name: str):
        if model_name.endswith(".nemo"):
            hifigan_model = HifiGanModel.restore_from(model_name, map_location=torch.device(self._device))
        else:
            hifigan_model = HifiGanModel.from_pretrained(model_name, map_location=torch.device(self._device))
        hifigan_model.eval()
        return hifigan_model

    def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
        with torch.no_grad():
            parsed = self._fastpitch_model.parse(text)
            spectrogram = self._fastpitch_model.generate_spectrogram(tokens=parsed)
            audio = self._hifigan_model.convert_spectrogram_to_audio(spec=spectrogram)
            audio = audio.detach().view(-1).cpu().numpy()
            yield audio


class KokoroTTSService(BaseNemoTTSService):
    """Text-to-Speech service using the Kokoro-82M model.

    Kokoro is an open-weight TTS model with 82 million parameters.
    More info: https://huggingface.co/hexgrad/Kokoro-82M

    Args:
        lang_code: Language code for the model (default: 'a' for American English)
        voice: Voice to use (default: 'af_heart')
        device: Device to run on (default: 'cuda')
        sample_rate: Audio sample rate in Hz (default: 24000 for Kokoro)
        speed: Speaking speed multiplier (default: 1.0)
        download_all: Download all models for different languages (default: True)
        cache_models: Cache models on GPU for faster switching between languages (default: True)
        **kwargs: Additional arguments passed to BaseNemoTTSService
    """

    def __init__(
        self,
        model: str = "hexgrad/Kokoro-82M",
        lang_code: str = "a",
        voice: str = "af_heart",
        device: str = "cuda",
        sample_rate: int = 24000,
        speed: float = 1.0,
        download_all: bool = True,
        cache_models: bool = True,
        **kwargs,
    ):
        self._lang_code = lang_code
        self._voice = voice
        self._speed = speed
        assert speed > 0, "Speed must be greater than 0"
        self._original_speed = speed
        self._original_voice = voice
        self._gender = 'female' if voice[1] == 'f' else 'male'
        self._original_gender = self._gender
        self._original_lang_code = self._lang_code
        if download_all:
            self._model_maps = self._download_all_models(
                lang_code=["a", "b"], device=device, repo_id=model, cache_models=cache_models
            )
        else:
            self._model_maps = {}
        super().__init__(model=model, device=device, sample_rate=sample_rate, **kwargs)
        self.setup_tool_calling()

    def _setup_model(self, lang_code: Optional[str] = None, voice: Optional[str] = None):
        """Initialize the Kokoro pipeline."""
        try:
            from kokoro import KPipeline
        except ImportError:
            raise ImportError(
                "kokoro package is required for KokoroTTSService. Install it with: `pip install kokoro>=0.9.2`"
            )
        if lang_code is None:
            lang_code = self._lang_code
        if voice is None:
            voice = self._voice
        logger.info(f"Loading Kokoro TTS model with model={self._model_name}, lang_code={lang_code}, voice={voice}")
        if lang_code in self._model_maps:
            pipeline = self._model_maps[lang_code]
        else:
            pipeline = KPipeline(lang_code=lang_code, device=self._device, repo_id=self._model_name)
            self._model_maps[lang_code] = pipeline
        return pipeline

    def _download_all_models(
        self, lang_code: List[str] = ['a', 'b'], device="cuda", repo_id="hexgrad/Kokoro-82M", cache_models=True
    ):
        """Download all models for the Kokoro TTS service."""
        logger.info(f"Downloading all models for Kokoro TTS service with lang_code={lang_code}")
        from kokoro import KPipeline

        model_maps = {}

        for lang in lang_code:
            pipeline = KPipeline(lang_code=lang, device=device, repo_id=repo_id)
            if cache_models:
                model_maps[lang] = pipeline
            torch.cuda.empty_cache()
        return model_maps

    def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
        """Generate audio using the Kokoro pipeline.

        Args:
            text: Text to convert to speech

        Yields:
            Audio data as numpy arrays
        """
        try:
            # Generate audio using the Kokoro pipeline
            generator = self._model(text, voice=self._voice, speed=self._speed)

            # The generator yields tuples of (gs, ps, audio)
            # We only need the audio component
            for i, (gs, ps, audio) in enumerate(generator):
                logger.debug(
                    f"Kokoro generated audio chunk {i}: gs={gs}, ps={ps}, "
                    f"audio_shape={audio.shape if hasattr(audio, 'shape') else len(audio)}"
                )
                if isinstance(audio, torch.Tensor):
                    audio = audio.detach().cpu().numpy()
                # Kokoro returns audio as a numpy array in float32 format [-1, 1]
                # The base class will handle conversion to int16 bytes
                yield audio

        except Exception as e:
            logger.error(f"Error generating audio with Kokoro: {e}")
            raise

    async def tool_tts_set_speed(self, params: FunctionCallParams, speed_lambda: float):
        """
        Set a specific speaking speed for the assistant's voice.
        This tool should be called only when the user specifies the speed explicitly,
        such as "speak twice as fast", "speak half as slow", or "speak 1.5 times as fast".

        Inform the user of the result of this tool call. After calling this tool, continue the previous
        response if it was unfinished and was interrupted by the user; otherwise start a new response
        and ask if the user needs help with anything else. Avoid repeating previous responses.

        Args:
            speed_lambda: positive float, the relative change of the speaking speed to the original speed.
                E.g., 1.0 for the original speed, 1.25 for 25% faster than the original speed,
                0.8 for 20% slower than the original speed.
        """
        if speed_lambda <= 0:
            result = {
                "success": False,
                "message": f"Speed remains unchanged since the change is not a positive number: {speed_lambda}",
            }
            logger.debug(f"Speed remains unchanged since the change is not a positive number: {speed_lambda}")
        else:
            self._speed = speed_lambda * self._speed
            result = {
                "success": True,
                "message": f"Speed set to {speed_lambda} of the previous speed",
            }
            logger.debug(f"Speed set to {speed_lambda} of the previous speed {self._original_speed}")
        await params.result_callback(result)

    async def tool_tts_reset_speed(self, params: FunctionCallParams):
        """
        Reset the speaking speed to the original speed.

        Inform the user of the result of this tool call. After calling this tool, continue the previous
        response if it was unfinished and was interrupted by the user; otherwise start a new response
        and ask if the user needs help with anything else. Avoid repeating previous responses.
        """
        self._speed = self._original_speed
        result = {"success": True, "message": "Speaking speed is reset to the original one"}
        logger.debug(f"Speaking speed is reset to the original speed {self._original_speed}")
        await params.result_callback(result)

    async def tool_tts_speak_faster(self, params: FunctionCallParams):
        """
        Speak faster by increasing the speaking speed by 15% each time this function is called.

        Inform the user of the result of this tool call. After calling this tool, continue the previous
        response if it was unfinished and was interrupted by the user; otherwise start a new response
        and ask if the user needs help with anything else. Avoid repeating previous responses.
        """
        speed_lambda = 1.15
        self._speed = speed_lambda * self._speed
        result = {
            "success": True,
            "message": f"Speaking speed is increased to {speed_lambda} of the previous speed",
        }
        logger.debug(f"Speed is set to {speed_lambda} of the previous speed, new speed is {self._speed}")
        await params.result_callback(result)

    async def tool_tts_speak_slower(self, params: FunctionCallParams):
        """
        Speak slower by decreasing the speaking speed by 15% each time this function is called.

        Inform the user of the result of this tool call. After calling this tool, continue the previous
        response if it was unfinished and was interrupted by the user; otherwise start a new response
        and ask if the user needs help with anything else. Avoid repeating previous responses.
        """
        speed_lambda = 0.85
        self._speed = speed_lambda * self._speed
        result = {
            "success": True,
            "message": f"Speaking speed is decreased to {speed_lambda} of the previous speed",
        }
        logger.debug(f"Speed is set to {speed_lambda} of the previous speed, new speed is {self._speed}")
        await params.result_callback(result)

    async def tool_tts_set_voice(self, params: FunctionCallParams, accent: str, gender: str):
        """
        Set the accent and gender of the assistant's voice.
        This tool should be called only when the user specifies the accent and/or gender explicitly.

        Inform the user of the result of this tool call. After calling this tool, continue the previous
        response if it was unfinished and was interrupted by the user; otherwise start a new response
        and ask if the user needs help with anything else. Avoid repeating previous responses.

        Args:
            accent: Accent for the TTS model. Must be one of 'American English', 'British English',
                or 'current' for keeping the current accent.
            gender: Gender of the assistant's voice. Must be one of 'male', 'female',
                or 'current' for keeping the current gender.
        """
        await params.llm.push_frame(LLMTextFrame("Just a moment."))

        lang_code = "a" if accent == "American English" else "b" if accent == "British English" else "current"
        new_lang_code = self._lang_code
        new_gender = self._gender
        if lang_code != 'current':
            new_lang_code = lang_code
        if gender != 'current':
            new_gender = gender

        if new_lang_code == 'a':
            new_voice = 'af_heart' if new_gender == 'female' else 'am_michael'
        elif new_lang_code == 'b':
            new_voice = 'bf_emma' if new_gender == 'female' else 'bm_george'
        else:
            await params.result_callback(
                {
                    "success": False,
                    "message": f"Invalid language code: {new_lang_code} or gender: {new_gender}",
                }
            )
            return

        new_model = await asyncio.to_thread(self._setup_model, new_lang_code, new_voice)
        self._model = new_model
        self._lang_code = new_lang_code
        self._gender = new_gender
        self._voice = new_voice
        logger.debug(f"Language and voice are set to {new_lang_code} and {new_voice}")
        await params.result_callback({"success": True, "message": "Voice has been updated."})

    async def tool_tts_reset_voice(self, params: FunctionCallParams):
        """
        Reset the accent and voice to the original ones.

        Inform the user of the result of this tool call. After calling this tool, continue the previous
        response if it was unfinished and was interrupted by the user; otherwise start a new response
        and ask if the user needs help with anything else. Avoid repeating previous responses.
        """
        await params.llm.push_frame(LLMTextFrame("Of course."))

        new_model = await asyncio.to_thread(self._setup_model, self._original_lang_code, self._original_voice)
        self._model = new_model
        self._lang_code = self._original_lang_code
        self._gender = self._original_gender
        self._voice = self._original_voice
        logger.debug(
            f"Language and voice are reset to the original ones {self._original_lang_code} and {self._original_voice}"
        )
        await params.result_callback({"success": True, "message": "Voice has been reset to the original one."})

    def setup_tool_calling(self):
        """
        Setup the tool calling mixin by registering all available tools.
        """
        self.register_direct_function("tool_tts_reset_speed", self.tool_tts_reset_speed)
        self.register_direct_function("tool_tts_speak_faster", self.tool_tts_speak_faster)
        self.register_direct_function("tool_tts_speak_slower", self.tool_tts_speak_slower)
        self.register_direct_function("tool_tts_set_speed", self.tool_tts_set_speed)
        self.register_direct_function("tool_tts_set_voice", self.tool_tts_set_voice)
        self.register_direct_function("tool_tts_reset_voice", self.tool_tts_reset_voice)

    def reset(self):
        """
        Reset the voice and speed to the original ones.
        """
        self._text_aggregator.reset()
        self._speed = self._original_speed
        self._model = self._setup_model(self._original_lang_code, self._original_voice)
        self._lang_code = self._original_lang_code
        self._gender = self._original_gender
        self._voice = self._original_voice


class MagpieTTSService(BaseNemoTTSService):
    """Text-to-Speech service using the Magpie TTS model.

    Magpie is a multilingual TTS model with 357 million parameters.
    More info: https://huggingface.co/nvidia/magpie_tts_multilingual_357m

    Args:
        model: Model name or path to the Magpie TTS model.
        language: Language code for the model (default: 'en' for English)
        speaker: Speaker to use for the model (default: 'Sofia')
        apply_TN: Whether to apply text normalization (default: False)
        device: Device to run on (default: 'cuda')
        **kwargs: Additional arguments passed to BaseNemoTTSService
    """

    SPEAKER_MAP = {"John": 0, "Sofia": 1, "Aria": 2, "Jason": 3, "Leo": 4}

    def __init__(
        self,
        model: str = "nvidia/magpie_tts_multilingual_357m",
        language: str = "en",
        speaker: str = "Sofia",
        apply_TN: bool = False,
        device: str = "cuda",
        **kwargs,
    ):
        if speaker not in self.SPEAKER_MAP:
            raise ValueError(f"Invalid speaker: {speaker}, must be one of {list(self.SPEAKER_MAP.keys())}")
        self._language = language
        self._current_speaker = speaker
        self._apply_TN = apply_TN
        super().__init__(model=model, device=device, **kwargs)
        self.setup_tool_calling()

    def _setup_model(self):
        from nemo.collections.tts.models import MagpieTTSModel

        if self._model_name.endswith(".nemo"):
            model = MagpieTTSModel.restore_from(self._model_name, map_location=torch.device(self._device))
        else:
            model = MagpieTTSModel.from_pretrained(self._model_name, map_location=torch.device(self._device))
        model.eval()

        text = "Warming up the Magpie TTS model, this will help the model to respond faster for later requests."
        with torch.no_grad():
            _, _ = model.do_tts(
                text,
                language=self._language,
                apply_TN=self._apply_TN,
                speaker_index=self.SPEAKER_MAP[self._current_speaker],
            )
        torch.cuda.empty_cache()
        return model

    def _generate_audio(self, text: str) -> Iterator[np.ndarray]:
        audio, audio_len = self._model.do_tts(
            text,
            language=self._language,
            apply_TN=self._apply_TN,
            speaker_index=self.SPEAKER_MAP[self._current_speaker],
        )
        audio_len = audio_len.view(-1).item()
        audio = audio.detach().view(-1).cpu().numpy()
        yield audio[:audio_len]

    def setup_tool_calling(self):
        """No tools for now for the Magpie TTS service."""
        pass


def get_tts_service_from_config(config: DictConfig, audio_logger: Optional[AudioLogger] = None) -> BaseNemoTTSService:
    """Get the TTS service from the configuration.

    Args:
        config: The DictConfig object containing the TTS configuration.
        audio_logger: The audio logger to use for audio logging.
    Returns:
        The TTS service.
    """
    if isinstance(config, DictConfig):
        config = OmegaConf.to_container(config, resolve=True)
    model = config.get("model", None)
    device = config.get("device", "cuda")
    if config.get("type", None) != "nemo":
        raise ValueError(f"Invalid TTS type: {config.get('type', None)}, only 'nemo' is supported")
    if model is None:
        raise ValueError("Model is required for the NeMo TTS service")

    text_aggregator = SimpleSegmentedTextAggregator(
        punctuation_marks=config.get("extra_separator", None),
        ignore_marks=config.get("ignore_strings", None),
        min_sentence_length=config.get("min_sentence_length", 5),
        use_legacy_eos_detection=config.get("use_legacy_eos_detection", False),
    )

    if model == "fastpitch-hifigan":
        return NeMoFastPitchHiFiGANTTSService(
            fastpitch_model=config.get("main_model_id", None),
            hifigan_model=config.get("sub_model_id", None),
            device=device,
            text_aggregator=text_aggregator,
            think_tokens=config.get("think_tokens", None),
            audio_logger=audio_logger,
            ignore_strings=config.get("ignore_strings", None),
        )
    elif model == "magpie":
        return MagpieTTSService(
            model=config.get("main_model_id", None),
            language=config.get("language", "en"),
            speaker=config.get("speaker", "Sofia"),
            apply_TN=config.get("apply_TN", False),
            device=device,
            text_aggregator=text_aggregator,
            think_tokens=config.get("think_tokens", None),
            audio_logger=audio_logger,
            ignore_strings=config.get("ignore_strings", None),
        )
    elif model == "kokoro":
        return KokoroTTSService(
            model=config.get("main_model_id", "hexgrad/Kokoro-82M"),
            voice=config.get("sub_model_id", "af_heart"),
            device=device,
            speed=config.get("speed", 1.0),
            text_aggregator=text_aggregator,
            think_tokens=config.get("think_tokens", None),
            sample_rate=24000,
            audio_logger=audio_logger,
            ignore_strings=config.get("ignore_strings", None),
        )
    else:
        raise ValueError(f"Invalid model: {model}, only 'fastpitch-hifigan', 'magpie' and 'kokoro' are supported")
nemo/agents/voice_agent/pipecat/services/nemo/turn_taking.py
ADDED
@@ -0,0 +1,441 @@
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import time
|
| 16 |
+
from datetime import datetime
|
| 17 |
+
from pathlib import Path
|
| 18 |
+
from typing import List, Optional, Union
|
| 19 |
+
|
| 20 |
+
import yaml
|
| 21 |
+
from loguru import logger
|
| 22 |
+
from pipecat.frames.frames import (
|
| 23 |
+
BotStartedSpeakingFrame,
|
| 24 |
+
BotStoppedSpeakingFrame,
|
| 25 |
+
Frame,
|
| 26 |
+
InterimTranscriptionFrame,
|
| 27 |
+
StartInterruptionFrame,
|
| 28 |
+
TranscriptionFrame,
|
| 29 |
+
UserStartedSpeakingFrame,
|
| 30 |
+
UserStoppedSpeakingFrame,
|
| 31 |
+
VADUserStartedSpeakingFrame,
|
| 32 |
+
VADUserStoppedSpeakingFrame,
|
| 33 |
+
)
|
| 34 |
+
from pipecat.processors.frame_processor import FrameDirection, FrameProcessor
|
| 35 |
+
from pipecat.transcriptions.language import Language
|
| 36 |
+
from pipecat.utils.time import time_now_iso8601
|
| 37 |
+
|
| 38 |
+
from nemo.agents.voice_agent.pipecat.frames.frames import DiarResultFrame
|
| 39 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.audio_logger import AudioLogger
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class NeMoTurnTakingService(FrameProcessor):
|
| 43 |
+
"""Service for handling turn-taking in voice conversations with backchannel detection."""
|
| 44 |
+
|
| 45 |
+
def __init__(
|
| 46 |
+
self,
|
| 47 |
+
backchannel_phrases: Union[str, List[str]] = None,
|
| 48 |
+
eou_string: str = "<EOU>",
|
| 49 |
+
eob_string: str = "<EOB>",
|
| 50 |
+
language: Language = Language.EN_US,
|
| 51 |
+
use_vad: bool = True,
|
| 52 |
+
use_diar: bool = False,
|
| 53 |
+
max_buffer_size: int = 2,
|
| 54 |
+
bot_stop_delay: float = 0.5,
|
| 55 |
+
audio_logger: Optional[AudioLogger] = None,
|
| 56 |
+
can_create_user_frames: bool = True,
|
| 57 |
+
**kwargs,
|
| 58 |
+
):
|
| 59 |
+
super().__init__(**kwargs)
|
| 60 |
+
self.eou_string = eou_string
|
| 61 |
+
self.eob_string = eob_string
|
| 62 |
+
self.language = language
|
| 63 |
+
self.use_vad = use_vad
|
| 64 |
+
self.use_diar = use_diar
|
| 65 |
+
self.max_buffer_size = max_buffer_size
|
| 66 |
+
|
| 67 |
+
self.backchannel_phrases = self._load_backchannel_phrases(backchannel_phrases)
|
| 68 |
+
self.backchannel_phrases_nopc = set([self.clean_text(phrase) for phrase in self.backchannel_phrases])
|
| 69 |
+
self.bot_stop_delay = bot_stop_delay
|
| 70 |
+
self.can_create_user_frames = can_create_user_frames
|
| 71 |
+
# internal data
|
| 72 |
+
self._current_speaker_id = None
|
| 73 |
+
self._prev_speaker_id = None
|
| 74 |
+
self._bot_stop_time = None
|
| 75 |
+
self._bot_speaking = False
|
| 76 |
+
self._vad_user_speaking = False
|
| 77 |
+
self._have_sent_user_started_speaking = False
|
| 78 |
+
self._user_speaking_buffer = ""
|
| 79 |
+
self._audio_logger = audio_logger
|
| 80 |
+
if not self.use_vad:
|
| 81 |
+
# if vad is not used, we assume the user is always speaking
|
| 82 |
+
self._vad_user_speaking = True
|
| 83 |
+
|
| 84 |
+
def _load_backchannel_phrases(self, backchannel_phrases: Optional[Union[str, List[str]]] = None):
|
| 85 |
+
if not backchannel_phrases:
|
| 86 |
+
return []
|
| 87 |
+
|
| 88 |
+
if isinstance(backchannel_phrases, str) and Path(backchannel_phrases).is_file():
|
| 89 |
+
logger.info(f"Loading backchannel phrases from file: {backchannel_phrases}")
|
| 90 |
+
if not Path(backchannel_phrases).exists():
|
| 91 |
+
raise FileNotFoundError(f"Backchannel phrases file not found: {backchannel_phrases}")
|
| 92 |
+
with open(backchannel_phrases, "r") as f:
|
| 93 |
+
backchannel_phrases = yaml.safe_load(f)
|
| 94 |
+
if not isinstance(backchannel_phrases, list):
|
| 95 |
+
raise ValueError(f"Backchannel phrases must be a list, got {type(backchannel_phrases)}")
|
| 96 |
+
logger.info(f"Loaded {len(backchannel_phrases)} backchannel phrases from file: {backchannel_phrases}")
|
| 97 |
+
elif isinstance(backchannel_phrases, list):
|
| 98 |
+
logger.info(f"Using backchannel phrases from list: {backchannel_phrases}")
|
| 99 |
+
else:
|
| 100 |
+
raise ValueError(f"Invalid backchannel phrases: {backchannel_phrases}")
|
| 101 |
+
return backchannel_phrases
|
| 102 |
+
|
| 103 |
+
def reset(self):
|
| 104 |
+
"""
|
| 105 |
+
Reset the turn-taking service.
|
| 106 |
+
"""
|
| 107 |
+
self._current_speaker_id = None
|
| 108 |
+
self._prev_speaker_id = None
|
| 109 |
+
self._bot_stop_time = None
|
| 110 |
+
self._bot_speaking = False
|
| 111 |
+
self._vad_user_speaking = False
|
| 112 |
+
self._have_sent_user_started_speaking = False
|
| 113 |
+
self._user_speaking_buffer = ""
|
| 114 |
+
if not self.use_vad:
|
| 115 |
+
# if vad is not used, we assume the user is always speaking
|
| 116 |
+
self._vad_user_speaking = True
|
| 117 |
+
logger.debug("TurnTaking service reset complete")
|
| 118 |
+
|
| 119 |
+
def clean_text(self, text: str) -> str:
|
| 120 |
+
"""
|
| 121 |
+
Clean the text so that it can be used for backchannel detection.
|
| 122 |
+
"""
|
| 123 |
+
if self.language != Language.EN_US:
|
| 124 |
+
raise ValueError(f"Language {self.language} not supported, currently only English is supported.")
|
| 125 |
+
for eou_string in [self.eou_string, self.eob_string]:
|
| 126 |
+
if text.endswith(eou_string):
|
| 127 |
+
text = text[: -len(eou_string)].strip()
|
| 128 |
+
text = text.lower()
|
| 129 |
+
valid_chars = "abcdefghijklmnopqrstuvwxyz'"
|
| 130 |
+
text = ''.join([c for c in text if c in valid_chars or c.isspace() or c == "'"])
|
| 131 |
+
return " ".join(text.split()).strip()
|
| 132 |
+
|
| 133 |
+
def is_backchannel(self, text: str) -> bool:
|
| 134 |
+
"""
|
| 135 |
+
Check if the text is a backchannel phrase.
|
| 136 |
+
"""
|
| 137 |
+
if not self.backchannel_phrases:
|
| 138 |
+
return False
|
| 139 |
+
if text.startswith("<speaker_"):
|
| 140 |
+
# if the text starts with a speaker tag, we remove it
|
| 141 |
+
text = text[len("<speaker_0>") :]
|
| 142 |
+
text = self.clean_text(text)
|
| 143 |
+
return text in self.backchannel_phrases_nopc
|
| 144 |
+
|
| 145 |
+
async def process_frame(self, frame: Frame, direction: FrameDirection):
|
| 146 |
+
"""Process incoming frames and handle turn-taking logic."""
|
| 147 |
+
await super().process_frame(frame, direction)
|
| 148 |
+
|
| 149 |
+
if self._bot_stop_time is not None:
|
| 150 |
+
# check if the bot has stopped speaking for more than the delay
|
| 151 |
+
if time.time() - self._bot_stop_time > self.bot_stop_delay:
|
| 152 |
+
# set the _bot_speaking flag to False to actually consider the bot as stopped speaking
|
| 153 |
+
logger.debug(
|
| 154 |
+
f"Bot stopped speaking for more than {self.bot_stop_delay} seconds, setting _bot_speaking to False"
|
| 155 |
+
)
|
| 156 |
+
self._bot_stop_time = None
|
| 157 |
+
self._bot_speaking = False
|
| 158 |
+
|
| 159 |
+
if isinstance(frame, (TranscriptionFrame, InterimTranscriptionFrame)):
|
| 160 |
+
await self._handle_transcription(frame, direction)
|
| 161 |
+
elif isinstance(frame, VADUserStartedSpeakingFrame):
|
| 162 |
+
await self._handle_vad_user_started_speaking(frame, direction)
|
| 163 |
+
elif isinstance(frame, VADUserStoppedSpeakingFrame):
|
| 164 |
+
await self._handle_vad_user_stopped_speaking(frame, direction)
|
| 165 |
+
elif isinstance(frame, BotStartedSpeakingFrame):
|
| 166 |
+
logger.debug("BotStartedSpeakingFrame received")
|
| 167 |
+
self._bot_speaking = True
|
| 168 |
+
# Capture the actual start time when audio starts playing
|
| 169 |
+
# This is more accurate than capturing during TTS generation
|
| 170 |
+
if self._audio_logger:
|
| 171 |
+
self._audio_logger.set_agent_turn_start_time()
|
| 172 |
+
elif isinstance(frame, BotStoppedSpeakingFrame):
|
| 173 |
+
logger.debug("BotStoppedSpeakingFrame received")
|
| 174 |
+
self._bot_stop_time = time.time()
|
| 175 |
+
if self.bot_stop_delay is None or self.bot_stop_delay <= 0:
|
| 176 |
+
# only set the flag if the delay is not set or is 0
|
| 177 |
+
self._bot_speaking = False
|
| 178 |
+
logger.debug("Setting _bot_speaking to False")
|
| 179 |
+
elif isinstance(frame, DiarResultFrame):
|
| 180 |
+
logger.debug("DiarResultFrame received")
|
| 181 |
+
await self._handle_diar_result(frame, direction)
|
| 182 |
+
else:
|
| 183 |
+
await self.push_frame(frame, direction)
|
| 184 |
+
|
| 185 |
+
async def _handle_backchannel_text(self, text: str):
|
| 186 |
+
# ignore the backchannel string while bot is speaking
|
| 187 |
+
# push the backchannel string upstream, not downstream
|
| 188 |
+
await self.push_frame(
|
| 189 |
+
TranscriptionFrame(
|
| 190 |
+
text=f"({text})",
|
| 191 |
+
user_id="",
|
| 192 |
+
timestamp=time_now_iso8601(),
|
| 193 |
+
language=self.language if self.language else Language.EN_US,
|
| 194 |
+
result={"text": f"Backchannel detected: {text}"},
|
| 195 |
+
),
|
| 196 |
+
direction=FrameDirection.UPSTREAM,
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
async def _handle_transcription(
|
| 200 |
+
self, frame: TranscriptionFrame | InterimTranscriptionFrame, direction: FrameDirection
|
| 201 |
+
):
|
| 202 |
+
text_segment = frame.text
|
| 203 |
+
if self._vad_user_speaking:
|
| 204 |
+
self._user_speaking_buffer += text_segment
|
| 205 |
+
has_eou = self._user_speaking_buffer.endswith(self.eou_string)
|
| 206 |
+
has_eob = self._user_speaking_buffer.endswith(self.eob_string)
|
| 207 |
+
if has_eou:
|
| 208 |
+
# EOU detected, user is done speaking - push completed text and interrupt bot
|
| 209 |
+
logger.debug(f"<EOU> Detected: `{self._user_speaking_buffer}`")
|
| 210 |
+
completed_text = self._user_speaking_buffer[: -len(self.eou_string)].strip()
|
| 211 |
+
if self._bot_speaking and self.is_backchannel(completed_text):
|
| 212 |
+
logger.debug(f"<EOU> detected for a backchannel phrase while bot is speaking: `{completed_text}`")
|
| 213 |
+
await self._handle_backchannel_text(completed_text)
|
| 214 |
+
if self._audio_logger:
|
| 215 |
+
if self._audio_logger.staged_metadata is None:
|
| 216 |
+
self._audio_logger.staged_metadata = {"is_backchannel": True, "start_time": datetime.now()}
|
| 217 |
+
else:
|
| 218 |
+
self._audio_logger.staged_metadata["is_backchannel"] = True
|
| 219 |
+
|
| 220 |
+
else:
|
| 221 |
+
await self._handle_completed_text(completed_text, direction)
|
| 222 |
+
await self._handle_user_interruption(UserStoppedSpeakingFrame())
|
| 223 |
+
self._user_speaking_buffer = ""
|
| 224 |
+
self._have_sent_user_started_speaking = False # user is done speaking, so we reset the flag
|
| 225 |
+
elif has_eob and self._bot_speaking:
|
| 226 |
+
logger.debug(f"<EOB> detected while bot is speaking: `{self._user_speaking_buffer}`")
|
| 227 |
+
await self._handle_backchannel_text(str(self._user_speaking_buffer))
|
| 228 |
+
if self._audio_logger:
|
| 229 |
+
if self._audio_logger.staged_metadata is None:
|
| 230 |
+
self._audio_logger.staged_metadata = {"is_backchannel": True, "start_time": datetime.now()}
|
| 231 |
+
else:
|
| 232 |
+
self._audio_logger.staged_metadata["is_backchannel"] = True
|
| 233 |
+
self._user_speaking_buffer = ""
|
| 234 |
+
self._have_sent_user_started_speaking = False # user is done speaking, so we reset the flag
|
| 235 |
+
else:
|
| 236 |
+
# if bot is not speaking, the backchannel string is not considered a backchannel phrase
|
| 237 |
+
# user is still speaking, so we append the text segment to the buffer
|
| 238 |
+
logger.debug(f"User is speaking: `{self._user_speaking_buffer}`")
|
| 239 |
+
if has_eob:
|
| 240 |
+
logger.debug(
|
| 241 |
+
f"{self.eob_string} detected but ignored (bot NOT speaking): "
|
| 242 |
+
f"`{self._user_speaking_buffer}`"
|
| 243 |
+
)
|
| 244 |
+
self._user_speaking_buffer = self._user_speaking_buffer[: -len(self.eob_string)].strip()
|
| 245 |
+
# assume the last word is not completed
|
| 246 |
+
completed_words = self._user_speaking_buffer.strip().split()[:-1]
|
| 247 |
+
if len(completed_words) >= self.max_buffer_size:
|
| 248 |
+
completed_text = " ".join(completed_words)
|
| 249 |
+
await self._handle_completed_text(completed_text, direction, is_final=False)
|
| 250 |
+
|
| 251 |
+
else:
|
| 252 |
+
# if vad is not detecting user speaking
|
| 253 |
+
logger.debug(
|
| 254 |
+
f"VAD is not detecting user speaking, but still received text segment from STT: `{text_segment}`"
|
| 255 |
+
)
|
| 256 |
+
is_backchannel = self.is_backchannel(text_segment)
|
| 257 |
+
if text_segment.endswith(self.eob_string):
|
| 258 |
+
is_backchannel = True
|
| 259 |
+
logger.debug(f"Dropping EOB token: `{text_segment}`")
|
| 260 |
+
text_segment = text_segment[: -len(self.eob_string)].strip()
|
| 261 |
+
elif text_segment.endswith(self.eou_string):
|
| 262 |
+
logger.debug(f"Dropping EOU token: `{text_segment}`")
|
| 263 |
+
text_segment = text_segment[: -len(self.eou_string)].strip()
|
| 264 |
+
|
| 265 |
+
if not text_segment.strip():
|
| 266 |
+
return
|
| 267 |
+
if is_backchannel and self._bot_speaking:
|
| 268 |
+
logger.debug(f"Backchannel detected while bot is speaking: `{text_segment}`")
|
| 269 |
+
# push the backchannel string upstream, not downstream
|
| 270 |
+
curr_text = str(self._user_speaking_buffer + text_segment)
|
| 271 |
+
self._user_speaking_buffer = ""
|
| 272 |
+
if self._audio_logger:
|
| 273 |
+
if self._audio_logger.staged_metadata is None:
|
| 274 |
+
self._audio_logger.staged_metadata = {"is_backchannel": True, "start_time": datetime.now()}
|
| 275 |
+
else:
|
| 276 |
+
self._audio_logger.staged_metadata["is_backchannel"] = True
|
| 277 |
+
await self.push_frame(
|
| 278 |
+
TranscriptionFrame(
|
| 279 |
+
text=f"({curr_text})",
|
| 280 |
+
user_id="",
|
| 281 |
+
timestamp=time_now_iso8601(),
|
| 282 |
+
language=self.language if self.language else Language.EN_US,
|
| 283 |
+
result={"text": f"Backchannel detected: {self._user_speaking_buffer+text_segment}"},
|
| 284 |
+
),
|
| 285 |
+
direction=FrameDirection.UPSTREAM,
|
| 286 |
+
)
|
| 287 |
+
|
| 288 |
+
else:
|
| 289 |
+
# if the text segment is not empty and have non-space characters, we append it to the buffer
|
| 290 |
+
self._user_speaking_buffer += text_segment
|
| 291 |
+
if self.is_backchannel(self._user_speaking_buffer):
|
| 292 |
+
logger.debug(f"Backchannel detected: `{self._user_speaking_buffer}`")
|
| 293 |
+
self._user_speaking_buffer = ""
|
| 294 |
+
self._have_sent_user_started_speaking = False
|
| 295 |
+
return
|
| 296 |
+
logger.debug(f"Appending text segment to user speaking buffer: `{self._user_speaking_buffer}`")
|
| 297 |
+
|
| 298 |
+
async def _handle_completed_text(self, completed_text: str, direction: FrameDirection, is_final: bool = True):
|
| 299 |
+
if not self._have_sent_user_started_speaking:
|
| 300 |
+
# if we haven't sent the user started speaking frame, we send it now
|
| 301 |
+
# so that the bot can be interrupted and be ready to respond to the new user turn
|
| 302 |
+
await self._handle_user_interruption(UserStartedSpeakingFrame())
|
| 303 |
+
self._have_sent_user_started_speaking = True
|
| 304 |
+
|
| 305 |
+
completed_text = completed_text.strip()
|
| 306 |
+
completed_text = completed_text.replace(self.eou_string, "").replace(self.eob_string, "")
|
| 307 |
+
|
| 308 |
+
if self.use_diar and not completed_text.startswith("<speaker_") and self._prev_speaker_id is not None:
|
| 309 |
+
# Add the previous speaker tag to the beginning of the text
|
| 310 |
+
completed_text = f"<speaker_{self._prev_speaker_id}> {completed_text}"
|
| 311 |
+
|
| 312 |
+
frame_type = TranscriptionFrame if is_final else InterimTranscriptionFrame
|
| 313 |
+
text_frame = frame_type(
|
| 314 |
+
text=completed_text,
|
| 315 |
+
user_id="", # No speaker ID in this implementation
|
| 316 |
+
timestamp=time_now_iso8601(),
|
| 317 |
+
language=self.language if self.language else Language.EN_US,
|
| 318 |
+
result={"text": completed_text},
|
| 319 |
+
)
|
| 320 |
+
logger.debug(f"Pushing text frame: {text_frame}")
|
| 321 |
+
await self.push_frame(text_frame, direction)
|
| 322 |
+
|
| 323 |
+
def _contains_only_speaker_tags(self, text: str) -> bool:
|
| 324 |
+
"""
|
| 325 |
+
Check if the text contains only speaker tags.
|
| 326 |
+
"""
|
| 327 |
+
return text.strip().startswith("<speaker_") and text.strip().endswith(">")
|
| 328 |
+
|
| 329 |
+
async def _handle_vad_user_started_speaking(self, frame: VADUserStartedSpeakingFrame, direction: FrameDirection):
|
| 330 |
+
"""
|
| 331 |
+
Handle the user started speaking frame.
|
| 332 |
+
|
| 333 |
+
If there are no backchannel phrases and we haven't sent the user started speaking frame, we send it now
|
| 334 |
+
so that the bot can be interrupted and be ready to respond to the new user turn
|
| 335 |
+
"""
|
| 336 |
+
self._vad_user_speaking = True
|
| 337 |
+
logger.debug("NeMoTurnTakingService: VADUserStartedSpeakingFrame")
|
| 338 |
+
await self.push_frame(frame, direction)
|
| 339 |
+
if not self.backchannel_phrases and not self._have_sent_user_started_speaking:
|
| 340 |
+
await self._handle_user_interruption(UserStartedSpeakingFrame())
|
| 341 |
+
self._have_sent_user_started_speaking = True
|
| 342 |
+
|
| 343 |
+
async def _handle_vad_user_stopped_speaking(self, frame: VADUserStoppedSpeakingFrame, direction: FrameDirection):
|
| 344 |
+
"""
|
| 345 |
+
Handle the user stopped speaking frame.
|
| 346 |
+
|
| 347 |
+
If the buffer is not empty:
|
| 348 |
+
- If bot is not speaking: push completed text regardless of backchannel
|
| 349 |
+
- If bot is speaking: ignore backchannel strings
|
| 350 |
+
If the buffer is empty, do nothing.
|
| 351 |
+
"""
|
| 352 |
+
if self.use_vad:
|
| 353 |
+
self._vad_user_speaking = False
|
| 354 |
+
logger.debug("NeMoTurnTakingService: VADUserStoppedSpeakingFrame")
|
| 355 |
+
await self.push_frame(frame, direction)
|
| 356 |
+
|
| 357 |
+
# if user buffer only contains speaker tags, we don't push the completed text frame
|
| 358 |
+
if self._contains_only_speaker_tags(self._user_speaking_buffer):
|
| 359 |
+
logger.debug(f"User buffer only contains speaker tags: `{self._user_speaking_buffer}`, ignoring")
|
| 360 |
+
return
|
| 361 |
+
|
| 362 |
+
is_backchannel = self.is_backchannel(self._user_speaking_buffer)
|
| 363 |
+
if not self._user_speaking_buffer:
|
| 364 |
+
return
|
| 365 |
+
if not self._bot_speaking or not is_backchannel:
|
| 366 |
+
logger.debug(f"Bot talking: {self._bot_speaking}, backchannel: {is_backchannel}")
|
| 367 |
+
logger.debug(f"Pushing completed text frame for VAD user stopped speaking: {self._user_speaking_buffer}")
|
| 368 |
+
await self._handle_completed_text(self._user_speaking_buffer, direction)
|
| 369 |
+
self._user_speaking_buffer = ""
|
| 370 |
+
if self._have_sent_user_started_speaking:
|
| 371 |
+
await self._handle_user_interruption(UserStoppedSpeakingFrame())
|
| 372 |
+
self._have_sent_user_started_speaking = False
|
| 373 |
+
elif is_backchannel:
|
| 374 |
+
logger.debug(f"Backchannel detected: `{self._user_speaking_buffer}`")
|
| 375 |
+
if self._audio_logger:
|
| 376 |
+
self._audio_logger.save_user_audio(is_backchannel=True)
|
| 377 |
+
logger.debug(
|
| 378 |
+
f"[TurnTakingService] Saved backchannel audio (VAD stopped): {self._user_speaking_buffer}"
|
| 379 |
+
)
|
| 380 |
+
|
| 381 |
+
await self.push_frame(
|
| 382 |
+
TranscriptionFrame(
|
| 383 |
+
text=f"({self._user_speaking_buffer})",
|
| 384 |
+
user_id="",
|
| 385 |
+
timestamp=time_now_iso8601(),
|
| 386 |
+
language=self.language if self.language else Language.EN_US,
|
| 387 |
+
result={"text": f"Backchannel detected: {self._user_speaking_buffer}"},
|
| 388 |
+
),
|
| 389 |
+
direction=FrameDirection.UPSTREAM,
|
| 390 |
+
)
|
| 391 |
+
self._user_speaking_buffer = ""
|
| 392 |
+
self._have_sent_user_started_speaking = False
|
| 393 |
+
|
| 394 |
+
async def _handle_user_interruption(self, frame: Frame):
|
| 395 |
+
# Adapted from BaseInputTransport._handle_user_interruption
|
| 396 |
+
if isinstance(frame, UserStartedSpeakingFrame):
|
| 397 |
+
logger.debug("User started speaking")
|
| 398 |
+
if self.can_create_user_frames:
|
| 399 |
+
logger.debug("Pushing UserStartedSpeakingFrame and StartInterruptionFrame")
|
| 400 |
+
await self.push_frame(frame)
|
| 401 |
+
await self.push_frame(StartInterruptionFrame(), direction=FrameDirection.DOWNSTREAM)
|
| 402 |
+
else:
|
| 403 |
+
logger.debug(
|
| 404 |
+
"Skipping UserStartedSpeakingFrame and StartInterruptionFrame because can_create_user_frames is False"
|
| 405 |
+
)
|
| 406 |
+
# Record cutoff time for agent audio when TTS is interrupted
|
| 407 |
+
if self._audio_logger and self._bot_speaking:
|
| 408 |
+
self._audio_logger.set_agent_cutoff_time()
|
| 409 |
+
# Increment turn index when user starts speaking (only if speaker changed)
|
| 410 |
+
if self._audio_logger:
|
| 411 |
+
self._audio_logger.increment_turn_index(speaker="user")
|
| 412 |
+
elif isinstance(frame, UserStoppedSpeakingFrame):
|
| 413 |
+
logger.debug("User stopped speaking")
|
| 414 |
+
if self.can_create_user_frames:
|
| 415 |
+
logger.debug("Pushing UserStoppedSpeakingFrame")
|
| 416 |
+
await self.push_frame(frame)
|
| 417 |
+
else:
|
| 418 |
+
logger.debug("Skipping UserStoppedSpeakingFrame because can_create_user_frames is False")
|
| 419 |
+
else:
|
| 420 |
+
logger.debug(f"Unknown frame type for _handle_user_interruption: {type(frame)}")
|
| 421 |
+
|
| 422 |
+
async def _handle_diar_result(self, frame: DiarResultFrame, direction: FrameDirection):
|
| 423 |
+
if not self.use_diar:
|
| 424 |
+
logger.debug("Diarization is disabled, skipping")
|
| 425 |
+
return
|
| 426 |
+
|
| 427 |
+
new_speaker_id = frame.diar_result # speaker id of the dominant speaker
|
| 428 |
+
|
| 429 |
+
# logger.debug(f"Dominant speaker ID: {dominant_speaker_id}")
|
| 430 |
+
self._prev_speaker_id = self._current_speaker_id
|
| 431 |
+
last_speaker_id = self._current_speaker_id
|
| 432 |
+
|
| 433 |
+
if not self._user_speaking_buffer.startswith("<speaker_"):
|
| 434 |
+
# add speaker tag <speaker_{speaker_id}> to the beginning of the current utterance
|
| 435 |
+
self._user_speaking_buffer = f"<speaker_{new_speaker_id}> {self._user_speaking_buffer}"
|
| 436 |
+
elif last_speaker_id != new_speaker_id:
|
| 437 |
+
# change the speaker tag to the dominant speaker id
|
| 438 |
+
self._user_speaking_buffer = self._user_speaking_buffer[len("<speaker_0>") :]
|
| 439 |
+
self._user_speaking_buffer = f"<speaker_{new_speaker_id}> {self._user_speaking_buffer}"
|
| 440 |
+
logger.debug(f"Speaker changed from {last_speaker_id} to {new_speaker_id}")
|
| 441 |
+
self._current_speaker_id = new_speaker_id
|
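For reference, a minimal sketch (not part of the diff) of how the speaker-tag guard above behaves on typical user buffers; the standalone helper name is hypothetical and simply mirrors `_contains_only_speaker_tags`:

```python
def contains_only_speaker_tags(text: str) -> bool:
    # mirrors NeMoTurnTakingService._contains_only_speaker_tags above
    return text.strip().startswith("<speaker_") and text.strip().endswith(">")

assert contains_only_speaker_tags("<speaker_0>")            # tag only: the VAD-stop handler ignores it
assert not contains_only_speaker_tags("<speaker_1> hello")  # real content: pushed as completed text
assert not contains_only_speaker_tags("hello")              # plain transcript text
```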
nemo/agents/voice_agent/pipecat/services/nemo/utils.py
ADDED
|
@@ -0,0 +1,197 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
# NOTE: This file will be deprecated in the future, as the new inference pipeline will replace it.
|
| 15 |
+
|
| 16 |
+
import math
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import torch
|
| 20 |
+
from omegaconf import DictConfig
|
| 21 |
+
|
| 22 |
+
import nemo.collections.asr as nemo_asr
|
| 23 |
+
|
| 24 |
+
LOG_MEL_ZERO = -16.635
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class AudioBufferer:
|
| 28 |
+
def __init__(self, sample_rate: int, buffer_size_in_secs: float):
|
| 29 |
+
self.buffer_size = int(buffer_size_in_secs * sample_rate)
|
| 30 |
+
self.sample_buffer = torch.zeros(self.buffer_size, dtype=torch.float32)
|
| 31 |
+
|
| 32 |
+
def reset(self) -> None:
|
| 33 |
+
"""
|
| 34 |
+
Reset the buffer to zero
|
| 35 |
+
"""
|
| 36 |
+
self.sample_buffer.zero_()
|
| 37 |
+
|
| 38 |
+
def update(self, audio: np.ndarray) -> None:
|
| 39 |
+
"""
|
| 40 |
+
Update the buffer with the new frame
|
| 41 |
+
Args:
|
| 42 |
+
audio (np.ndarray): audio samples to append to the buffer
|
| 43 |
+
"""
|
| 44 |
+
if not isinstance(audio, torch.Tensor):
|
| 45 |
+
audio = torch.from_numpy(audio)
|
| 46 |
+
|
| 47 |
+
audio_size = audio.shape[0]
|
| 48 |
+
if audio_size > self.buffer_size:
|
| 49 |
+
raise ValueError(f"Frame size ({audio_size}) exceeds buffer size ({self.buffer_size})")
|
| 50 |
+
|
| 51 |
+
shift = audio_size
|
| 52 |
+
self.sample_buffer[:-shift] = self.sample_buffer[shift:].clone()
|
| 53 |
+
self.sample_buffer[-shift:] = audio.clone()
|
| 54 |
+
|
| 55 |
+
def get_buffer(self) -> torch.Tensor:
|
| 56 |
+
"""
|
| 57 |
+
Get the current buffer
|
| 58 |
+
Returns:
|
| 59 |
+
torch.Tensor: current state of the buffer
|
| 60 |
+
"""
|
| 61 |
+
return self.sample_buffer.clone()
|
| 62 |
+
|
| 63 |
+
def is_buffer_empty(self) -> bool:
|
| 64 |
+
"""
|
| 65 |
+
Check if the buffer is empty
|
| 66 |
+
Returns:
|
| 67 |
+
bool: True if the buffer is empty, False otherwise
|
| 68 |
+
"""
|
| 69 |
+
return self.sample_buffer.sum() == 0
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class CacheFeatureBufferer:
|
| 73 |
+
def __init__(
|
| 74 |
+
self,
|
| 75 |
+
sample_rate: int,
|
| 76 |
+
buffer_size_in_secs: float,
|
| 77 |
+
chunk_size_in_secs: float,
|
| 78 |
+
preprocessor_cfg: DictConfig,
|
| 79 |
+
device: torch.device,
|
| 80 |
+
fill_value: float = LOG_MEL_ZERO,
|
| 81 |
+
):
|
| 82 |
+
|
| 83 |
+
if buffer_size_in_secs < chunk_size_in_secs:
|
| 84 |
+
raise ValueError(
|
| 85 |
+
f"Buffer size ({buffer_size_in_secs}s) should be no less than chunk size ({chunk_size_in_secs}s)"
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
self.sample_rate = sample_rate
|
| 89 |
+
self.buffer_size_in_secs = buffer_size_in_secs
|
| 90 |
+
self.chunk_size_in_secs = chunk_size_in_secs
|
| 91 |
+
self.device = device
|
| 92 |
+
|
| 93 |
+
if hasattr(preprocessor_cfg, 'log') and preprocessor_cfg.log:
|
| 94 |
+
self.ZERO_LEVEL_SPEC_DB_VAL = LOG_MEL_ZERO # Log-Mel spectrogram value for zero signals
|
| 95 |
+
else:
|
| 96 |
+
self.ZERO_LEVEL_SPEC_DB_VAL = fill_value
|
| 97 |
+
|
| 98 |
+
self.n_feat = preprocessor_cfg.features
|
| 99 |
+
self.timestep_duration = preprocessor_cfg.window_stride
|
| 100 |
+
self.n_chunk_look_back = int(self.timestep_duration * self.sample_rate)
|
| 101 |
+
self.chunk_size = int(self.chunk_size_in_secs * self.sample_rate)
|
| 102 |
+
self.sample_buffer = AudioBufferer(sample_rate, buffer_size_in_secs)
|
| 103 |
+
|
| 104 |
+
self.feature_buffer_len = int(buffer_size_in_secs / self.timestep_duration)
|
| 105 |
+
self.feature_chunk_len = int(chunk_size_in_secs / self.timestep_duration)
|
| 106 |
+
self.feature_buffer = torch.full(
|
| 107 |
+
[self.n_feat, self.feature_buffer_len],
|
| 108 |
+
self.ZERO_LEVEL_SPEC_DB_VAL,
|
| 109 |
+
dtype=torch.float32,
|
| 110 |
+
device=self.device,
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
self.preprocessor = nemo_asr.models.ASRModel.from_config_dict(preprocessor_cfg)
|
| 114 |
+
self.preprocessor.to(self.device)
|
| 115 |
+
|
| 116 |
+
def is_buffer_empty(self) -> bool:
|
| 117 |
+
"""
|
| 118 |
+
Check if the buffer is empty
|
| 119 |
+
Returns:
|
| 120 |
+
bool: True if the buffer is empty, False otherwise
|
| 121 |
+
"""
|
| 122 |
+
return self.sample_buffer.is_buffer_empty()
|
| 123 |
+
|
| 124 |
+
def reset(self) -> None:
|
| 125 |
+
"""
|
| 126 |
+
Reset the buffer to zero
|
| 127 |
+
"""
|
| 128 |
+
self.sample_buffer.reset()
|
| 129 |
+
self.feature_buffer.fill_(self.ZERO_LEVEL_SPEC_DB_VAL)
|
| 130 |
+
|
| 131 |
+
def _update_feature_buffer(self, feat_chunk: torch.Tensor) -> None:
|
| 132 |
+
"""
|
| 133 |
+
Add an extracted feature to `feature_buffer`
|
| 134 |
+
"""
|
| 135 |
+
self.feature_buffer[:, : -self.feature_chunk_len] = self.feature_buffer[:, self.feature_chunk_len :].clone()
|
| 136 |
+
self.feature_buffer[:, -self.feature_chunk_len :] = feat_chunk.clone()
|
| 137 |
+
|
| 138 |
+
def preprocess(self, audio_signal: torch.Tensor) -> torch.Tensor:
|
| 139 |
+
"""
|
| 140 |
+
Preprocess the audio signal using the preprocessor
|
| 141 |
+
Args:
|
| 142 |
+
audio_signal (torch.Tensor): audio signal
|
| 143 |
+
Returns:
|
| 144 |
+
torch.Tensor: preprocessed features
|
| 145 |
+
"""
|
| 146 |
+
audio_signal = audio_signal.unsqueeze_(0).to(self.device)
|
| 147 |
+
audio_signal_len = torch.tensor([audio_signal.shape[1]], device=self.device)
|
| 148 |
+
features, _ = self.preprocessor(
|
| 149 |
+
input_signal=audio_signal,
|
| 150 |
+
length=audio_signal_len,
|
| 151 |
+
)
|
| 152 |
+
features = features.squeeze()
|
| 153 |
+
return features
|
| 154 |
+
|
| 155 |
+
def update(self, audio: np.ndarray) -> None:
|
| 156 |
+
"""
|
| 157 |
+
Update the sample and feature buffers with the new audio chunk
|
| 158 |
+
Args:
|
| 159 |
+
audio (np.ndarray): audio samples to update the buffers with
|
| 160 |
+
"""
|
| 161 |
+
|
| 162 |
+
# Update the sample buffer with the new frame
|
| 163 |
+
self.sample_buffer.update(audio)
|
| 164 |
+
|
| 165 |
+
if math.isclose(self.buffer_size_in_secs, self.chunk_size_in_secs):
|
| 166 |
+
# If the buffer size is equal to the chunk size, just take the whole buffer
|
| 167 |
+
samples = self.sample_buffer.sample_buffer.clone()
|
| 168 |
+
else:
|
| 169 |
+
# Add look_back to have context for the first feature
|
| 170 |
+
samples = self.sample_buffer.sample_buffer[-(self.n_chunk_look_back + self.chunk_size) :]
|
| 171 |
+
|
| 172 |
+
# Get the mel spectrogram
|
| 173 |
+
features = self.preprocess(samples)
|
| 174 |
+
|
| 175 |
+
# If the features are longer than supposed to be, drop the last frames
|
| 176 |
+
# Drop the last diff frames because they might be incomplete
|
| 177 |
+
if (diff := features.shape[1] - self.feature_chunk_len - 1) > 0:
|
| 178 |
+
features = features[:, :-diff]
|
| 179 |
+
|
| 180 |
+
# Update the feature buffer with the new features
|
| 181 |
+
self._update_feature_buffer(features[:, -self.feature_chunk_len :])
|
| 182 |
+
|
| 183 |
+
def get_buffer(self) -> torch.Tensor:
|
| 184 |
+
"""
|
| 185 |
+
Get the current sample buffer
|
| 186 |
+
Returns:
|
| 187 |
+
torch.Tensor: current state of the buffer
|
| 188 |
+
"""
|
| 189 |
+
return self.sample_buffer.get_buffer()
|
| 190 |
+
|
| 191 |
+
def get_feature_buffer(self) -> torch.Tensor:
|
| 192 |
+
"""
|
| 193 |
+
Get the current feature buffer
|
| 194 |
+
Returns:
|
| 195 |
+
torch.Tensor: current state of the feature buffer
|
| 196 |
+
"""
|
| 197 |
+
return self.feature_buffer.clone()
|
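A rough usage sketch for the buffer classes above (assumptions: a NeMo ASR model whose `cfg.preprocessor` exposes `features`, `window_stride`, and `log`; the model name and buffer sizes below are illustrative only):

```python
import numpy as np
import torch
import nemo.collections.asr as nemo_asr

from nemo.agents.voice_agent.pipecat.services.nemo.utils import CacheFeatureBufferer

# Any streaming-friendly ASR model config works; this checkpoint name is only an example.
asr_model = nemo_asr.models.ASRModel.from_pretrained("stt_en_fastconformer_hybrid_large_streaming_80ms")

bufferer = CacheFeatureBufferer(
    sample_rate=16000,
    buffer_size_in_secs=2.0,       # illustrative sizes
    chunk_size_in_secs=0.08,
    preprocessor_cfg=asr_model.cfg.preprocessor,
    device=torch.device("cpu"),
)

chunk = np.zeros(int(0.08 * 16000), dtype=np.float32)  # one 80 ms audio chunk
bufferer.update(chunk)                                  # shifts the sample buffer and appends new mel frames
feats = bufferer.get_feature_buffer()                   # shape: [n_feat, buffer_len_in_frames]
```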
nemo/agents/voice_agent/pipecat/transports/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
nemo/agents/voice_agent/pipecat/transports/base_input.py
ADDED
|
@@ -0,0 +1,58 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
from loguru import logger
|
| 17 |
+
from pipecat.audio.vad.vad_analyzer import VADState
|
| 18 |
+
from pipecat.frames.frames import (
|
| 19 |
+
InputAudioRawFrame,
|
| 20 |
+
UserStartedSpeakingFrame,
|
| 21 |
+
UserStoppedSpeakingFrame,
|
| 22 |
+
VADUserStartedSpeakingFrame,
|
| 23 |
+
VADUserStoppedSpeakingFrame,
|
| 24 |
+
)
|
| 25 |
+
from pipecat.transports.base_input import BaseInputTransport as _BaseInputTransport
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class BaseInputTransport(_BaseInputTransport):
|
| 29 |
+
async def _handle_vad(self, audio_frame: InputAudioRawFrame, vad_state: VADState):
|
| 30 |
+
"""Handle Voice Activity Detection results and generate appropriate frames."""
|
| 31 |
+
new_vad_state = await self._vad_analyze(audio_frame)
|
| 32 |
+
if new_vad_state != vad_state and new_vad_state != VADState.STARTING and new_vad_state != VADState.STOPPING:
|
| 33 |
+
frame = None
|
| 34 |
+
# If the turn analyser is enabled, this will prevent:
|
| 35 |
+
# - Creating the UserStoppedSpeakingFrame
|
| 36 |
+
# - Creating the UserStartedSpeakingFrame multiple times
|
| 37 |
+
can_create_user_frames = (
|
| 38 |
+
self._params.turn_analyzer is None or not self._params.turn_analyzer.speech_triggered
|
| 39 |
+
) and self._params.can_create_user_frames
|
| 40 |
+
|
| 41 |
+
if new_vad_state == VADState.SPEAKING:
|
| 42 |
+
await self.push_frame(VADUserStartedSpeakingFrame())
|
| 43 |
+
if can_create_user_frames:
|
| 44 |
+
frame = UserStartedSpeakingFrame()
|
| 45 |
+
else:
|
| 46 |
+
logger.debug("base_input: VAD state changed to SPEAKING but can_create_user_frames is False")
|
| 47 |
+
elif new_vad_state == VADState.QUIET:
|
| 48 |
+
await self.push_frame(VADUserStoppedSpeakingFrame())
|
| 49 |
+
if can_create_user_frames:
|
| 50 |
+
frame = UserStoppedSpeakingFrame()
|
| 51 |
+
else:
|
| 52 |
+
logger.debug("base_input: VAD state changed to QUIET but can_create_user_frames is False")
|
| 53 |
+
|
| 54 |
+
if frame:
|
| 55 |
+
await self._handle_user_interruption(frame)
|
| 56 |
+
|
| 57 |
+
vad_state = new_vad_state
|
| 58 |
+
return vad_state
|
nemo/{collections/nlp/data/language_modeling/megatron/length_distribution_type.py → agents/voice_agent/pipecat/transports/base_transport.py}
RENAMED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
# Copyright (c)
|
| 2 |
#
|
| 3 |
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
# you may not use this file except in compliance with the License.
|
|
@@ -12,10 +12,9 @@
|
|
| 12 |
# See the License for the specific language governing permissions and
|
| 13 |
# limitations under the License.
|
| 14 |
|
| 15 |
-
import enum
|
| 16 |
|
|
|
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
truncated_normal = 3
|
|
|
|
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
#
|
| 3 |
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
# you may not use this file except in compliance with the License.
|
|
|
|
| 12 |
# See the License for the specific language governing permissions and
|
| 13 |
# limitations under the License.
|
| 14 |
|
|
|
|
| 15 |
|
| 16 |
+
from pipecat.transports.base_transport import TransportParams as _TransportParams
|
| 17 |
|
| 18 |
+
|
| 19 |
+
class TransportParams(_TransportParams):
|
| 20 |
+
can_create_user_frames: bool = True
|
|
|
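A minimal sketch of the new flag in context (the VAD analyzer and base fields follow standard pipecat usage and are assumptions here): with `can_create_user_frames=False`, the input transport above still pushes `VADUserStarted/StoppedSpeakingFrame`, but leaves `UserStarted/StoppedSpeakingFrame` to a downstream service such as the turn-taking processor in this diff.

```python
from pipecat.audio.vad.silero import SileroVADAnalyzer

from nemo.agents.voice_agent.pipecat.transports.base_transport import TransportParams

params = TransportParams(
    audio_in_enabled=True,
    vad_analyzer=SileroVADAnalyzer(),
    can_create_user_frames=False,  # let the turn-taking service own user turn boundaries
)
```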
nemo/agents/voice_agent/pipecat/transports/network/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
nemo/agents/voice_agent/pipecat/transports/network/websocket_server.py
ADDED
|
@@ -0,0 +1,304 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
import asyncio
|
| 17 |
+
from typing import Optional
|
| 18 |
+
|
| 19 |
+
from loguru import logger
|
| 20 |
+
from pipecat.frames.frames import CancelFrame, EndFrame, InputAudioRawFrame, StartFrame
|
| 21 |
+
from pipecat.serializers.base_serializer import FrameSerializer
|
| 22 |
+
from pipecat.transports.base_transport import BaseTransport
|
| 23 |
+
from pipecat.transports.network.websocket_server import (
|
| 24 |
+
WebsocketServerCallbacks,
|
| 25 |
+
WebsocketServerOutputTransport,
|
| 26 |
+
WebsocketServerParams,
|
| 27 |
+
)
|
| 28 |
+
|
| 29 |
+
from nemo.agents.voice_agent.pipecat.transports.base_input import BaseInputTransport
|
| 30 |
+
from nemo.agents.voice_agent.pipecat.transports.base_transport import TransportParams
|
| 31 |
+
|
| 32 |
+
try:
|
| 33 |
+
import websockets
|
| 34 |
+
except ModuleNotFoundError as e:
|
| 35 |
+
logger.error(f"Exception: {e}")
|
| 36 |
+
logger.error("In order to use websockets, you need to `pip install pipecat-ai[websocket]`.")
|
| 37 |
+
raise Exception(f"Missing module: {e}")
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class WebsocketServerParams(TransportParams):
|
| 41 |
+
"""Configuration parameters for WebSocket server transport.
|
| 42 |
+
|
| 43 |
+
Parameters:
|
| 44 |
+
add_wav_header: Whether to add WAV headers to audio frames.
|
| 45 |
+
serializer: Frame serializer for message encoding/decoding.
|
| 46 |
+
session_timeout: Timeout in seconds for client sessions.
|
| 47 |
+
"""
|
| 48 |
+
|
| 49 |
+
add_wav_header: bool = False
|
| 50 |
+
serializer: Optional[FrameSerializer] = None
|
| 51 |
+
session_timeout: Optional[int] = None
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class WebsocketServerInputTransport(BaseInputTransport):
|
| 55 |
+
"""WebSocket server input transport for receiving client data.
|
| 56 |
+
|
| 57 |
+
Handles incoming WebSocket connections, message processing, and client
|
| 58 |
+
session management including timeout monitoring and connection lifecycle.
|
| 59 |
+
"""
|
| 60 |
+
|
| 61 |
+
def __init__(
|
| 62 |
+
self,
|
| 63 |
+
transport: BaseTransport,
|
| 64 |
+
host: str,
|
| 65 |
+
port: int,
|
| 66 |
+
params: WebsocketServerParams,
|
| 67 |
+
callbacks: WebsocketServerCallbacks,
|
| 68 |
+
**kwargs,
|
| 69 |
+
):
|
| 70 |
+
"""Initialize the WebSocket server input transport.
|
| 71 |
+
|
| 72 |
+
Args:
|
| 73 |
+
transport: The parent transport instance.
|
| 74 |
+
host: Host address to bind the WebSocket server to.
|
| 75 |
+
port: Port number to bind the WebSocket server to.
|
| 76 |
+
params: WebSocket server configuration parameters.
|
| 77 |
+
callbacks: Callback functions for WebSocket events.
|
| 78 |
+
**kwargs: Additional arguments passed to parent class.
|
| 79 |
+
"""
|
| 80 |
+
super().__init__(params, **kwargs)
|
| 81 |
+
|
| 82 |
+
self._transport = transport
|
| 83 |
+
self._host = host
|
| 84 |
+
self._port = port
|
| 85 |
+
self._params = params
|
| 86 |
+
self._callbacks = callbacks
|
| 87 |
+
|
| 88 |
+
self._websocket: Optional[websockets.WebSocketServerProtocol] = None
|
| 89 |
+
|
| 90 |
+
self._server_task = None
|
| 91 |
+
|
| 92 |
+
# This task will monitor the websocket connection periodically.
|
| 93 |
+
self._monitor_task = None
|
| 94 |
+
|
| 95 |
+
self._stop_server_event = asyncio.Event()
|
| 96 |
+
|
| 97 |
+
# Whether we have seen a StartFrame already.
|
| 98 |
+
self._initialized = False
|
| 99 |
+
|
| 100 |
+
async def start(self, frame: StartFrame):
|
| 101 |
+
"""Start the WebSocket server and initialize components.
|
| 102 |
+
|
| 103 |
+
Args:
|
| 104 |
+
frame: The start frame containing initialization parameters.
|
| 105 |
+
"""
|
| 106 |
+
await super().start(frame)
|
| 107 |
+
|
| 108 |
+
if self._initialized:
|
| 109 |
+
return
|
| 110 |
+
|
| 111 |
+
self._initialized = True
|
| 112 |
+
|
| 113 |
+
if self._params.serializer:
|
| 114 |
+
await self._params.serializer.setup(frame)
|
| 115 |
+
if not self._server_task:
|
| 116 |
+
self._server_task = self.create_task(self._server_task_handler())
|
| 117 |
+
await self.set_transport_ready(frame)
|
| 118 |
+
|
| 119 |
+
async def stop(self, frame: EndFrame):
|
| 120 |
+
"""Stop the WebSocket server and cleanup resources.
|
| 121 |
+
|
| 122 |
+
Args:
|
| 123 |
+
frame: The end frame signaling transport shutdown.
|
| 124 |
+
"""
|
| 125 |
+
await super().stop(frame)
|
| 126 |
+
self._stop_server_event.set()
|
| 127 |
+
if self._monitor_task:
|
| 128 |
+
await self.cancel_task(self._monitor_task)
|
| 129 |
+
self._monitor_task = None
|
| 130 |
+
if self._server_task:
|
| 131 |
+
await self.wait_for_task(self._server_task)
|
| 132 |
+
self._server_task = None
|
| 133 |
+
|
| 134 |
+
async def cancel(self, frame: CancelFrame):
|
| 135 |
+
"""Cancel the WebSocket server and stop all processing.
|
| 136 |
+
|
| 137 |
+
Args:
|
| 138 |
+
frame: The cancel frame signaling immediate cancellation.
|
| 139 |
+
"""
|
| 140 |
+
await super().cancel(frame)
|
| 141 |
+
if self._monitor_task:
|
| 142 |
+
await self.cancel_task(self._monitor_task)
|
| 143 |
+
self._monitor_task = None
|
| 144 |
+
if self._server_task:
|
| 145 |
+
await self.cancel_task(self._server_task)
|
| 146 |
+
self._server_task = None
|
| 147 |
+
|
| 148 |
+
async def cleanup(self):
|
| 149 |
+
"""Cleanup resources and parent transport."""
|
| 150 |
+
await super().cleanup()
|
| 151 |
+
await self._transport.cleanup()
|
| 152 |
+
|
| 153 |
+
async def _server_task_handler(self):
|
| 154 |
+
"""Handle WebSocket server startup and client connections."""
|
| 155 |
+
logger.info(f"Starting websocket server on {self._host}:{self._port}")
|
| 156 |
+
async with websockets.serve(self._client_handler, self._host, self._port) as server:
|
| 157 |
+
await self._callbacks.on_websocket_ready()
|
| 158 |
+
await self._stop_server_event.wait()
|
| 159 |
+
|
| 160 |
+
async def _client_handler(self, websocket: websockets.WebSocketServerProtocol, path: Optional[str] = None):
|
| 161 |
+
"""Handle individual client connections and message processing."""
|
| 162 |
+
logger.info(f"New client connection from {websocket.remote_address}")
|
| 163 |
+
if self._websocket:
|
| 164 |
+
await self._websocket.close()
|
| 165 |
+
logger.warning("Only one client connected, using new connection")
|
| 166 |
+
|
| 167 |
+
self._websocket = websocket
|
| 168 |
+
|
| 169 |
+
# Notify
|
| 170 |
+
await self._callbacks.on_client_connected(websocket)
|
| 171 |
+
|
| 172 |
+
# Create a task to monitor the websocket connection
|
| 173 |
+
if not self._monitor_task and self._params.session_timeout:
|
| 174 |
+
self._monitor_task = self.create_task(self._monitor_websocket(websocket, self._params.session_timeout))
|
| 175 |
+
|
| 176 |
+
# Handle incoming messages
|
| 177 |
+
try:
|
| 178 |
+
async for message in websocket:
|
| 179 |
+
if not self._params.serializer:
|
| 180 |
+
continue
|
| 181 |
+
|
| 182 |
+
frame = await self._params.serializer.deserialize(message)
|
| 183 |
+
|
| 184 |
+
if not frame:
|
| 185 |
+
continue
|
| 186 |
+
|
| 187 |
+
if isinstance(frame, InputAudioRawFrame):
|
| 188 |
+
await self.push_audio_frame(frame)
|
| 189 |
+
else:
|
| 190 |
+
await self.push_frame(frame)
|
| 191 |
+
except Exception as e:
|
| 192 |
+
logger.error(f"{self} exception receiving data: {e.__class__.__name__} ({e})")
|
| 193 |
+
|
| 194 |
+
# Notify disconnection
|
| 195 |
+
await self._callbacks.on_client_disconnected(websocket)
|
| 196 |
+
|
| 197 |
+
await self._websocket.close()
|
| 198 |
+
self._websocket = None
|
| 199 |
+
|
| 200 |
+
logger.info(f"Client {websocket.remote_address} disconnected")
|
| 201 |
+
|
| 202 |
+
async def _monitor_websocket(self, websocket: websockets.WebSocketServerProtocol, session_timeout: int):
|
| 203 |
+
"""Monitor WebSocket connection for session timeout."""
|
| 204 |
+
try:
|
| 205 |
+
await asyncio.sleep(session_timeout)
|
| 206 |
+
if not websocket.closed:
|
| 207 |
+
await self._callbacks.on_session_timeout(websocket)
|
| 208 |
+
except asyncio.CancelledError:
|
| 209 |
+
logger.info(f"Monitoring task cancelled for: {websocket.remote_address}")
|
| 210 |
+
raise
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class WebsocketServerTransport(BaseTransport):
|
| 214 |
+
"""WebSocket server transport for bidirectional real-time communication.
|
| 215 |
+
|
| 216 |
+
Provides a complete WebSocket server implementation with separate input and
|
| 217 |
+
output transports, client connection management, and event handling for
|
| 218 |
+
real-time audio and data streaming applications.
|
| 219 |
+
"""
|
| 220 |
+
|
| 221 |
+
def __init__(
|
| 222 |
+
self,
|
| 223 |
+
params: WebsocketServerParams,
|
| 224 |
+
host: str = "localhost",
|
| 225 |
+
port: int = 8765,
|
| 226 |
+
input_name: Optional[str] = None,
|
| 227 |
+
output_name: Optional[str] = None,
|
| 228 |
+
):
|
| 229 |
+
"""Initialize the WebSocket server transport.
|
| 230 |
+
|
| 231 |
+
Args:
|
| 232 |
+
params: WebSocket server configuration parameters.
|
| 233 |
+
host: Host address to bind the server to. Defaults to "localhost".
|
| 234 |
+
port: Port number to bind the server to. Defaults to 8765.
|
| 235 |
+
input_name: Optional name for the input processor.
|
| 236 |
+
output_name: Optional name for the output processor.
|
| 237 |
+
"""
|
| 238 |
+
super().__init__(input_name=input_name, output_name=output_name)
|
| 239 |
+
self._host = host
|
| 240 |
+
self._port = port
|
| 241 |
+
self._params = params
|
| 242 |
+
|
| 243 |
+
self._callbacks = WebsocketServerCallbacks(
|
| 244 |
+
on_client_connected=self._on_client_connected,
|
| 245 |
+
on_client_disconnected=self._on_client_disconnected,
|
| 246 |
+
on_session_timeout=self._on_session_timeout,
|
| 247 |
+
on_websocket_ready=self._on_websocket_ready,
|
| 248 |
+
)
|
| 249 |
+
self._input: Optional[WebsocketServerInputTransport] = None
|
| 250 |
+
self._output: Optional[WebsocketServerOutputTransport] = None
|
| 251 |
+
self._websocket: Optional[websockets.WebSocketServerProtocol] = None
|
| 252 |
+
|
| 253 |
+
# Register supported handlers. The user will only be able to register
|
| 254 |
+
# these handlers.
|
| 255 |
+
self._register_event_handler("on_client_connected")
|
| 256 |
+
self._register_event_handler("on_client_disconnected")
|
| 257 |
+
self._register_event_handler("on_session_timeout")
|
| 258 |
+
self._register_event_handler("on_websocket_ready")
|
| 259 |
+
|
| 260 |
+
def input(self) -> WebsocketServerInputTransport:
|
| 261 |
+
"""Get the input transport for receiving client data.
|
| 262 |
+
|
| 263 |
+
Returns:
|
| 264 |
+
The WebSocket server input transport instance.
|
| 265 |
+
"""
|
| 266 |
+
if not self._input:
|
| 267 |
+
self._input = WebsocketServerInputTransport(
|
| 268 |
+
self, self._host, self._port, self._params, self._callbacks, name=self._input_name
|
| 269 |
+
)
|
| 270 |
+
return self._input
|
| 271 |
+
|
| 272 |
+
def output(self) -> WebsocketServerOutputTransport:
|
| 273 |
+
"""Get the output transport for sending data to clients.
|
| 274 |
+
|
| 275 |
+
Returns:
|
| 276 |
+
The WebSocket server output transport instance.
|
| 277 |
+
"""
|
| 278 |
+
if not self._output:
|
| 279 |
+
self._output = WebsocketServerOutputTransport(self, self._params, name=self._output_name)
|
| 280 |
+
return self._output
|
| 281 |
+
|
| 282 |
+
async def _on_client_connected(self, websocket):
|
| 283 |
+
"""Handle client connection events."""
|
| 284 |
+
if self._output:
|
| 285 |
+
await self._output.set_client_connection(websocket)
|
| 286 |
+
await self._call_event_handler("on_client_connected", websocket)
|
| 287 |
+
else:
|
| 288 |
+
logger.error("A WebsocketServerTransport output is missing in the pipeline")
|
| 289 |
+
|
| 290 |
+
async def _on_client_disconnected(self, websocket):
|
| 291 |
+
"""Handle client disconnection events."""
|
| 292 |
+
if self._output:
|
| 293 |
+
await self._output.set_client_connection(None)
|
| 294 |
+
await self._call_event_handler("on_client_disconnected", websocket)
|
| 295 |
+
else:
|
| 296 |
+
logger.error("A WebsocketServerTransport output is missing in the pipeline")
|
| 297 |
+
|
| 298 |
+
async def _on_session_timeout(self, websocket):
|
| 299 |
+
"""Handle client session timeout events."""
|
| 300 |
+
await self._call_event_handler("on_session_timeout", websocket)
|
| 301 |
+
|
| 302 |
+
async def _on_websocket_ready(self):
|
| 303 |
+
"""Handle WebSocket server ready events."""
|
| 304 |
+
await self._call_event_handler("on_websocket_ready")
|
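A rough wiring sketch for the transport above (the serializer choice, timeout, and handler body are assumptions; `ProtobufFrameSerializer` is one of pipecat's stock serializers):

```python
from loguru import logger
from pipecat.serializers.protobuf import ProtobufFrameSerializer

from nemo.agents.voice_agent.pipecat.transports.network.websocket_server import (
    WebsocketServerParams,
    WebsocketServerTransport,
)

transport = WebsocketServerTransport(
    params=WebsocketServerParams(
        serializer=ProtobufFrameSerializer(),
        session_timeout=180,           # seconds; optional
        audio_in_enabled=True,
        audio_out_enabled=True,
        can_create_user_frames=False,  # new flag introduced in this diff
    ),
    host="0.0.0.0",
    port=8765,
)

@transport.event_handler("on_client_connected")
async def on_client_connected(transport, websocket):
    logger.info(f"Client connected: {websocket.remote_address}")

# transport.input() and transport.output() then go at the two ends of a pipecat Pipeline.
```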
nemo/agents/voice_agent/pipecat/utils/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
nemo/agents/voice_agent/pipecat/utils/text/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
nemo/agents/voice_agent/pipecat/utils/text/simple_text_aggregator.py
ADDED
|
@@ -0,0 +1,238 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import re
|
| 16 |
+
from typing import AsyncIterator, Optional
|
| 17 |
+
|
| 18 |
+
from loguru import logger
|
| 19 |
+
from pipecat.utils.string import match_endofsentence
|
| 20 |
+
from pipecat.utils.text.base_text_aggregator import Aggregation, AggregationType
|
| 21 |
+
from pipecat.utils.text.simple_text_aggregator import SimpleTextAggregator
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def has_partial_decimal(text: str) -> bool:
|
| 25 |
+
"""Check if the text ends with a partial decimal.
|
| 26 |
+
|
| 27 |
+
Returns True if the text ends with a number that looks like it could
|
| 28 |
+
be a partial decimal (e.g., "3.", "3.14", "($3.14)"), but NOT if it's
|
| 29 |
+
clearly a complete sentence (e.g., "It costs $3.14.") or a bullet point
|
| 30 |
+
(e.g., "1. Alpha; 2.").
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
# Check for bullet point pattern: ends with 1-3 digits followed by period
|
| 34 |
+
# Examples: "1.", "12.", "123.", or "text; 2."
|
| 35 |
+
# Bullet points are typically small numbers (1-999) at the end
|
| 36 |
+
bullet_match = re.search(r'(?:^|[\s;,]|[^\d])(\d{1,3})\.$', text)
|
| 37 |
+
if bullet_match:
|
| 38 |
+
# It's likely a bullet point, not a partial decimal
|
| 39 |
+
return False
|
| 40 |
+
|
| 41 |
+
# Pattern to find decimal numbers near the end, allowing for trailing
|
| 42 |
+
# non-word characters like ), ], ", ', etc.
|
| 43 |
+
# Match: digit(s) + period + optional digit(s) + optional trailing non-word chars
|
| 44 |
+
match = re.search(r'\d+\.(?:\d+)?([^\w\s]*)$', text)
|
| 45 |
+
|
| 46 |
+
if not match:
|
| 47 |
+
return False
|
| 48 |
+
|
| 49 |
+
trailing = match.group(1) # e.g., ")" or "" or "."
|
| 50 |
+
|
| 51 |
+
# If trailing contains a period, it's sentence-ending punctuation
|
| 52 |
+
# e.g., "3.14." means complete sentence
|
| 53 |
+
if '.' in trailing:
|
| 54 |
+
return False
|
| 55 |
+
|
| 56 |
+
# Otherwise, it's a partial decimal (either incomplete like "3."
|
| 57 |
+
# or complete number but sentence not finished like "($3.14)")
|
| 58 |
+
return True
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def find_last_period_index(text: str) -> int:
|
| 62 |
+
"""
|
| 63 |
+
Find the last occurrence of a period in the text,
|
| 64 |
+
but return -1 if the text doesn't seem to be a complete sentence.
|
| 65 |
+
"""
|
| 66 |
+
num_periods = text.count(".")
|
| 67 |
+
if num_periods == 0:
|
| 68 |
+
return -1
|
| 69 |
+
|
| 70 |
+
if num_periods == 1:
|
| 71 |
+
if has_partial_decimal(text):
|
| 72 |
+
# if the only period in the text is part of a number, return -1
|
| 73 |
+
return -1
|
| 74 |
+
# Check if the only period is a bullet point (e.g., "1. Alpha" or incomplete "1.")
|
| 75 |
+
if re.search(r'(?:^|[\s;,]|[^\d])(\d{1,3})\.(?:\s+\w|\s*$)', text):
|
| 76 |
+
# The period is after a bullet point number, either:
|
| 77 |
+
# - followed by content (e.g., "1. Alpha")
|
| 78 |
+
# - or at the end with optional whitespace (e.g., "1." or "1. ")
|
| 79 |
+
return -1
|
| 80 |
+
|
| 81 |
+
# Check if any of the abbreviation fragments "e.", "i.", "g." are present in the text
|
| 82 |
+
if re.search(r'\b(e\.|i\.|g\.)\b', text):
|
| 83 |
+
# The period is after a character/word that is likely to be an abbreviation, return -1
|
| 84 |
+
return -1
|
| 85 |
+
|
| 86 |
+
# otherwise, check the last occurrence of a period
|
| 87 |
+
idx = text.rfind(".")
|
| 88 |
+
if idx <= 0:
|
| 89 |
+
return idx
|
| 90 |
+
if text[idx - 1].isdigit():
|
| 91 |
+
# if the period is after a digit, it's likely a partial decimal, return -1
|
| 92 |
+
return -1
|
| 93 |
+
elif text[idx - 1].isupper():
|
| 94 |
+
# if the period is after a capital letter (e.g., "Washington, D.C."), it's likely an abbreviation, return -1
|
| 95 |
+
return -1
|
| 96 |
+
elif idx > 1 and text[idx - 2 : idx + 1].lower() in ["a.m.", "p.m."]:
|
| 97 |
+
# if the period is after a.m. or p.m., it's likely a time, return -1
|
| 98 |
+
return -1
|
| 99 |
+
elif idx > 2 and text[idx - 3 : idx + 1] in ["e.g.", "i.e.", "etc."]:
|
| 100 |
+
# The period is after a character/word that is likely to be an abbreviation, return -1
|
| 101 |
+
return -1
|
| 102 |
+
elif idx >= 2 and text[idx - 2 : idx + 1].lower() in ["st.", "mr.", "mrs.", "ms.", "dr."]:
|
| 103 |
+
# if the period is after a character/word that is likely to be an abbreviation, return -1
|
| 104 |
+
return -1
|
| 105 |
+
|
| 106 |
+
# the text seems to have a complete sentence, return the index of the last period
|
| 107 |
+
return idx
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def find_last_comma_index(text: str, min_residual_length: int = 5) -> int:
|
| 111 |
+
"""
|
| 112 |
+
Find the last occurrence of a valid comma in the text,
|
| 113 |
+
ignoring the commas in the numbers (e.g., "1,234,567").
|
| 114 |
+
If the leftover text after the comma is too short, it may be an abbreviation, return -1.
|
| 115 |
+
|
| 116 |
+
Args:
|
| 117 |
+
text: The text to find the last occurrence of a valid comma.
|
| 118 |
+
min_residual_length: The minimum length of the leftover text after the rightmost comma
|
| 119 |
+
to be considered as a valid sentence (e.g., "Santa Clara, CA, US.").
|
| 120 |
+
Returns:
|
| 121 |
+
The index of the last occurrence of a valid comma, or -1 if no valid comma is found.
|
| 122 |
+
"""
|
| 123 |
+
# find the last occurrence of a comma in the text
|
| 124 |
+
idx = text.rfind(",")
|
| 125 |
+
if idx == -1:
|
| 126 |
+
return -1
|
| 127 |
+
# check if the comma is in a number
|
| 128 |
+
if re.search(r'\d+,\d+', text[: idx + 1]):
|
| 129 |
+
# the comma is in a number, return -1
|
| 130 |
+
return -1
|
| 131 |
+
|
| 132 |
+
# check if the leftover text after the comma is too short
|
| 133 |
+
if len(text[idx + 1 :]) <= min_residual_length:
|
| 134 |
+
# the leftover text is too short, it may be an abbreviation, return -1
|
| 135 |
+
return -1
|
| 136 |
+
|
| 137 |
+
# the comma is not in a number, return the index of the comma
|
| 138 |
+
return idx
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class SimpleSegmentedTextAggregator(SimpleTextAggregator):
|
| 142 |
+
"""A simple text aggregator that segments the text into sentences based on punctuation marks."""
|
| 143 |
+
|
| 144 |
+
def __init__(
|
| 145 |
+
self,
|
| 146 |
+
punctuation_marks: str | list[str] = ".,!?;:\n",
|
| 147 |
+
ignore_marks: str | list[str] = "*",
|
| 148 |
+
min_sentence_length: int = 0,
|
| 149 |
+
use_legacy_eos_detection: bool = False,
|
| 150 |
+
**kwargs,
|
| 151 |
+
):
|
| 152 |
+
"""
|
| 153 |
+
Args:
|
| 154 |
+
punctuation_marks: The punctuation marks to use for sentence detection.
|
| 155 |
+
ignore_marks: The strings to ignore in the text (e.g., "*").
|
| 156 |
+
min_sentence_length: The minimum length of a sentence to be considered.
|
| 157 |
+
use_legacy_eos_detection: Whether to use the legacy EOS detection from pipecat.
|
| 158 |
+
**kwargs: Additional arguments to pass to the SimpleTextAggregator constructor.
|
| 159 |
+
"""
|
| 160 |
+
super().__init__(**kwargs)
|
| 161 |
+
self._use_legacy_eos_detection = use_legacy_eos_detection
|
| 162 |
+
self._min_sentence_length = min_sentence_length
|
| 163 |
+
self._ignore_marks = set(["*"] if ignore_marks is None else set(ignore_marks))
|
| 164 |
+
if not punctuation_marks:
|
| 165 |
+
self._punctuation_marks = list()
|
| 166 |
+
else:
|
| 167 |
+
punctuation_marks = (
|
| 168 |
+
[c for c in punctuation_marks] if isinstance(punctuation_marks, str) else punctuation_marks
|
| 169 |
+
)
|
| 170 |
+
if "." in punctuation_marks:
|
| 171 |
+
punctuation_marks.remove(".")
|
| 172 |
+
# put period at the end of the list to ensure it's the last punctuation mark to be matched
|
| 173 |
+
punctuation_marks += ["."]
|
| 174 |
+
self._punctuation_marks = punctuation_marks
|
| 175 |
+
|
| 176 |
+
def _find_segment_end(self, text: str) -> Optional[int]:
|
| 177 |
+
"""find the end of text segment.
|
| 178 |
+
|
| 179 |
+
Args:
|
| 180 |
+
text: The text to find the end of the segment.
|
| 181 |
+
|
| 182 |
+
Returns:
|
| 183 |
+
The index of the end of the segment, or None if the text is too short.
|
| 184 |
+
"""
|
| 185 |
+
# drop leading whitespace but keep trailing whitespace to
|
| 186 |
+
# allow "\n" to trigger the end of the sentence
|
| 187 |
+
text_len = len(text)
|
| 188 |
+
text = text.lstrip()
|
| 189 |
+
offset = text_len - len(text)
|
| 190 |
+
if len(text) < self._min_sentence_length:
|
| 191 |
+
return None
|
| 192 |
+
|
| 193 |
+
for punc in self._punctuation_marks:
|
| 194 |
+
if punc == ".":
|
| 195 |
+
idx = find_last_period_index(text)
|
| 196 |
+
elif punc == ",":
|
| 197 |
+
idx = find_last_comma_index(text)
|
| 198 |
+
else:
|
| 199 |
+
idx = text.find(punc)
|
| 200 |
+
if idx != -1:
|
| 201 |
+
# add the offset to the index to account for the leading whitespace
|
| 202 |
+
return idx + 1 + offset
|
| 203 |
+
return None
|
| 204 |
+
|
| 205 |
+
async def aggregate(self, text: str) -> AsyncIterator[Aggregation]:
|
| 206 |
+
"""Aggregate the input text and return the first complete sentence in the text.
|
| 207 |
+
|
| 208 |
+
Args:
|
| 209 |
+
text: The text to aggregate.
|
| 210 |
+
|
| 211 |
+
Yields:
|
| 212 |
+
Aggregation: the first complete sentence in the aggregated text, if one is found.
|
| 213 |
+
"""
|
| 214 |
+
result: Optional[str] = None
|
| 215 |
+
self._text += str(text)
|
| 216 |
+
|
| 217 |
+
eos_end_index = self._find_segment_end(self._text)
|
| 218 |
+
|
| 219 |
+
if not eos_end_index and not has_partial_decimal(self._text) and self._use_legacy_eos_detection:
|
| 220 |
+
# if the text doesn't have partial decimal, and no punctuation marks,
|
| 221 |
+
# we use match_endofsentence to find the end of the sentence
|
| 222 |
+
eos_end_index = match_endofsentence(self._text)
|
| 223 |
+
|
| 224 |
+
if eos_end_index:
|
| 225 |
+
result = self._text[:eos_end_index]
|
| 226 |
+
if len(result.strip()) < self._min_sentence_length:
|
| 227 |
+
logger.debug(
|
| 228 |
+
f"Text is too short, skipping: `{result}`, full text: `{self._text}`, input text: `{text}`"
|
| 229 |
+
)
|
| 230 |
+
result = None
|
| 231 |
+
else:
|
| 232 |
+
logger.debug(f"Text Aggregator Result: `{result}`, full text: `{self._text}`, input text: `{text}`")
|
| 233 |
+
self._text = self._text[eos_end_index:]
|
| 234 |
+
|
| 235 |
+
if result:
|
| 236 |
+
for ignore_mark in self._ignore_marks:
|
| 237 |
+
result = result.replace(ignore_mark, "")
|
| 238 |
+
yield Aggregation(text=result, type=AggregationType.SENTENCE)
|
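A behavior sketch (not part of the diff) for the end-of-sentence heuristics above; the expected values follow from the code as written:

```python
from nemo.agents.voice_agent.pipecat.utils.text.simple_text_aggregator import (
    find_last_comma_index,
    find_last_period_index,
)

print(find_last_period_index("It costs $3.1"))                   # -1: partial decimal, keep buffering
print(find_last_period_index("Meet Dr."))                        # -1: abbreviation, not a sentence end
print(find_last_period_index("1. Alpha"))                        # -1: bullet point
print(find_last_period_index("That is all. And then"))           # 11: index of the sentence-final period
print(find_last_comma_index("1,234,567 items"))                  # -1: comma inside a number
print(find_last_comma_index("We can start, if you are ready"))   # 12: valid clause boundary
```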
nemo/{collections/vlm/clip/data → agents/voice_agent/utils}/__init__.py
RENAMED
|
@@ -11,6 +11,5 @@
|
|
| 11 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
# See the License for the specific language governing permissions and
|
| 13 |
# limitations under the License.
|
| 14 |
-
from nemo.collections.vlm.clip.data.mock import MockDataModule as ClipMockDataModule
|
| 15 |
|
| 16 |
-
|
|
|
|
| 11 |
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
# See the License for the specific language governing permissions and
|
| 13 |
# limitations under the License.
|
|
|
|
| 14 |
|
| 15 |
+
from nemo.agents.voice_agent.utils.config_manager import ConfigManager
|
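A rough usage sketch for `ConfigManager` (the base path is illustrative; the loader below expects `model_registry.yaml` and `server_configs/default.yaml` under it):

```python
from nemo.agents.voice_agent.utils import ConfigManager

cfg = ConfigManager(server_base_path="examples/voice_agent")  # hypothetical path

print(cfg.SAMPLE_RATE)   # 16000
print(cfg.vad_params)    # pipecat VADParams built from the server config
print(cfg.stt_params)    # NeMoSTTInputParams after merging the per-model STT config
```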
nemo/agents/voice_agent/utils/config_manager.py
ADDED
|
@@ -0,0 +1,312 @@
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
from typing import Any, Dict, Optional
|
| 17 |
+
|
| 18 |
+
from loguru import logger
|
| 19 |
+
from omegaconf import OmegaConf
|
| 20 |
+
from pipecat.audio.vad.silero import VADParams
|
| 21 |
+
|
| 22 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.diar import NeMoDiarInputParams
|
| 23 |
+
from nemo.agents.voice_agent.pipecat.services.nemo.stt import NeMoSTTInputParams
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class ConfigManager:
|
| 27 |
+
"""
|
| 28 |
+
Manages configuration for the voice agent server.
|
| 29 |
+
Handles loading, merging, and providing access to all configuration parameters.
|
| 30 |
+
"""
|
| 31 |
+
|
| 32 |
+
def __init__(self, server_base_path: str, server_config_path: Optional[str] = None):
|
| 33 |
+
"""
|
| 34 |
+
Initialize the configuration manager.
|
| 35 |
+
|
| 36 |
+
Args:
|
| 37 |
+
server_base_path: Path to the voice agent server base directory (containing model_registry.yaml and server_configs/).
|
| 38 |
+
server_config_path: Path to the server configuration file. If None, defaults to server_configs/default.yaml under the base path.
|
| 39 |
+
"""
|
| 40 |
+
if not os.path.exists(server_base_path):
|
| 41 |
+
raise FileNotFoundError(f"Server base path not found at {server_base_path}")
|
| 42 |
+
|
| 43 |
+
self._server_base_path = server_base_path
|
| 44 |
+
if server_config_path is not None:
|
| 45 |
+
self._server_config_path = server_config_path
|
| 46 |
+
else:
|
| 47 |
+
self._server_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/default.yaml"
|
| 48 |
+
|
| 49 |
+
if not os.path.exists(self._server_config_path):
|
| 50 |
+
raise FileNotFoundError(f"Server configuration file not found at {self._server_config_path}")
|
| 51 |
+
|
| 52 |
+
# Load model registry
|
| 53 |
+
self.model_registry_path = f"{os.path.abspath(self._server_base_path)}/model_registry.yaml"
|
| 54 |
+
self.model_registry = self._load_model_registry()
|
| 55 |
+
|
| 56 |
+
# Load and process main configuration
|
| 57 |
+
self.server_config = self._load_server_config()
|
| 58 |
+
|
| 59 |
+
# Initialize configuration parameters
|
| 60 |
+
self._initialize_config_parameters()
|
| 61 |
+
|
| 62 |
+
self._generic_hf_llm_model_id = "hf_llm_generic"
|
| 63 |
+
|
| 64 |
+
logger.info(f"Configuration loaded from: {self._server_config_path}")
|
| 65 |
+
logger.info(f"Model registry loaded from: {self.model_registry_path}")
|
| 66 |
+
|
| 67 |
+
def _load_model_registry(self) -> Dict[str, Any]:
|
| 68 |
+
"""Load model registry from YAML file."""
|
| 69 |
+
try:
|
| 70 |
+
return OmegaConf.load(self.model_registry_path)
|
| 71 |
+
except Exception as e:
|
| 72 |
+
logger.error(f"Failed to load model registry: {e}")
|
| 73 |
+
raise ValueError(f"Failed to load model registry: {e}")
|
| 74 |
+
|
| 75 |
+
def _load_server_config(self) -> OmegaConf:
|
| 76 |
+
"""Load and process the main server configuration."""
|
| 77 |
+
server_config = OmegaConf.load(self._server_config_path)
|
| 78 |
+
server_config = OmegaConf.to_container(server_config, resolve=True)
|
| 79 |
+
server_config = OmegaConf.create(server_config)
|
| 80 |
+
return server_config
|
| 81 |
+
|
| 82 |
+
def _initialize_config_parameters(self):
|
| 83 |
+
"""Initialize all configuration parameters from the loaded config."""
|
| 84 |
+
# Default constants
|
| 85 |
+
self.SAMPLE_RATE = 16000
|
| 86 |
+
self.RAW_AUDIO_FRAME_LEN_IN_SECS = 0.016
|
| 87 |
+
self.SYSTEM_PROMPT = " ".join(
|
| 88 |
+
[
|
| 89 |
+
"You are a helpful AI agent named Lisa.",
|
| 90 |
+
"Begin by warmly greeting the user and introducing yourself in one sentence.",
|
| 91 |
+
"Keep your answers concise and to the point.",
|
| 92 |
+
]
|
| 93 |
+
)
|
| 94 |
+
|
| 95 |
+
# Transport configuration
|
| 96 |
+
self.TRANSPORT_AUDIO_OUT_10MS_CHUNKS = self.server_config.transport.audio_out_10ms_chunks
|
| 97 |
+
|
| 98 |
+
# VAD configuration
|
| 99 |
+
self.vad_params = VADParams(
|
| 100 |
+
confidence=self.server_config.vad.confidence,
|
| 101 |
+
start_secs=self.server_config.vad.start_secs,
|
| 102 |
+
stop_secs=self.server_config.vad.stop_secs,
|
| 103 |
+
min_volume=self.server_config.vad.min_volume,
|
| 104 |
+
)
|
| 105 |
+
# STT configuration
|
| 106 |
+
self._configure_stt()
|
| 107 |
+
|
| 108 |
+
# Diarization configuration
|
| 109 |
+
self._configure_diarization()
|
| 110 |
+
|
| 111 |
+
# Turn taking configuration
|
| 112 |
+
self._configure_turn_taking()
|
| 113 |
+
|
| 114 |
+
# LLM configuration
|
| 115 |
+
self._configure_llm()
|
| 116 |
+
|
| 117 |
+
# TTS configuration
|
| 118 |
+
self._configure_tts()
|
| 119 |
+
|
| 120 |
+
def _configure_stt(self):
|
| 121 |
+
"""Configure STT parameters."""
|
| 122 |
+
self.STT_MODEL = self.server_config.stt.model
|
| 123 |
+
self.STT_DEVICE = self.server_config.stt.device
|
| 124 |
+
# Apply STT-specific configuration based on model type
|
| 125 |
+
# Try to get STT config file name from server config first
|
| 126 |
+
if self.server_config.stt.get("model_config", None) is not None:
|
| 127 |
+
yaml_file_name = os.path.basename(self.server_config.stt.model_config)
|
| 128 |
+
else:
|
| 129 |
+
# Get STT configuration from registry
|
| 130 |
+
if str(self.STT_MODEL).endswith(".nemo"):
|
| 131 |
+
model_name = os.path.splitext(os.path.basename(self.STT_MODEL))[0]
|
| 132 |
+
else:
|
| 133 |
+
model_name = self.STT_MODEL
|
| 134 |
+
if model_name in self.model_registry.stt_models:
|
| 135 |
+
yaml_file_name = self.model_registry.stt_models[model_name].yaml_id
|
| 136 |
+
else:
|
| 137 |
+
error_msg = f"STT model {model_name} is not in model registry: {self.model_registry.stt_models}."
|
| 138 |
+
logger.error(error_msg)
|
| 139 |
+
raise ValueError(error_msg)
|
| 140 |
+
|
| 141 |
+
stt_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/stt_configs/{yaml_file_name}"
|
| 142 |
+
if not os.path.exists(stt_config_path):
|
| 143 |
+
raise FileNotFoundError(f"STT config file not found at {stt_config_path}")
|
| 144 |
+
stt_config = OmegaConf.load(stt_config_path)
|
| 145 |
+
|
| 146 |
+
# merge stt config with server config
|
| 147 |
+
for key in stt_config:
|
| 148 |
+
if key in self.server_config.stt and self.server_config.stt[key] != stt_config[key]:
|
| 149 |
+
logger.info(
|
| 150 |
+
f"STT config field `{key}` is overridden from `{self.server_config.stt[key]}` "
|
| 151 |
+
f"to `{stt_config[key]}` by {stt_config_path}"
|
| 152 |
+
)
|
| 153 |
+
self.server_config.stt[key] = stt_config[key]
|
| 154 |
+
|
| 155 |
+
logger.info(f"Final STT config: {self.server_config.stt}")
|
| 156 |
+
|
| 157 |
+
audio_chunk_size_in_secs = self.server_config.stt.get("audio_chunk_size_in_secs", 0.08)
|
| 158 |
+
buffer_size = audio_chunk_size_in_secs // self.RAW_AUDIO_FRAME_LEN_IN_SECS
|
| 159 |
+
self.stt_params = NeMoSTTInputParams(
|
| 160 |
+
att_context_size=self.server_config.stt.att_context_size,
|
| 161 |
+
frame_len_in_secs=self.server_config.stt.frame_len_in_secs,
|
| 162 |
+
raw_audio_frame_len_in_secs=self.RAW_AUDIO_FRAME_LEN_IN_SECS,
|
| 163 |
+
buffer_size=buffer_size,
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
+
def _configure_diarization(self):
|
| 167 |
+
"""
|
| 168 |
+
Configure diarization parameters.
|
| 169 |
+
Currently only NeMo End-to-End Diarization is supported.
|
| 170 |
+
"""
|
| 171 |
+
self.DIAR_MODEL = self.server_config.diar.model
|
| 172 |
+
self.USE_DIAR = self.server_config.diar.enabled
|
| 173 |
+
self.diar_params = NeMoDiarInputParams(
|
| 174 |
+
frame_len_in_secs=self.server_config.diar.frame_len_in_secs,
|
| 175 |
+
threshold=self.server_config.diar.threshold,
|
| 176 |
+
)
|
| 177 |
+
|
| 178 |
+
def _configure_turn_taking(self):
|
| 179 |
+
"""Configure turn taking parameters."""
|
| 180 |
+
self.TURN_TAKING_BACKCHANNEL_PHRASES_PATH = self.server_config.turn_taking.backchannel_phrases_path
|
| 181 |
+
self.TURN_TAKING_MAX_BUFFER_SIZE = self.server_config.turn_taking.max_buffer_size
|
| 182 |
+
self.TURN_TAKING_BOT_STOP_DELAY = self.server_config.turn_taking.bot_stop_delay
|
| 183 |
+
|
| 184 |
+
def _configure_llm(self):
|
| 185 |
+
"""Configure LLM parameters."""
|
| 186 |
+
llm_model_id = self.server_config.llm.model
|
| 187 |
+
is_registry_model = False
|
| 188 |
+
|
| 189 |
+
# Try to get LLM config file name from server config first
|
| 190 |
+
if self.server_config.llm.get("model_config", None) is not None:
|
| 191 |
+
yaml_file_name = os.path.basename(self.server_config.llm.model_config)
|
| 192 |
+
else:
|
| 193 |
+
# Get LLM configuration from registry
|
| 194 |
+
if llm_model_id in self.model_registry.llm_models:
|
| 195 |
+
yaml_file_name = self.model_registry.llm_models[llm_model_id].yaml_id
|
| 196 |
+
is_registry_model = True
|
| 197 |
+
else:
|
| 198 |
+
logger.warning(
|
| 199 |
+
f"LLM model {llm_model_id} is not included in the model registry. "
|
| 200 |
+
"Using a generic HuggingFace LLM config instead."
|
| 201 |
+
)
|
| 202 |
+
yaml_file_name = self.model_registry.llm_models[self._generic_hf_llm_model_id].yaml_id
|
| 203 |
+
|
| 204 |
+
# Load and merge LLM configuration
|
| 205 |
+
llm_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/llm_configs/{yaml_file_name}"
|
| 206 |
+
|
| 207 |
+
if (
|
| 208 |
+
is_registry_model
|
| 209 |
+
and self.model_registry.llm_models[llm_model_id].get("reasoning_supported", False)
|
| 210 |
+
and self.server_config.llm.get("enable_reasoning", False)
|
| 211 |
+
):
|
| 212 |
+
llm_config_path = llm_config_path.replace(".yaml", "_think.yaml")
|
| 213 |
+
|
| 214 |
+
if not os.path.exists(llm_config_path):
|
| 215 |
+
raise FileNotFoundError(f"LLM config file not found at {llm_config_path}")
|
| 216 |
+
logger.info(f"Loading LLM config from: {llm_config_path}")
|
| 217 |
+
|
| 218 |
+
llm_config = OmegaConf.load(llm_config_path)
|
| 219 |
+
# merge llm config with server config
|
| 220 |
+
# print the override keys
|
| 221 |
+
for key in llm_config:
|
| 222 |
+
if key in self.server_config.llm and self.server_config.llm[key] != llm_config[key]:
|
| 223 |
+
logger.info(
|
| 224 |
+
f"LLM config field `{key}` is overridden from `{self.server_config.llm[key]}` to "
|
| 225 |
+
f"`{llm_config[key]}` by {llm_config_path}"
|
| 226 |
+
)
|
| 227 |
+
self.server_config.llm[key] = llm_config[key]
|
| 228 |
+
|
| 229 |
+
logger.info(f"Final LLM config: {self.server_config.llm}")
|
| 230 |
+
|
| 231 |
+
# Configure system prompt
|
| 232 |
+
self.SYSTEM_ROLE = self.server_config.llm.get("system_role", "system")
|
| 233 |
+
if self.server_config.llm.get("system_prompt", None) is not None:
|
| 234 |
+
system_prompt = self.server_config.llm.system_prompt
|
| 235 |
+
if os.path.isfile(system_prompt):
|
| 236 |
+
with open(system_prompt, "r") as f:
|
| 237 |
+
system_prompt = f.read()
|
| 238 |
+
self.SYSTEM_PROMPT = system_prompt
|
| 239 |
+
else:
|
| 240 |
+
logger.info(f"No system prompt provided, using default system prompt: {self.SYSTEM_PROMPT}")
|
| 241 |
+
|
| 242 |
+
if self.server_config.llm.get("system_prompt_suffix", None) is not None:
|
| 243 |
+
self.SYSTEM_PROMPT += "\n" + self.server_config.llm.system_prompt_suffix
|
| 244 |
+
logger.info(f"Adding system prompt suffix: {self.server_config.llm.system_prompt_suffix}")
|
| 245 |
+
|
| 246 |
+
logger.info(f"System prompt: {self.SYSTEM_PROMPT}")
|
| 247 |
+
|
| 248 |
+
def _configure_tts(self):
|
| 249 |
+
"""Configure TTS parameters."""
|
| 250 |
+
tts_model_id = self.server_config.tts.model
|
| 251 |
+
|
| 252 |
+
# Try to get TTS config file name from server config first
|
| 253 |
+
if self.server_config.tts.get("model_config", None) is not None:
|
| 254 |
+
yaml_file_name = os.path.basename(self.server_config.tts.model_config)
|
| 255 |
+
else:
|
| 256 |
+
# Get TTS configuration from registry
|
| 257 |
+
if tts_model_id in self.model_registry.tts_models:
|
| 258 |
+
yaml_file_name = self.model_registry.tts_models[tts_model_id].yaml_id
|
| 259 |
+
else:
|
| 260 |
+
error_msg = f"TTS model {tts_model_id} is not in model registry: {self.model_registry.tts_models}"
|
| 261 |
+
logger.error(error_msg)
|
| 262 |
+
raise ValueError(error_msg)
|
| 263 |
+
|
| 264 |
+
tts_config_path = f"{os.path.abspath(self._server_base_path)}/server_configs/tts_configs/{yaml_file_name}"
|
| 265 |
+
if not os.path.exists(tts_config_path):
|
| 266 |
+
raise FileNotFoundError(f"Default TTS config file not found at {tts_config_path}")
|
| 267 |
+
tts_config = OmegaConf.load(tts_config_path)
|
| 268 |
+
|
| 269 |
+
# merge tts config with server config
|
| 270 |
+
for key in tts_config:
|
| 271 |
+
if key in self.server_config.tts and self.server_config.tts[key] != tts_config[key]:
|
| 272 |
+
logger.info(
|
| 273 |
+
f"TTS config field `{key}` is overridden from `{self.server_config.tts[key]}` to "
|
| 274 |
+
f"`{tts_config[key]}` by {tts_config_path}"
|
| 275 |
+
)
|
| 276 |
+
self.server_config.tts[key] = tts_config[key]
|
| 277 |
+
|
| 278 |
+
logger.info(f"Final TTS config: {self.server_config.tts}")
|
| 279 |
+
|
| 280 |
+
# Extract TTS parameters
|
| 281 |
+
self.TTS_MAIN_MODEL_ID = self.server_config.tts.get("main_model_id", None)
|
| 282 |
+
self.TTS_SUB_MODEL_ID = self.server_config.tts.get("sub_model_id", None)
|
| 283 |
+
self.TTS_DEVICE = self.server_config.tts.get("device", None)
|
| 284 |
+
|
| 285 |
+
# Handle optional TTS parameters
|
| 286 |
+
self.TTS_THINK_TOKENS = self.server_config.tts.get("think_tokens", None)
|
| 287 |
+
if self.TTS_THINK_TOKENS is not None:
|
| 288 |
+
self.TTS_THINK_TOKENS = OmegaConf.to_container(self.TTS_THINK_TOKENS)
|
| 289 |
+
|
| 290 |
+
self.TTS_EXTRA_SEPARATOR = self.server_config.tts.get("extra_separator", None)
|
| 291 |
+
if self.TTS_EXTRA_SEPARATOR is not None:
|
| 292 |
+
self.TTS_EXTRA_SEPARATOR = OmegaConf.to_container(self.TTS_EXTRA_SEPARATOR)
|
| 293 |
+
|
| 294 |
+
def get_server_config(self) -> OmegaConf:
|
| 295 |
+
"""Get the complete server configuration."""
|
| 296 |
+
return self.server_config
|
| 297 |
+
|
| 298 |
+
def get_model_registry(self) -> Dict[str, Any]:
|
| 299 |
+
"""Get the model registry configuration."""
|
| 300 |
+
return self.model_registry
|
| 301 |
+
|
| 302 |
+
def get_vad_params(self) -> VADParams:
|
| 303 |
+
"""Get VAD parameters."""
|
| 304 |
+
return self.vad_params
|
| 305 |
+
|
| 306 |
+
def get_stt_params(self) -> NeMoSTTInputParams:
|
| 307 |
+
"""Get STT parameters."""
|
| 308 |
+
return self.stt_params
|
| 309 |
+
|
| 310 |
+
def get_diar_params(self) -> NeMoDiarInputParams:
|
| 311 |
+
"""Get diarization parameters."""
|
| 312 |
+
return self.diar_params
|
nemo/agents/voice_agent/utils/tool_calling/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
nemo/agents/voice_agent/utils/tool_calling/basic_tools.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import asyncio
|
| 16 |
+
import python_weather
|
| 17 |
+
from loguru import logger
|
| 18 |
+
from pipecat.frames.frames import LLMTextFrame, TTSSpeakFrame
|
| 19 |
+
from pipecat.processors.frame_processor import FrameDirection
|
| 20 |
+
from pipecat.services.llm_service import FunctionCallParams
|
| 21 |
+
|
| 22 |
+
HTTP_REQUEST_TIMEOUT = 10.0
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
async def tool_get_city_weather(params: FunctionCallParams, city_name: str):
|
| 26 |
+
"""Get the current weather of a city. The result includes city name, weather description,
|
| 27 |
+
temperature, wind speed, wind direction, precipitation, humidity, visibility, and UV index.
|
| 28 |
+
|
| 29 |
+
Args:
|
| 30 |
+
city_name: The name of the city to get the weather of. For example, "London", "Beijing", "Paris".
|
| 31 |
+
Other examples are: "Paris, TX, US", "Paris, FR" and "Tokyo, JP".
|
| 32 |
+
"""
|
| 33 |
+
message = f"Looking up weather data for {city_name}. Please wait a moment..."
|
| 34 |
+
# Send the message to upstream so that RTVI can log it while doesn't block the actual tool call.
|
| 35 |
+
await params.llm.push_frame(LLMTextFrame(message), direction=FrameDirection.UPSTREAM)
|
| 36 |
+
# Send the message to TTS directly so that the user can hear it immediately.
|
| 37 |
+
await params.llm.push_frame(TTSSpeakFrame(message))
|
| 38 |
+
|
| 39 |
+
# The measuring unit defaults to metric (Celsius)
|
| 40 |
+
# Use imperial for Fahrenheit: python_weather.IMPERIAL
|
| 41 |
+
async with python_weather.Client(unit=python_weather.METRIC) as client:
|
| 42 |
+
# Fetch a weather forecast from a city
|
| 43 |
+
logger.debug(f"Fetching weather forecast for `{city_name}`")
|
| 44 |
+
try:
|
| 45 |
+
weather: python_weather.Forecast = await asyncio.wait_for(
|
| 46 |
+
client.get(city_name),
|
| 47 |
+
timeout=HTTP_REQUEST_TIMEOUT,
|
| 48 |
+
)
|
| 49 |
+
except asyncio.TimeoutError:
|
| 50 |
+
error_msg = f"python_weather API request timed out after {HTTP_REQUEST_TIMEOUT} seconds for `{city_name}`"
|
| 51 |
+
logger.error(error_msg)
|
| 52 |
+
await params.result_callback({"error": error_msg})
|
| 53 |
+
return
|
| 54 |
+
except Exception as e:
|
| 55 |
+
error_msg = f"Error fetching weather forecast for `{city_name}`: {str(e)}"
|
| 56 |
+
logger.error(error_msg)
|
| 57 |
+
await params.result_callback({"error": error_msg})
|
| 58 |
+
return
|
| 59 |
+
|
| 60 |
+
results = {
|
| 61 |
+
"city": city_name,
|
| 62 |
+
"description": str(weather.description),
|
| 63 |
+
"temperature": f"{weather.temperature} degrees Celsius",
|
| 64 |
+
"wind_speed": f"{weather.wind_speed} kilometers per hour",
|
| 65 |
+
"wind_direction": str(weather.wind_direction.name),
|
| 66 |
+
"precipitation": f"{weather.precipitation} millimeters",
|
| 67 |
+
"humidity": f"{weather.humidity} percent",
|
| 68 |
+
"visibility": f"{weather.visibility} kilometers",
|
| 69 |
+
"uv_index": str(weather.ultraviolet),
|
| 70 |
+
}
|
| 71 |
+
logger.debug(f"Weather results for {city_name}: {results}")
|
| 72 |
+
await params.result_callback(results)
|
nemo/agents/voice_agent/utils/tool_calling/mixins.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from loguru import logger
|
| 16 |
+
from pipecat.adapters.schemas.direct_function import DirectFunction
|
| 17 |
+
from pipecat.adapters.schemas.tools_schema import ToolsSchema
|
| 18 |
+
from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
|
| 19 |
+
from pipecat.services.openai.llm import OpenAILLMService
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class ToolCallingMixin:
|
| 23 |
+
"""
|
| 24 |
+
A mixin class for tool calling.
|
| 25 |
+
Subclasses must implement the `setup_tool_calling` method to register all available tools
|
| 26 |
+
using `self.register_direct_function()`. Then the `__init__` method of the subclass should
|
| 27 |
+
call the `setup_tool_calling` method to register the tools.
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
def setup_tool_calling(self):
|
| 31 |
+
"""
|
| 32 |
+
Setup the tool calling mixin by registering all available tools using self.register_direct_function().
|
| 33 |
+
"""
|
| 34 |
+
raise NotImplementedError(
|
| 35 |
+
"Subclasses must implement this method to register all available functions "
|
| 36 |
+
"using self.register_direct_function()"
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
def register_direct_function(self, function_name: str, function: DirectFunction):
|
| 40 |
+
"""
|
| 41 |
+
Register a direct function to be called by the LLM.
|
| 42 |
+
|
| 43 |
+
Args:
|
| 44 |
+
function_name: The name of the function to register.
|
| 45 |
+
function: The direct function to register.
|
| 46 |
+
"""
|
| 47 |
+
if not hasattr(self, "direct_functions"):
|
| 48 |
+
self.direct_functions = {}
|
| 49 |
+
logger.info(
|
| 50 |
+
f"[{self.__class__.__name__}] Registering direct function name {function_name} to "
|
| 51 |
+
f"{function.__module__ + '.' + function.__qualname__}"
|
| 52 |
+
)
|
| 53 |
+
self.direct_functions[function_name] = function
|
| 54 |
+
|
| 55 |
+
@property
|
| 56 |
+
def available_tools(self) -> dict[str, DirectFunction]:
|
| 57 |
+
"""
|
| 58 |
+
Return a dictionary of available tools, where the key is the tool name and the value is the direct function.
|
| 59 |
+
"""
|
| 60 |
+
tools = {}
|
| 61 |
+
for function_name, function in self.direct_functions.items():
|
| 62 |
+
tools[function_name] = function
|
| 63 |
+
return tools
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def register_direct_tools_to_llm(
|
| 67 |
+
*,
|
| 68 |
+
llm: OpenAILLMService,
|
| 69 |
+
context: OpenAILLMContext,
|
| 70 |
+
tool_mixins: list[ToolCallingMixin] = [],
|
| 71 |
+
tools: list[DirectFunction] = [],
|
| 72 |
+
cancel_on_interruption: bool = True,
|
| 73 |
+
) -> None:
|
| 74 |
+
"""
|
| 75 |
+
Register direct tools to the LLM.
|
| 76 |
+
Args:
|
| 77 |
+
llm: The LLM service to use.
|
| 78 |
+
context: The LLM context to use.
|
| 79 |
+
tools: The list of tools (instances of either `DirectFunction` or `ToolCallingMixin`) to use.
|
| 80 |
+
"""
|
| 81 |
+
all_tools = []
|
| 82 |
+
for tool in tool_mixins:
|
| 83 |
+
if not isinstance(tool, ToolCallingMixin):
|
| 84 |
+
logger.warning(f"Tool {tool.__class__.__name__} is not a ToolCallingMixin, skipping.")
|
| 85 |
+
continue
|
| 86 |
+
for function_name, function in tool.available_tools.items():
|
| 87 |
+
logger.info(f"Registering direct function {function_name} from {tool.__class__.__name__}")
|
| 88 |
+
all_tools.append(function)
|
| 89 |
+
|
| 90 |
+
for tool in tools:
|
| 91 |
+
logger.info(f"Registering direct function: {tool.__module__ + '.' + tool.__qualname__}")
|
| 92 |
+
all_tools.append(tool)
|
| 93 |
+
|
| 94 |
+
if not all_tools:
|
| 95 |
+
logger.warning("No direct tools provided.")
|
| 96 |
+
return
|
| 97 |
+
else:
|
| 98 |
+
logger.info(f"Registering {len(all_tools)} direct tools to the LLM.")
|
| 99 |
+
|
| 100 |
+
tools_schema = ToolsSchema(standard_tools=all_tools)
|
| 101 |
+
context.set_tools(tools_schema)
|
| 102 |
+
|
| 103 |
+
for tool in all_tools:
|
| 104 |
+
llm.register_direct_function(tool, cancel_on_interruption=cancel_on_interruption)
|
nemo/collections/asr/README.md
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Automatic Speech Recognition (ASR)
|
| 2 |
+
|
| 3 |
+
## Key Features
|
| 4 |
+
|
| 5 |
+
* [HuggingFace Space for Audio Transcription (File, Microphone and YouTube)](https://huggingface.co/spaces/smajumdar/nemo_multilingual_language_id)
|
| 6 |
+
* [Pretrained models](https://ngc.nvidia.com/catalog/collections/nvidia:nemo_asr) available in 14+ languages
|
| 7 |
+
* [Automatic Speech Recognition (ASR)](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/intro.html)
|
| 8 |
+
* Supported ASR [models](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/models.html):
|
| 9 |
+
* Jasper, QuartzNet, CitriNet, ContextNet
|
| 10 |
+
* Conformer-CTC, Conformer-Transducer, FastConformer-CTC, FastConformer-Transducer
|
| 11 |
+
* Squeezeformer-CTC and Squeezeformer-Transducer
|
| 12 |
+
* LSTM-Transducer (RNNT) and LSTM-CTC
|
| 13 |
+
* Supports the following decoders/losses:
|
| 14 |
+
* CTC
|
| 15 |
+
* Transducer/RNNT
|
| 16 |
+
* Hybrid Transducer/CTC
|
| 17 |
+
* NeMo Original [Multi-blank Transducers](https://arxiv.org/abs/2211.03541) and [Token-and-Duration Transducers (TDT)](https://arxiv.org/abs/2304.06795)
|
| 18 |
+
* Streaming/Buffered ASR (CTC/Transducer) - [Chunked Inference Examples](https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_chunked_inference)
|
| 19 |
+
* [Cache-aware Streaming Conformer](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/models.html#cache-aware-streaming-conformer) with multiple lookaheads (including microphone streaming [tutorial](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/Online_ASR_Microphone_Demo_Cache_Aware_Streaming.ipynb).
|
| 20 |
+
* Beam Search decoding
|
| 21 |
+
* [Language Modelling for ASR (CTC and RNNT)](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html): N-gram LM in fusion with Beam Search decoding, Neural Rescoring with Transformer
|
| 22 |
+
* [Support of long audios for Conformer with memory efficient local attention](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html#inference-on-long-audio)
|
| 23 |
+
* [Speech Classification, Speech Command Recognition and Language Identification](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/intro.html): MatchboxNet (Command Recognition), AmberNet (LangID)
|
| 24 |
+
* [Voice activity Detection (VAD)](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/asr/speech_classification/models.html#marblenet-vad): MarbleNet
|
| 25 |
+
* ASR with VAD Inference - [Example](https://github.com/NVIDIA/NeMo/tree/stable/examples/asr/asr_vad)
|
| 26 |
+
* [Speaker Recognition](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_recognition/intro.html): TitaNet, ECAPA_TDNN, SpeakerNet
|
| 27 |
+
* [Speaker Diarization](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speaker_diarization/intro.html)
|
| 28 |
+
* Clustering Diarizer: TitaNet, ECAPA_TDNN, SpeakerNet
|
| 29 |
+
* Neural Diarizer: Sortformer
|
| 30 |
+
* [Speech Intent Detection and Slot Filling](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_intent_slot/intro.html): Conformer-Transformer
|
| 31 |
+
|
| 32 |
+
You can also get a high-level overview of NeMo ASR by watching the talk *NVIDIA NeMo: Toolkit for Conversational AI*, presented at PyData Yerevan 2022:
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
[](https://www.youtube.com/embed/J-P6Sczmas8?mute=0&start=14&autoplay=0
|
| 37 |
+
"NeMo presentation at PyData@Yerevan 2022")
|
nemo/collections/asr/data/audio_to_diar_label.py
CHANGED
|
@@ -13,140 +13,18 @@
|
|
| 13 |
# limitations under the License.
|
| 14 |
|
| 15 |
import os
|
| 16 |
-
from collections import OrderedDict
|
| 17 |
-
from statistics import mode
|
| 18 |
from typing import Dict, List, Optional, Tuple
|
| 19 |
|
| 20 |
import numpy as np
|
| 21 |
import torch
|
| 22 |
|
| 23 |
-
from nemo.collections.asr.parts.utils.
|
| 24 |
-
from nemo.collections.
|
| 25 |
-
from nemo.collections.common.parts.preprocessing.collections import (
|
| 26 |
-
DiarizationSpeechLabel,
|
| 27 |
-
EndtoEndDiarizationSpeechLabel,
|
| 28 |
-
)
|
| 29 |
from nemo.core.classes import Dataset
|
| 30 |
-
from nemo.core.neural_types import AudioSignal,
|
| 31 |
from nemo.utils import logging
|
| 32 |
|
| 33 |
|
| 34 |
-
def get_scale_mapping_list(uniq_timestamps):
|
| 35 |
-
"""
|
| 36 |
-
Call get_argmin_mat function to find the index of the non-base-scale segment that is closest to the
|
| 37 |
-
given base-scale segment. For each scale and each segment, a base-scale segment is assigned.
|
| 38 |
-
|
| 39 |
-
Args:
|
| 40 |
-
uniq_timestamps: (dict)
|
| 41 |
-
The dictionary containing embeddings, timestamps and multiscale weights.
|
| 42 |
-
If uniq_timestamps contains only one scale, single scale diarization is performed.
|
| 43 |
-
|
| 44 |
-
Returns:
|
| 45 |
-
scale_mapping_argmat (torch.tensor):
|
| 46 |
-
|
| 47 |
-
The element at the m-th row and the n-th column of the scale mapping matrix indicates the (m+1)-th scale
|
| 48 |
-
segment index which has the closest center distance with (n+1)-th segment in the base scale.
|
| 49 |
-
|
| 50 |
-
- Example:
|
| 51 |
-
`scale_mapping_argmat[2][101] = 85`
|
| 52 |
-
|
| 53 |
-
In the above example, the code snippet means that 86-th segment in the 3rd scale (python index is 2) is
|
| 54 |
-
mapped to the 102-th segment in the base scale. Thus, the longer segments bound to have more repeating
|
| 55 |
-
numbers since multiple base scale segments (since the base scale has the shortest length) fall into the
|
| 56 |
-
range of the longer segments. At the same time, each row contains N numbers of indices where N is number
|
| 57 |
-
of segments in the base-scale (i.e., the finest scale).
|
| 58 |
-
"""
|
| 59 |
-
timestamps_in_scales = []
|
| 60 |
-
for key, val in uniq_timestamps['scale_dict'].items():
|
| 61 |
-
timestamps_in_scales.append(torch.tensor(val['time_stamps']))
|
| 62 |
-
session_scale_mapping_list = get_argmin_mat(timestamps_in_scales)
|
| 63 |
-
scale_mapping_argmat = [[] for _ in range(len(uniq_timestamps['scale_dict'].keys()))]
|
| 64 |
-
for scale_idx in range(len(session_scale_mapping_list)):
|
| 65 |
-
scale_mapping_argmat[scale_idx] = session_scale_mapping_list[scale_idx]
|
| 66 |
-
scale_mapping_argmat = torch.stack(scale_mapping_argmat)
|
| 67 |
-
return scale_mapping_argmat
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
def extract_seg_info_from_rttm(rttm_lines, mapping_dict=None, target_spks=None):
|
| 71 |
-
"""
|
| 72 |
-
Get RTTM lines containing speaker labels, start time and end time. target_spks contains two targeted
|
| 73 |
-
speaker indices for creating groundtruth label files. Only speakers in target_spks variable will be
|
| 74 |
-
included in the output lists.
|
| 75 |
-
|
| 76 |
-
Args:
|
| 77 |
-
uniq_id (str):
|
| 78 |
-
Unique file ID that refers to an input audio file and corresponding RTTM (Annotation) file.
|
| 79 |
-
rttm_lines (list):
|
| 80 |
-
List containing RTTM lines in str format.
|
| 81 |
-
mapping_dict (dict):
|
| 82 |
-
Mapping between the estimated speakers and the speakers in the ground-truth annotation.
|
| 83 |
-
`mapping_dict` variable is only provided when the inference mode is running in sequence-eval mode.
|
| 84 |
-
Sequence eval mode uses the mapping between the estimated speakers and the speakers
|
| 85 |
-
in ground-truth annotation.
|
| 86 |
-
Returns:
|
| 87 |
-
rttm_tup (tuple):
|
| 88 |
-
Tuple containing lists of start time, end time and speaker labels.
|
| 89 |
-
|
| 90 |
-
"""
|
| 91 |
-
stt_list, end_list, speaker_list, pairwise_infer_spks = [], [], [], []
|
| 92 |
-
if target_spks:
|
| 93 |
-
inv_map = {v: k for k, v in mapping_dict.items()}
|
| 94 |
-
for spk_idx in target_spks:
|
| 95 |
-
spk_str = f'speaker_{spk_idx}'
|
| 96 |
-
if spk_str in inv_map:
|
| 97 |
-
pairwise_infer_spks.append(inv_map[spk_str])
|
| 98 |
-
for rttm_line in rttm_lines:
|
| 99 |
-
start, end, speaker = convert_rttm_line(rttm_line)
|
| 100 |
-
if target_spks is None or speaker in pairwise_infer_spks:
|
| 101 |
-
end_list.append(end)
|
| 102 |
-
stt_list.append(start)
|
| 103 |
-
speaker_list.append(speaker)
|
| 104 |
-
rttm_tup = (stt_list, end_list, speaker_list)
|
| 105 |
-
return rttm_tup
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
def assign_frame_level_spk_vector(rttm_timestamps, round_digits, frame_per_sec, target_spks, min_spks=2):
|
| 109 |
-
"""
|
| 110 |
-
Create a multi-dimensional vector sequence containing speaker timestamp information in RTTM.
|
| 111 |
-
The unit-length is the frame shift length of the acoustic feature. The feature-level annotations
|
| 112 |
-
`fr_level_target` will later be converted to base-segment level diarization label.
|
| 113 |
-
|
| 114 |
-
Args:
|
| 115 |
-
rttm_timestamps (list):
|
| 116 |
-
List containing start and end time for each speaker segment label.
|
| 117 |
-
`stt_list`, `end_list` and `speaker_list` are contained.
|
| 118 |
-
frame_per_sec (int):
|
| 119 |
-
Number of feature frames per second. This quantity is determined by
|
| 120 |
-
`window_stride` variable in preprocessing module.
|
| 121 |
-
target_spks (tuple):
|
| 122 |
-
Speaker indices that are generated from combinations.
|
| 123 |
-
If there are only one or two speakers,
|
| 124 |
-
only a single `target_spks` variable is generated.
|
| 125 |
-
|
| 126 |
-
Returns:
|
| 127 |
-
fr_level_target (torch.tensor):
|
| 128 |
-
Tensor containing label for each feature level frame.
|
| 129 |
-
"""
|
| 130 |
-
stt_list, end_list, speaker_list = rttm_timestamps
|
| 131 |
-
if len(speaker_list) == 0:
|
| 132 |
-
return None
|
| 133 |
-
else:
|
| 134 |
-
sorted_speakers = sorted(list(set(speaker_list)))
|
| 135 |
-
total_fr_len = int(max(end_list) * (10**round_digits))
|
| 136 |
-
spk_num = max(len(sorted_speakers), min_spks)
|
| 137 |
-
speaker_mapping_dict = {rttm_key: x_int for x_int, rttm_key in enumerate(sorted_speakers)}
|
| 138 |
-
fr_level_target = torch.zeros(total_fr_len, spk_num)
|
| 139 |
-
|
| 140 |
-
# If RTTM is not provided, then there is no speaker mapping dict in target_spks.
|
| 141 |
-
# Thus, return a zero-filled tensor as a placeholder.
|
| 142 |
-
for count, (stt, end, spk_rttm_key) in enumerate(zip(stt_list, end_list, speaker_list)):
|
| 143 |
-
stt, end = round(stt, round_digits), round(end, round_digits)
|
| 144 |
-
spk = speaker_mapping_dict[spk_rttm_key]
|
| 145 |
-
stt_fr, end_fr = int(round(stt, 2) * frame_per_sec), int(round(end, round_digits) * frame_per_sec)
|
| 146 |
-
fr_level_target[stt_fr:end_fr, spk] = 1
|
| 147 |
-
return fr_level_target
|
| 148 |
-
|
| 149 |
-
|
| 150 |
def get_subsegments_to_timestamps(
|
| 151 |
subsegments: List[Tuple[float, float]], feat_per_sec: int = 100, max_end_ts: float = None, decimals=2
|
| 152 |
):
|
|
@@ -281,736 +159,6 @@ def get_frame_targets_from_rttm(
|
|
| 281 |
return feat_level_target
|
| 282 |
|
| 283 |
|
| 284 |
-
class _AudioMSDDTrainDataset(Dataset):
|
| 285 |
-
"""
|
| 286 |
-
Dataset class that loads a json file containing paths to audio files,
|
| 287 |
-
RTTM files and number of speakers. This Dataset class is designed for
|
| 288 |
-
training or fine-tuning speaker embedding extractor and diarization decoder
|
| 289 |
-
at the same time.
|
| 290 |
-
|
| 291 |
-
Example:
|
| 292 |
-
{"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
|
| 293 |
-
"rttm_filepath": "/path/to/diar_label_0.rttm}
|
| 294 |
-
...
|
| 295 |
-
{"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
|
| 296 |
-
"rttm_filepath": "/path/to/diar_label_n.rttm}
|
| 297 |
-
|
| 298 |
-
Args:
|
| 299 |
-
manifest_filepath (str):
|
| 300 |
-
Path to input manifest json files.
|
| 301 |
-
multiscale_args_dict (dict):
|
| 302 |
-
Dictionary containing the parameters for multiscale segmentation and clustering.
|
| 303 |
-
emb_dir (str):
|
| 304 |
-
Path to a temporary folder where segmentation information for embedding extraction is saved.
|
| 305 |
-
soft_label_thres (float):
|
| 306 |
-
Threshold that determines the label of each segment based on RTTM file information.
|
| 307 |
-
featurizer:
|
| 308 |
-
Featurizer instance for generating features from the raw waveform.
|
| 309 |
-
window_stride (float):
|
| 310 |
-
Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
|
| 311 |
-
emb_batch_size (int):
|
| 312 |
-
Number of embedding vectors that are trained with attached computational graphs.
|
| 313 |
-
pairwise_infer (bool):
|
| 314 |
-
This variable should be True if dataloader is created for an inference task.
|
| 315 |
-
random_flip (bool):
|
| 316 |
-
If True, the two labels and input signals are randomly flipped per every epoch while training.
|
| 317 |
-
"""
|
| 318 |
-
|
| 319 |
-
@property
|
| 320 |
-
def output_types(self) -> Optional[Dict[str, NeuralType]]:
|
| 321 |
-
"""Returns definitions of module output ports."""
|
| 322 |
-
output_types = {
|
| 323 |
-
"features": NeuralType(('B', 'T'), AudioSignal()),
|
| 324 |
-
"feature_length": NeuralType(('B'), LengthsType()),
|
| 325 |
-
"ms_seg_timestamps": NeuralType(('B', 'C', 'T', 'D'), LengthsType()),
|
| 326 |
-
"ms_seg_counts": NeuralType(('B', 'C'), LengthsType()),
|
| 327 |
-
"clus_label_index": NeuralType(('B', 'T'), LengthsType()),
|
| 328 |
-
"scale_mapping": NeuralType(('B', 'C', 'T'), LengthsType()),
|
| 329 |
-
"targets": NeuralType(('B', 'T', 'C'), ProbsType()),
|
| 330 |
-
}
|
| 331 |
-
|
| 332 |
-
return output_types
|
| 333 |
-
|
| 334 |
-
def __init__(
|
| 335 |
-
self,
|
| 336 |
-
*,
|
| 337 |
-
manifest_filepath: str,
|
| 338 |
-
multiscale_args_dict: str,
|
| 339 |
-
emb_dir: str,
|
| 340 |
-
soft_label_thres: float,
|
| 341 |
-
featurizer,
|
| 342 |
-
window_stride,
|
| 343 |
-
emb_batch_size,
|
| 344 |
-
pairwise_infer: bool,
|
| 345 |
-
random_flip: bool = True,
|
| 346 |
-
global_rank: int = 0,
|
| 347 |
-
):
|
| 348 |
-
super().__init__()
|
| 349 |
-
self.collection = DiarizationSpeechLabel(
|
| 350 |
-
manifests_files=manifest_filepath.split(','),
|
| 351 |
-
emb_dict=None,
|
| 352 |
-
clus_label_dict=None,
|
| 353 |
-
pairwise_infer=pairwise_infer,
|
| 354 |
-
)
|
| 355 |
-
self.featurizer = featurizer
|
| 356 |
-
self.multiscale_args_dict = multiscale_args_dict
|
| 357 |
-
self.emb_dir = emb_dir
|
| 358 |
-
self.round_digits = 2
|
| 359 |
-
self.decim = 10**self.round_digits
|
| 360 |
-
self.soft_label_thres = soft_label_thres
|
| 361 |
-
self.pairwise_infer = pairwise_infer
|
| 362 |
-
self.max_spks = 2
|
| 363 |
-
self.frame_per_sec = int(1 / window_stride)
|
| 364 |
-
self.emb_batch_size = emb_batch_size
|
| 365 |
-
self.random_flip = random_flip
|
| 366 |
-
self.global_rank = global_rank
|
| 367 |
-
self.manifest_filepath = manifest_filepath
|
| 368 |
-
self.multiscale_timestamp_dict = prepare_split_data(
|
| 369 |
-
self.manifest_filepath,
|
| 370 |
-
self.emb_dir,
|
| 371 |
-
self.multiscale_args_dict,
|
| 372 |
-
self.global_rank,
|
| 373 |
-
)
|
| 374 |
-
|
| 375 |
-
def __len__(self):
|
| 376 |
-
return len(self.collection)
|
| 377 |
-
|
| 378 |
-
def assign_labels_to_longer_segs(self, uniq_id, base_scale_clus_label):
|
| 379 |
-
"""
|
| 380 |
-
Assign the generated speaker labels from the base scale (the finest scale) to the longer scales.
|
| 381 |
-
This process is needed to get the cluster labels for each scale. The cluster labels are needed to
|
| 382 |
-
calculate the cluster-average speaker embedding for each scale.
|
| 383 |
-
|
| 384 |
-
Args:
|
| 385 |
-
uniq_id (str):
|
| 386 |
-
Unique sample ID for training.
|
| 387 |
-
base_scale_clus_label (torch.tensor):
|
| 388 |
-
Tensor variable containing the speaker labels for the base-scale segments.
|
| 389 |
-
|
| 390 |
-
Returns:
|
| 391 |
-
per_scale_clus_label (torch.tensor):
|
| 392 |
-
Tensor variable containing the speaker labels for each segment in each scale.
|
| 393 |
-
Note that the total length of the speaker label sequence differs over scale since
|
| 394 |
-
each scale has a different number of segments for the same session.
|
| 395 |
-
|
| 396 |
-
scale_mapping (torch.tensor):
|
| 397 |
-
Matrix containing the segment indices of each scale. scale_mapping is necessary for reshaping the
|
| 398 |
-
multiscale embeddings to form an input matrix for the MSDD model.
|
| 399 |
-
"""
|
| 400 |
-
per_scale_clus_label = []
|
| 401 |
-
self.scale_n = len(self.multiscale_timestamp_dict[uniq_id]['scale_dict'])
|
| 402 |
-
uniq_scale_mapping = get_scale_mapping_list(self.multiscale_timestamp_dict[uniq_id])
|
| 403 |
-
for scale_index in range(self.scale_n):
|
| 404 |
-
new_clus_label = []
|
| 405 |
-
scale_seq_len = len(self.multiscale_timestamp_dict[uniq_id]["scale_dict"][scale_index]["time_stamps"])
|
| 406 |
-
for seg_idx in range(scale_seq_len):
|
| 407 |
-
if seg_idx in uniq_scale_mapping[scale_index]:
|
| 408 |
-
seg_clus_label = mode(base_scale_clus_label[uniq_scale_mapping[scale_index] == seg_idx])
|
| 409 |
-
else:
|
| 410 |
-
seg_clus_label = 0 if len(new_clus_label) == 0 else new_clus_label[-1]
|
| 411 |
-
new_clus_label.append(seg_clus_label)
|
| 412 |
-
per_scale_clus_label.extend(new_clus_label)
|
| 413 |
-
per_scale_clus_label = torch.tensor(per_scale_clus_label)
|
| 414 |
-
return per_scale_clus_label, uniq_scale_mapping
|
| 415 |
-
|
| 416 |
-
def get_diar_target_labels(self, uniq_id, sample, fr_level_target):
|
| 417 |
-
"""
|
| 418 |
-
Convert frame-level diarization target variable into segment-level target variable.
|
| 419 |
-
Since the granularity is reduced from frame level (10ms) to segment level (100ms~500ms),
|
| 420 |
-
we need a threshold value, `soft_label_thres`, which determines the label of each segment
|
| 421 |
-
based on the overlap between a segment range (start and end time) and the frame-level target variable.
|
| 422 |
-
|
| 423 |
-
Args:
|
| 424 |
-
uniq_id (str):
|
| 425 |
-
Unique file ID that refers to an input audio file and corresponding RTTM (Annotation) file.
|
| 426 |
-
sample:
|
| 427 |
-
`DiarizationSpeechLabel` instance containing sample information such as
|
| 428 |
-
audio filepath and RTTM filepath.
|
| 429 |
-
fr_level_target (torch.tensor):
|
| 430 |
-
Tensor containing label for each feature-level frame.
|
| 431 |
-
|
| 432 |
-
Returns:
|
| 433 |
-
seg_target (torch.tensor):
|
| 434 |
-
Tensor containing binary speaker labels for base-scale segments.
|
| 435 |
-
base_clus_label (torch.tensor):
|
| 436 |
-
Representative speaker label for each segment. This variable only has one speaker label
|
| 437 |
-
for each base-scale segment.
|
| 438 |
-
-1 means that there is no corresponding speaker in the target_spks tuple.
|
| 439 |
-
"""
|
| 440 |
-
seg_target_list, base_clus_label = [], []
|
| 441 |
-
self.scale_n = len(self.multiscale_timestamp_dict[uniq_id]['scale_dict'])
|
| 442 |
-
subseg_time_stamp_list = self.multiscale_timestamp_dict[uniq_id]["scale_dict"][self.scale_n - 1]["time_stamps"]
|
| 443 |
-
for seg_stt, seg_end in subseg_time_stamp_list:
|
| 444 |
-
seg_stt_fr, seg_end_fr = int(seg_stt * self.frame_per_sec), int(seg_end * self.frame_per_sec)
|
| 445 |
-
soft_label_vec_sess = torch.sum(fr_level_target[seg_stt_fr:seg_end_fr, :], axis=0) / (
|
| 446 |
-
seg_end_fr - seg_stt_fr
|
| 447 |
-
)
|
| 448 |
-
label_int_sess = torch.argmax(soft_label_vec_sess)
|
| 449 |
-
soft_label_vec = soft_label_vec_sess.unsqueeze(0)[:, sample.target_spks].squeeze()
|
| 450 |
-
if label_int_sess in sample.target_spks and torch.sum(soft_label_vec_sess) > 0:
|
| 451 |
-
label_int = sample.target_spks.index(label_int_sess)
|
| 452 |
-
else:
|
| 453 |
-
label_int = -1
|
| 454 |
-
label_vec = (soft_label_vec > self.soft_label_thres).float()
|
| 455 |
-
seg_target_list.append(label_vec.detach())
|
| 456 |
-
base_clus_label.append(label_int)
|
| 457 |
-
seg_target = torch.stack(seg_target_list)
|
| 458 |
-
base_clus_label = torch.tensor(base_clus_label)
|
| 459 |
-
return seg_target, base_clus_label
|
| 460 |
-
|
| 461 |
-
def parse_rttm_for_ms_targets(self, sample):
|
| 462 |
-
"""
|
| 463 |
-
Generate target tensor variable by extracting groundtruth diarization labels from an RTTM file.
|
| 464 |
-
This function converts (start, end, speaker_id) format into base-scale (the finest scale) segment level
|
| 465 |
-
diarization label in a matrix form.
|
| 466 |
-
|
| 467 |
-
Example of seg_target:
|
| 468 |
-
[[0., 1.], [0., 1.], [1., 1.], [1., 0.], [1., 0.], ..., [0., 1.]]
|
| 469 |
-
|
| 470 |
-
Args:
|
| 471 |
-
sample:
|
| 472 |
-
`DiarizationSpeechLabel` instance containing sample information such as
|
| 473 |
-
audio filepath and RTTM filepath.
|
| 474 |
-
target_spks (tuple):
|
| 475 |
-
Speaker indices that are generated from combinations. If there are only one or two speakers,
|
| 476 |
-
only a single target_spks tuple is generated.
|
| 477 |
-
|
| 478 |
-
Returns:
|
| 479 |
-
clus_label_index (torch.tensor):
|
| 480 |
-
Groundtruth clustering label (cluster index for each segment) from RTTM files for training purpose.
|
| 481 |
-
seg_target (torch.tensor):
|
| 482 |
-
Tensor variable containing hard-labels of speaker activity in each base-scale segment.
|
| 483 |
-
scale_mapping (torch.tensor):
|
| 484 |
-
Matrix containing the segment indices of each scale. scale_mapping is necessary for reshaping the
|
| 485 |
-
multiscale embeddings to form an input matrix for the MSDD model.
|
| 486 |
-
|
| 487 |
-
"""
|
| 488 |
-
with open(sample.rttm_file, 'r') as file:
|
| 489 |
-
rttm_lines = file.readlines()
|
| 490 |
-
uniq_id = self.get_uniq_id_with_range(sample)
|
| 491 |
-
rttm_timestamps = extract_seg_info_from_rttm(rttm_lines)
|
| 492 |
-
fr_level_target = assign_frame_level_spk_vector(
|
| 493 |
-
rttm_timestamps, self.round_digits, self.frame_per_sec, target_spks=sample.target_spks
|
| 494 |
-
)
|
| 495 |
-
seg_target, base_clus_label = self.get_diar_target_labels(uniq_id, sample, fr_level_target)
|
| 496 |
-
clus_label_index, scale_mapping = self.assign_labels_to_longer_segs(uniq_id, base_clus_label)
|
| 497 |
-
return clus_label_index, seg_target, scale_mapping
|
| 498 |
-
|
| 499 |
-
def get_uniq_id_with_range(self, sample, deci=3):
|
| 500 |
-
"""
|
| 501 |
-
Generate unique training sample ID from unique file ID, offset and duration. The start-end time added
|
| 502 |
-
unique ID is required for identifying the sample since multiple short audio samples are generated from a single
|
| 503 |
-
audio file. The start time and end time of the audio stream uses millisecond units if `deci=3`.
|
| 504 |
-
|
| 505 |
-
Args:
|
| 506 |
-
sample:
|
| 507 |
-
`DiarizationSpeechLabel` instance from collections.
|
| 508 |
-
|
| 509 |
-
Returns:
|
| 510 |
-
uniq_id (str):
|
| 511 |
-
Unique sample ID which includes start and end time of the audio stream.
|
| 512 |
-
Example: abc1001_3122_6458
|
| 513 |
-
|
| 514 |
-
"""
|
| 515 |
-
bare_uniq_id = os.path.splitext(os.path.basename(sample.rttm_file))[0]
|
| 516 |
-
offset = str(int(round(sample.offset, deci) * pow(10, deci)))
|
| 517 |
-
endtime = str(int(round(sample.offset + sample.duration, deci) * pow(10, deci)))
|
| 518 |
-
uniq_id = f"{bare_uniq_id}_{offset}_{endtime}"
|
| 519 |
-
return uniq_id
|
| 520 |
-
|
| 521 |
-
def get_ms_seg_timestamps(self, sample):
|
| 522 |
-
"""
|
| 523 |
-
Get start and end time of each diarization frame.
|
| 524 |
-
|
| 525 |
-
Args:
|
| 526 |
-
sample:
|
| 527 |
-
`DiarizationSpeechLabel` instance from preprocessing.collections
|
| 528 |
-
Returns:
|
| 529 |
-
ms_seg_timestamps (torch.tensor):
|
| 530 |
-
Tensor containing timestamps for each frame.
|
| 531 |
-
ms_seg_counts (torch.tensor):
|
| 532 |
-
Number of segments for each scale. This information is used for reshaping embedding batch
|
| 533 |
-
during forward propagation.
|
| 534 |
-
"""
|
| 535 |
-
uniq_id = self.get_uniq_id_with_range(sample)
|
| 536 |
-
ms_seg_timestamps_list = []
|
| 537 |
-
max_seq_len = len(self.multiscale_timestamp_dict[uniq_id]["scale_dict"][self.scale_n - 1]["time_stamps"])
|
| 538 |
-
ms_seg_counts = [0 for _ in range(self.scale_n)]
|
| 539 |
-
for scale_idx in range(self.scale_n):
|
| 540 |
-
scale_ts_list = []
|
| 541 |
-
for k, (seg_stt, seg_end) in enumerate(
|
| 542 |
-
self.multiscale_timestamp_dict[uniq_id]["scale_dict"][scale_idx]["time_stamps"]
|
| 543 |
-
):
|
| 544 |
-
stt, end = (
|
| 545 |
-
int((seg_stt - sample.offset) * self.frame_per_sec),
|
| 546 |
-
int((seg_end - sample.offset) * self.frame_per_sec),
|
| 547 |
-
)
|
| 548 |
-
scale_ts_list.append(torch.tensor([stt, end]).detach())
|
| 549 |
-
ms_seg_counts[scale_idx] = len(
|
| 550 |
-
self.multiscale_timestamp_dict[uniq_id]["scale_dict"][scale_idx]["time_stamps"]
|
| 551 |
-
)
|
| 552 |
-
scale_ts = torch.stack(scale_ts_list)
|
| 553 |
-
scale_ts_padded = torch.cat([scale_ts, torch.zeros(max_seq_len - len(scale_ts_list), 2)], dim=0)
|
| 554 |
-
ms_seg_timestamps_list.append(scale_ts_padded.detach())
|
| 555 |
-
ms_seg_timestamps = torch.stack(ms_seg_timestamps_list)
|
| 556 |
-
ms_seg_counts = torch.tensor(ms_seg_counts)
|
| 557 |
-
return ms_seg_timestamps, ms_seg_counts
|
| 558 |
-
|
| 559 |
-
def __getitem__(self, index):
|
| 560 |
-
sample = self.collection[index]
|
| 561 |
-
if sample.offset is None:
|
| 562 |
-
sample.offset = 0
|
| 563 |
-
clus_label_index, targets, scale_mapping = self.parse_rttm_for_ms_targets(sample)
|
| 564 |
-
features = self.featurizer.process(sample.audio_file, offset=sample.offset, duration=sample.duration)
|
| 565 |
-
feature_length = torch.tensor(features.shape[0]).long()
|
| 566 |
-
ms_seg_timestamps, ms_seg_counts = self.get_ms_seg_timestamps(sample)
|
| 567 |
-
if self.random_flip:
|
| 568 |
-
torch.manual_seed(index)
|
| 569 |
-
flip = torch.cat([torch.randperm(self.max_spks), torch.tensor(-1).unsqueeze(0)])
|
| 570 |
-
clus_label_index, targets = flip[clus_label_index], targets[:, flip[: self.max_spks]]
|
| 571 |
-
return features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets
|
| 572 |
-
|
| 573 |
-
|
| 574 |
-
class _AudioMSDDInferDataset(Dataset):
|
| 575 |
-
"""
|
| 576 |
-
Dataset class that loads a json file containing paths to audio files,
|
| 577 |
-
RTTM files and number of speakers. This Dataset class is built for diarization inference and
|
| 578 |
-
evaluation. Speaker embedding sequences, segment timestamps, cluster-average speaker embeddings
|
| 579 |
-
are loaded from memory and fed into the dataloader.
|
| 580 |
-
|
| 581 |
-
Example:
|
| 582 |
-
{"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
|
| 583 |
-
"rttm_filepath": "/path/to/diar_label_0.rttm}
|
| 584 |
-
...
|
| 585 |
-
{"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
|
| 586 |
-
"rttm_filepath": "/path/to/diar_label_n.rttm}
|
| 587 |
-
|
| 588 |
-
Args:
|
| 589 |
-
manifest_filepath (str):
|
| 590 |
-
Path to input manifest json files.
|
| 591 |
-
emb_dict (dict):
|
| 592 |
-
Dictionary containing cluster-average embeddings and speaker mapping information.
|
| 593 |
-
emb_seq (dict):
|
| 594 |
-
Dictionary containing multiscale speaker embedding sequence,
|
| 595 |
-
scale mapping and corresponding segment timestamps.
|
| 596 |
-
clus_label_dict (dict):
|
| 597 |
-
Subsegment-level (from base-scale) speaker labels from clustering results.
|
| 598 |
-
soft_label_thres (float):
|
| 599 |
-
A threshold that determines the label of each segment based on RTTM file information.
|
| 600 |
-
featurizer:
|
| 601 |
-
Featurizer instance for generating features from raw waveform.
|
| 602 |
-
seq_eval_mode (bool):
|
| 603 |
-
If True, F1 score will be calculated for each speaker pair during inference mode.
|
| 604 |
-
window_stride (float):
|
| 605 |
-
Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
|
| 606 |
-
use_single_scale_clus (bool):
|
| 607 |
-
Use only one scale for clustering instead of using multiple scales of embeddings for clustering.
|
| 608 |
-
pairwise_infer (bool):
|
| 609 |
-
This variable should be True if dataloader is created for an inference task.
|
| 610 |
-
"""
|
| 611 |
-
|
| 612 |
-
@property
|
| 613 |
-
def output_types(self) -> Optional[Dict[str, NeuralType]]:
|
| 614 |
-
"""Returns definitions of module output ports."""
|
| 615 |
-
output_types = OrderedDict(
|
| 616 |
-
{
|
| 617 |
-
"ms_emb_seq": NeuralType(('B', 'T', 'C', 'D'), SpectrogramType()),
|
| 618 |
-
"length": NeuralType(tuple('B'), LengthsType()),
|
| 619 |
-
"ms_avg_embs": NeuralType(('B', 'C', 'D', 'C'), EncodedRepresentation()),
|
| 620 |
-
"targets": NeuralType(('B', 'T', 'C'), ProbsType()),
|
| 621 |
-
}
|
| 622 |
-
)
|
| 623 |
-
return output_types
|
| 624 |
-
|
| 625 |
-
def __init__(
|
| 626 |
-
self,
|
| 627 |
-
*,
|
| 628 |
-
manifest_filepath: str,
|
| 629 |
-
emb_dict: Dict,
|
| 630 |
-
emb_seq: Dict,
|
| 631 |
-
clus_label_dict: Dict,
|
| 632 |
-
soft_label_thres: float,
|
| 633 |
-
seq_eval_mode: bool,
|
| 634 |
-
window_stride: float,
|
| 635 |
-
use_single_scale_clus: bool,
|
| 636 |
-
pairwise_infer: bool,
|
| 637 |
-
):
|
| 638 |
-
super().__init__()
|
| 639 |
-
self.collection = DiarizationSpeechLabel(
|
| 640 |
-
manifests_files=manifest_filepath.split(','),
|
| 641 |
-
emb_dict=emb_dict,
|
| 642 |
-
clus_label_dict=clus_label_dict,
|
| 643 |
-
seq_eval_mode=seq_eval_mode,
|
| 644 |
-
pairwise_infer=pairwise_infer,
|
| 645 |
-
)
|
| 646 |
-
self.emb_dict = emb_dict
|
| 647 |
-
self.emb_seq = emb_seq
|
| 648 |
-
self.clus_label_dict = clus_label_dict
|
| 649 |
-
self.round_digits = 2
|
| 650 |
-
self.decim = 10**self.round_digits
|
| 651 |
-
self.frame_per_sec = int(1 / window_stride)
|
| 652 |
-
self.soft_label_thres = soft_label_thres
|
| 653 |
-
self.pairwise_infer = pairwise_infer
|
| 654 |
-
self.max_spks = 2
|
| 655 |
-
self.use_single_scale_clus = use_single_scale_clus
|
| 656 |
-
self.seq_eval_mode = seq_eval_mode
|
| 657 |
-
|
| 658 |
-
def __len__(self):
|
| 659 |
-
return len(self.collection)
|
| 660 |
-
|
| 661 |
-
def parse_rttm_multiscale(self, sample):
|
| 662 |
-
"""
|
| 663 |
-
Generate target tensor variable by extracting groundtruth diarization labels from an RTTM file.
|
| 664 |
-
This function is only used when ``self.seq_eval_mode=True`` and RTTM files are provided. This function converts
|
| 665 |
-
(start, end, speaker_id) format into base-scale (the finest scale) segment level diarization label in a matrix
|
| 666 |
-
form to create target matrix.
|
| 667 |
-
|
| 668 |
-
Args:
|
| 669 |
-
sample:
|
| 670 |
-
DiarizationSpeechLabel instance containing sample information such as audio filepath and RTTM filepath.
|
| 671 |
-
target_spks (tuple):
|
| 672 |
-
Two Indices of targeted speakers for evaluation.
|
| 673 |
-
Example of target_spks: (2, 3)
|
| 674 |
-
Returns:
|
| 675 |
-
seg_target (torch.tensor):
|
| 676 |
-
Tensor variable containing hard-labels of speaker activity in each base-scale segment.
|
| 677 |
-
"""
|
| 678 |
-
if sample.rttm_file is None:
|
| 679 |
-
raise ValueError(f"RTTM file is not provided for this sample {sample}")
|
| 680 |
-
rttm_lines = open(sample.rttm_file).readlines()
|
| 681 |
-
uniq_id = os.path.splitext(os.path.basename(sample.rttm_file))[0]
|
| 682 |
-
mapping_dict = self.emb_dict[max(self.emb_dict.keys())][uniq_id]['mapping']
|
| 683 |
-
rttm_timestamps = extract_seg_info_from_rttm(rttm_lines, mapping_dict, sample.target_spks)
|
| 684 |
-
fr_level_target = assign_frame_level_spk_vector(
|
| 685 |
-
rttm_timestamps, self.round_digits, self.frame_per_sec, sample.target_spks
|
| 686 |
-
)
|
| 687 |
-
seg_target = self.get_diar_target_labels_from_fr_target(uniq_id, fr_level_target)
|
| 688 |
-
return seg_target
|
| 689 |
-
|
| 690 |
-
def get_diar_target_labels_from_fr_target(self, uniq_id: str, fr_level_target: torch.Tensor) -> torch.Tensor:
|
| 691 |
-
"""
|
| 692 |
-
Generate base-scale level binary diarization label from frame-level target matrix. For the given frame-level
|
| 693 |
-
speaker target matrix fr_level_target, we count the number of frames that belong to each speaker and calculate
|
| 694 |
-
ratios for each speaker into the `soft_label_vec` variable. Finally, `soft_label_vec` variable is compared
|
| 695 |
-
with `soft_label_thres` to determine whether a label vector should contain 0 or 1 for each speaker bin.
|
| 696 |
-
Note that seg_target variable has dimension of (number of base-scale segments x 2) dimension.
|
| 697 |
-
|
| 698 |
-
Example of seg_target:
|
| 699 |
-
[[0., 1.], [0., 1.], [1., 1.], [1., 0.], [1., 0.], ..., [0., 1.]]
|
| 700 |
-
|
| 701 |
-
Args:
|
| 702 |
-
uniq_id (str):
|
| 703 |
-
Unique file ID that refers to an input audio file and corresponding RTTM (Annotation) file.
|
| 704 |
-
fr_level_target (torch.tensor):
|
| 705 |
-
frame-level binary speaker annotation (1: exist 0: non-exist) generated from RTTM file.
|
| 706 |
-
|
| 707 |
-
Returns:
|
| 708 |
-
seg_target (torch.tensor):
|
| 709 |
-
Tensor variable containing binary hard-labels of speaker activity in each base-scale segment.
|
| 710 |
-
|
| 711 |
-
"""
|
| 712 |
-
if fr_level_target is None:
|
| 713 |
-
return None
|
| 714 |
-
else:
|
| 715 |
-
seg_target_list = []
|
| 716 |
-
for seg_stt, seg_end, label_int in self.clus_label_dict[uniq_id]:
|
| 717 |
-
seg_stt_fr, seg_end_fr = int(seg_stt * self.frame_per_sec), int(seg_end * self.frame_per_sec)
|
| 718 |
-
-            soft_label_vec = torch.sum(fr_level_target[seg_stt_fr:seg_end_fr, :], axis=0) / (
-                seg_end_fr - seg_stt_fr
-            )
-            label_vec = (soft_label_vec > self.soft_label_thres).int()
-            seg_target_list.append(label_vec)
-        seg_target = torch.stack(seg_target_list)
-        return seg_target
-
-    def __getitem__(self, index):
-        sample = self.collection[index]
-        if sample.offset is None:
-            sample.offset = 0
-
-        uniq_id = os.path.splitext(os.path.basename(sample.audio_file))[0]
-        scale_n = len(self.emb_dict.keys())
-        _avg_embs = torch.stack([self.emb_dict[scale_index][uniq_id]['avg_embs'] for scale_index in range(scale_n)])
-
-        if self.pairwise_infer:
-            avg_embs = _avg_embs[:, :, self.collection[index].target_spks]
-        else:
-            avg_embs = _avg_embs
-
-        if avg_embs.shape[2] > self.max_spks:
-            raise ValueError(
-                f" avg_embs.shape[2] {avg_embs.shape[2]} should be less than or equal to "
-                f"self.max_num_speakers {self.max_spks}"
-            )
-
-        feats = []
-        for scale_index in range(scale_n):
-            repeat_mat = self.emb_seq["session_scale_mapping"][uniq_id][scale_index]
-            feats.append(self.emb_seq[scale_index][uniq_id][repeat_mat, :])
-        feats_out = torch.stack(feats).permute(1, 0, 2)
-        feats_len = feats_out.shape[0]
-
-        if self.seq_eval_mode:
-            targets = self.parse_rttm_multiscale(sample)
-        else:
-            targets = torch.zeros(feats_len, 2).float()
-
-        return feats_out, feats_len, targets, avg_embs
-
-
-def _msdd_train_collate_fn(self, batch):
-    """
-    Collate batch of variables that are needed for raw waveform to diarization label training.
-    The following variables are included in training/validation batch:
-
-    Args:
-        batch (tuple):
-            Batch tuple containing the variables for the diarization training.
-    Returns:
-        features (torch.tensor):
-            Raw waveform samples (time series) loaded from the audio_filepath in the input manifest file.
-        feature lengths (time series sample length):
-            A list of lengths of the raw waveform samples.
-        ms_seg_timestamps (torch.tensor):
-            Matrix containing the start time and end time (timestamps) for each segment and each scale.
-            ms_seg_timestamps is needed for extracting acoustic features from raw waveforms.
-        ms_seg_counts (torch.tensor):
-            Matrix containing The number of segments for each scale. ms_seg_counts is necessary for reshaping
-            the input matrix for the MSDD model.
-        clus_label_index (torch.tensor):
-            Groundtruth Clustering label (cluster index for each segment) from RTTM files for training purpose.
-            clus_label_index is necessary for calculating cluster-average embedding.
-        scale_mapping (torch.tensor):
-            Matrix containing the segment indices of each scale. scale_mapping is necessary for reshaping the
-            multiscale embeddings to form an input matrix for the MSDD model.
-        targets (torch.tensor):
-            Groundtruth Speaker label for the given input embedding sequence.
-    """
-    packed_batch = list(zip(*batch))
-    features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets = packed_batch
-    features_list, feature_length_list = [], []
-    ms_seg_timestamps_list, ms_seg_counts_list, scale_clus_label_list, scale_mapping_list, targets_list = (
-        [],
-        [],
-        [],
-        [],
-        [],
-    )
-
-    max_raw_feat_len = max([x.shape[0] for x in features])
-    max_target_len = max([x.shape[0] for x in targets])
-    max_total_seg_len = max([x.shape[0] for x in clus_label_index])
-
-    for feat, feat_len, ms_seg_ts, ms_seg_ct, scale_clus, scl_map, tgt in batch:
-        seq_len = tgt.shape[0]
-        pad_feat = (0, max_raw_feat_len - feat_len)
-        pad_tgt = (0, 0, 0, max_target_len - seq_len)
-        pad_sm = (0, max_target_len - seq_len)
-        pad_ts = (0, 0, 0, max_target_len - seq_len)
-        pad_sc = (0, max_total_seg_len - scale_clus.shape[0])
-        padded_feat = torch.nn.functional.pad(feat, pad_feat)
-        padded_tgt = torch.nn.functional.pad(tgt, pad_tgt)
-        padded_sm = torch.nn.functional.pad(scl_map, pad_sm)
-        padded_ms_seg_ts = torch.nn.functional.pad(ms_seg_ts, pad_ts)
-        padded_scale_clus = torch.nn.functional.pad(scale_clus, pad_sc)
-
-        features_list.append(padded_feat)
-        feature_length_list.append(feat_len.clone().detach())
-        ms_seg_timestamps_list.append(padded_ms_seg_ts)
-        ms_seg_counts_list.append(ms_seg_ct.clone().detach())
-        scale_clus_label_list.append(padded_scale_clus)
-        scale_mapping_list.append(padded_sm)
-        targets_list.append(padded_tgt)
-
-    features = torch.stack(features_list)
-    feature_length = torch.stack(feature_length_list)
-    ms_seg_timestamps = torch.stack(ms_seg_timestamps_list)
-    clus_label_index = torch.stack(scale_clus_label_list)
-    ms_seg_counts = torch.stack(ms_seg_counts_list)
-    scale_mapping = torch.stack(scale_mapping_list)
-    targets = torch.stack(targets_list)
-    return features, feature_length, ms_seg_timestamps, ms_seg_counts, clus_label_index, scale_mapping, targets
-
-
-def _msdd_infer_collate_fn(self, batch):
-    """
-    Collate batch of feats (speaker embeddings), feature lengths, target label sequences
-    and cluster-average embeddings.
-
-    Args:
-        batch (tuple):
-            Batch tuple containing feats, feats_len, targets and ms_avg_embs.
-    Returns:
-        feats (torch.tensor):
-            Collated speaker embedding with unified length.
-        feats_len (torch.tensor):
-            The actual length of each embedding sequence without zero padding.
-        targets (torch.tensor):
-            Groundtruth Speaker label for the given input embedding sequence.
-        ms_avg_embs (torch.tensor):
-            Cluster-average speaker embedding vectors.
-    """
-
-    packed_batch = list(zip(*batch))
-    feats, feats_len, targets, ms_avg_embs = packed_batch
-    feats_list, flen_list, targets_list, ms_avg_embs_list = [], [], [], []
-    max_audio_len = max(feats_len)
-    max_target_len = max([x.shape[0] for x in targets])
-
-    for feature, feat_len, target, ivector in batch:
-        flen_list.append(feat_len)
-        ms_avg_embs_list.append(ivector)
-        if feat_len < max_audio_len:
-            pad_a = (0, 0, 0, 0, 0, max_audio_len - feat_len)
-            pad_t = (0, 0, 0, max_target_len - target.shape[0])
-            padded_feature = torch.nn.functional.pad(feature, pad_a)
-            padded_target = torch.nn.functional.pad(target, pad_t)
-            feats_list.append(padded_feature)
-            targets_list.append(padded_target)
-        else:
-            targets_list.append(target.clone().detach())
-            feats_list.append(feature.clone().detach())
-
-    feats = torch.stack(feats_list)
-    feats_len = torch.tensor(flen_list)
-    targets = torch.stack(targets_list)
-    ms_avg_embs = torch.stack(ms_avg_embs_list)
-    return feats, feats_len, targets, ms_avg_embs
-
-
-class AudioToSpeechMSDDTrainDataset(_AudioMSDDTrainDataset):
-    """
-    Dataset class that loads a json file containing paths to audio files,
-    rttm files and number of speakers. This Dataset class is designed for
-    training or fine-tuning speaker embedding extractor and diarization decoder
-    at the same time.
-
-    Example:
-    {"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
-    "rttm_filepath": "/path/to/diar_label_0.rttm}
-    ...
-    {"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
-    "rttm_filepath": "/path/to/diar_label_n.rttm}
-
-    Args:
-        manifest_filepath (str):
-            Path to input manifest json files.
-        multiscale_args_dict (dict):
-            Dictionary containing the parameters for multiscale segmentation and clustering.
-        emb_dir (str):
-            Path to a temporary folder where segmentation information for embedding extraction is saved.
-        soft_label_thres (float):
-            A threshold that determines the label of each segment based on RTTM file information.
-        featurizer:
-            Featurizer instance for generating features from the raw waveform.
-        window_stride (float):
-            Window stride for acoustic feature. This value is used for calculating the numbers of feature-level frames.
-        emb_batch_size (int):
-            Number of embedding vectors that are trained with attached computational graphs.
-        pairwise_infer (bool):
-            This variable should be True if dataloader is created for an inference task.
-    """
-
-    def __init__(
-        self,
-        *,
-        manifest_filepath: str,
-        multiscale_args_dict: Dict,
-        emb_dir: str,
-        soft_label_thres: float,
-        featurizer,
-        window_stride,
-        emb_batch_size,
-        pairwise_infer: bool,
-        global_rank: int,
-    ):
-        super().__init__(
-            manifest_filepath=manifest_filepath,
-            multiscale_args_dict=multiscale_args_dict,
-            emb_dir=emb_dir,
-            soft_label_thres=soft_label_thres,
-            featurizer=featurizer,
-            window_stride=window_stride,
-            emb_batch_size=emb_batch_size,
-            pairwise_infer=pairwise_infer,
-            global_rank=global_rank,
-        )
-
-    def msdd_train_collate_fn(self, batch):
-        """Collate batch of audio features, feature lengths, target label sequences for training."""
-        return _msdd_train_collate_fn(self, batch)
-
-
-class AudioToSpeechMSDDInferDataset(_AudioMSDDInferDataset):
-    """
-    Dataset class that loads a json file containing paths to audio files,
-    rttm files and number of speakers. The created labels are used for diarization inference.
-
-    Example:
-    {"audio_filepath": "/path/to/audio_0.wav", "num_speakers": 2,
-    "rttm_filepath": "/path/to/diar_label_0.rttm}
-    ...
-    {"audio_filepath": "/path/to/audio_n.wav", "num_speakers": 2,
-    "rttm_filepath": "/path/to/diar_label_n.rttm}
-
-    Args:
-        manifest_filepath (str):
-            Path to input manifest json files.
-        emb_dict (dict):
-            Dictionary containing cluster-average embeddings and speaker mapping information.
-        emb_seq (dict):
-            Dictionary containing multiscale speaker embedding sequence, scale mapping
-            and corresponding segment timestamps.
-        clus_label_dict (dict):
-            Subsegment-level (from base-scale) speaker labels from clustering results.
-        soft_label_thres (float):
-            Threshold that determines speaker labels of segments depending on the overlap
-            with groundtruth speaker timestamps.
-        featurizer:
-            Featurizer instance for generating features from raw waveform.
-        use_single_scale_clus (bool):
-            Use only one scale for clustering instead of using multiple scales of embeddings for clustering.
-        seq_eval_mode (bool):
-            If True, F1 score will be calculated for each speaker pair during inference mode.
-        window_stride (float):
-            Window stride for acoustic feature. This value is used for calculating the numbers of
-            feature-level frames.
-        pairwise_infer (bool):
-            If True, this Dataset class operates in inference mode. In inference mode, a set of speakers
-            in the input audio is split into multiple pairs of speakers and speaker tuples
-            (e.g. 3 speakers: [(0,1), (1,2), (0,2)]) and then fed into the MSDD to merge the individual results.
-    """
-
-    def __init__(
-        self,
-        *,
-        manifest_filepath: str,
-        emb_dict: Dict,
-        emb_seq: Dict,
-        clus_label_dict: Dict,
-        soft_label_thres: float,
-        use_single_scale_clus: bool,
-        seq_eval_mode: bool,
-        window_stride: float,
-        pairwise_infer: bool,
-    ):
-        super().__init__(
-            manifest_filepath=manifest_filepath,
-            emb_dict=emb_dict,
-            emb_seq=emb_seq,
-            clus_label_dict=clus_label_dict,
-            soft_label_thres=soft_label_thres,
-            use_single_scale_clus=use_single_scale_clus,
-            window_stride=window_stride,
-            seq_eval_mode=seq_eval_mode,
-            pairwise_infer=pairwise_infer,
-        )
-
-    def msdd_infer_collate_fn(self, batch):
-        """Collate batch of audio features, feature lengths, target label sequences for inference."""
-        return _msdd_infer_collate_fn(self, batch)
-
-
 class _AudioToSpeechE2ESpkDiarDataset(Dataset):
     """
     Dataset class that loads a json file containing paths to audio files,
@@ -1058,6 +206,7 @@ class _AudioToSpeechE2ESpkDiarDataset(Dataset):
         session_len_sec: float,
         num_spks: int,
         featurizer,
+        fb_featurizer,
         window_stride: float,
         min_subsegment_duration: float = 0.03,
         global_rank: int = 0,
@@ -1073,6 +222,13 @@ class _AudioToSpeechE2ESpkDiarDataset(Dataset):
             round_digits=round_digits,
         )
        self.featurizer = featurizer
+        self.fb_featurizer = fb_featurizer
+        # STFT and subsampling factor parameters
+        self.n_fft = self.fb_featurizer.n_fft
+        self.hop_length = self.fb_featurizer.hop_length
+        self.stft_pad_amount = self.fb_featurizer.stft_pad_amount
+        self.subsampling_factor = subsampling_factor
+        # Annotation and target length parameters
        self.round_digits = round_digits
        self.feat_per_sec = int(1 / window_stride)
        self.diar_frame_length = round(subsampling_factor * window_stride, round_digits)
@@ -1086,10 +242,30 @@ class _AudioToSpeechE2ESpkDiarDataset(Dataset):
        self.round_digits = 2
        self.floor_decimal = 10**self.round_digits
        self.device = device
+        self.global_rank = global_rank

    def __len__(self):
        return len(self.collection)

+    def get_frame_count_from_time_series_length(self, seq_len):
+        """
+        This function is used to get the sequence length of the audio signal. This is required to match
+        the feature frame length with ASR (STT) models. This function is copied from
+        NeMo/nemo/collections/asr/parts/preprocessing/features.py::FilterbankFeatures::get_seq_len.
+
+        Args:
+            seq_len (int):
+                The sequence length of the time-series data.
+
+        Returns:
+            seq_len (int):
+                The sequence length of the feature frames.
+        """
+        pad_amount = self.stft_pad_amount * 2 if self.stft_pad_amount is not None else self.n_fft // 2 * 2
+        seq_len = torch.floor_divide((seq_len + pad_amount - self.n_fft), self.hop_length).to(dtype=torch.long)
+        frame_count = int(np.ceil(seq_len / self.subsampling_factor))
+        return frame_count
+
    def get_uniq_id_with_range(self, sample, deci=3):
        """
        Generate unique training sample ID from unique file ID, offset and duration. The start-end time added
@@ -1098,7 +274,7 @@ class _AudioToSpeechE2ESpkDiarDataset(Dataset):

        Args:
            sample:
-                `
+                `EndtoEndDiarizationSpeechLabel` instance from collections.

        Returns:
            uniq_id (str):
@@ -1188,7 +364,7 @@ class _AudioToSpeechE2ESpkDiarDataset(Dataset):

        Args:
            sample:
-                `
+                `EndtoEndDiarizationSpeechLabel` instance from preprocessing.collections
        Returns:
            segment_timestamps (torch.tensor):
                Tensor containing Multiscale segment timestamps.
@@ -1238,10 +414,15 @@ class _AudioToSpeechE2ESpkDiarDataset(Dataset):
        )
        audio_signal = audio_signal[: round(self.featurizer.sample_rate * session_len_sec)]
        audio_signal_length = torch.tensor(audio_signal.shape[0]).long()
+
+        # Target length should be following the ASR feature extraction convention: Use self.get_frame_count_from_time_series_length.
        target_len = self.get_segment_timestamps(duration=session_len_sec, sample_rate=self.featurizer.sample_rate)
+        target_len = torch.clamp(target_len, max=self.get_frame_count_from_time_series_length(audio_signal.shape[0]))
+
        targets = self.parse_rttm_for_targets_and_lens(
            rttm_file=sample.rttm_file, offset=offset, duration=session_len_sec, target_len=target_len
        )
+        targets = targets[:target_len, :]
        return audio_signal, audio_signal_length, targets, target_len


@@ -1357,6 +538,7 @@ class AudioToSpeechE2ESpkDiarDataset(_AudioToSpeechE2ESpkDiarDataset):
        session_len_sec: float,
        num_spks: int,
        featurizer,
+        fb_featurizer,
        window_stride,
        global_rank: int,
        soft_targets: bool,
@@ -1368,6 +550,7 @@ class AudioToSpeechE2ESpkDiarDataset(_AudioToSpeechE2ESpkDiarDataset):
            session_len_sec=session_len_sec,
            num_spks=num_spks,
            featurizer=featurizer,
+            fb_featurizer=fb_featurizer,
            window_stride=window_stride,
            global_rank=global_rank,
            soft_targets=soft_targets,

 # limitations under the License.

 import os
 from typing import Dict, List, Optional, Tuple

 import numpy as np
 import torch

+from nemo.collections.asr.parts.utils.speaker_utils import convert_rttm_line, get_subsegments
+from nemo.collections.common.parts.preprocessing.collections import EndtoEndDiarizationSpeechLabel
 from nemo.core.classes import Dataset
+from nemo.core.neural_types import AudioSignal, LengthsType, NeuralType, ProbsType
 from nemo.utils import logging
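The new `get_frame_count_from_time_series_length` helper ties the diarization target length to the same STFT framing the ASR front end uses, and `target_len` is clamped to that count so RTTM-derived targets cannot exceed the encoder's actual frame count. Below is a minimal standalone sketch of that arithmetic; the function name and the `n_fft`, `hop_length` and `subsampling` defaults are illustrative assumptions, not values taken from this diff:

```
import math

def frame_count_from_samples(num_samples, n_fft=512, hop_length=160, stft_pad_amount=None, subsampling=8):
    # Centre padding added by the STFT: an explicit pad amount if given, else n_fft // 2 on each side.
    pad = stft_pad_amount * 2 if stft_pad_amount is not None else (n_fft // 2) * 2
    mel_frames = (num_samples + pad - n_fft) // hop_length
    # The diarization decoder produces one output frame per `subsampling` mel frames.
    return math.ceil(mel_frames / subsampling)

# 10 s of 16 kHz audio -> 1000 mel frames -> 125 decoder frames with these assumed settings.
print(frame_count_from_samples(160_000))
```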
nemo/collections/asr/data/audio_to_diar_label_lhotse.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@ from nemo.collections.asr.parts.utils.asr_multispeaker_utils import (
     speaker_to_target,
 )
 from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType
+from nemo.utils import logging


 class LhotseAudioToSpeechE2ESpkDiarDataset(torch.utils.data.Dataset):
@@ -55,22 +56,53 @@
            self.cfg.get('window_stride', 0.01) * self.cfg.get('sample_rate', 16000)
        )  # 160 samples for every 1ms by default
        self.num_mel_frame_per_target_frame = int(self.cfg.get('subsampling_factor', 8))
-        self.spk_tar_all_zero = self.cfg.get('spk_tar_all_zero', False)

    def __getitem__(self, cuts) -> Tuple[torch.Tensor, ...]:
-
+        # NOTE: This end-to-end diarization dataloader only loads the 1st ch of the audio file.
+        # Process cuts in a single loop: convert to mono and compute speaker activities
+        mono_cuts = []
        speaker_activities = []
        for cut in cuts:
+            if cut.num_channels is not None and cut.num_channels > 1:
+                logging.warning(
+                    "Multiple channels detected in cut '%s' (%d channels). "
+                    "Only the first channel will be used; remaining channels are ignored.",
+                    cut.id,
+                    cut.num_channels,
+                )
+            mono_cut = cut.with_channels(channels=[0])
+            mono_cuts.append(mono_cut)
+
            speaker_activity = speaker_to_target(
-                a_cut=
+                a_cut=mono_cut,
                num_speakers=self.num_speakers,
                num_sample_per_mel_frame=self.num_sample_per_mel_frame,
                num_mel_frame_per_asr_frame=self.num_mel_frame_per_target_frame,
-                spk_tar_all_zero=self.spk_tar_all_zero,
                boundary_segments=True,
            )
+            # This line prevents dimension mismatch error in the collate_matrices function.
+            if speaker_activity.shape[1] > self.num_speakers:
+                logging.warning(
+                    "Number of speakers in the target %s is greater than "
+                    "the maximum number of speakers %s. Truncating extra speakers. "
+                    "Set the `num_speakers` to higher value to avoid this warning.",
+                    speaker_activity.shape[1],
+                    self.num_speakers,
+                )
+                speaker_activity = speaker_activity[:, : self.num_speakers]
            speaker_activities.append(speaker_activity)
-
+
+        cuts = type(cuts).from_cuts(mono_cuts)
+        audio, audio_lens, cuts = self.load_audio(cuts)
+        targets = collate_matrices(speaker_activities).to(audio.dtype)  # (B, T, N)
+
+        if targets.shape[2] > self.num_speakers:
+            targets = targets[:, :, : self.num_speakers]
+        elif targets.shape[2] < self.num_speakers:
+            targets = torch.nn.functional.pad(
+                targets, (0, self.num_speakers - targets.shape[2]), mode='constant', value=0
+            )
+
        target_lens_list = []
        for audio_len in audio_lens:
            target_fr_len = get_hidden_length_from_sample_length(
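The truncate/pad step applied to `targets` above simply normalizes the speaker dimension of the collated `(B, T, N)` tensor to the configured `num_speakers`. A small self-contained sketch of that step, using a hypothetical helper name and only standard PyTorch:

```
import torch

def fit_speaker_dim(targets: torch.Tensor, num_speakers: int) -> torch.Tensor:
    """Truncate or zero-pad the last (speaker) dimension of a (B, T, N) target tensor."""
    if targets.shape[2] > num_speakers:
        return targets[:, :, :num_speakers]
    if targets.shape[2] < num_speakers:
        return torch.nn.functional.pad(targets, (0, num_speakers - targets.shape[2]), mode='constant', value=0)
    return targets

batch = torch.rand(2, 50, 3)            # 3 speakers present in the batch
print(fit_speaker_dim(batch, 4).shape)  # torch.Size([2, 50, 4])
print(fit_speaker_dim(batch, 2).shape)  # torch.Size([2, 50, 2])
```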
nemo/collections/asr/data/audio_to_eou_label_lhotse.py
ADDED
|
@@ -0,0 +1,524 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import math
|
| 16 |
+
from dataclasses import dataclass
|
| 17 |
+
from typing import Dict, List, Optional
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch.utils.data
|
| 21 |
+
from lhotse.cut import Cut, CutSet, MixedCut
|
| 22 |
+
from lhotse.dataset import AudioSamples
|
| 23 |
+
from lhotse.dataset.collation import collate_vectors
|
| 24 |
+
from omegaconf import DictConfig, OmegaConf
|
| 25 |
+
|
| 26 |
+
from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
|
| 27 |
+
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
|
| 28 |
+
from nemo.collections.common.tokenizers.aggregate_tokenizer import TokenizerWrapper
|
| 29 |
+
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
|
| 30 |
+
from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType
|
| 31 |
+
from nemo.utils import logging
|
| 32 |
+
|
| 33 |
+
NON_SPEECH_LABEL = 0
|
| 34 |
+
SPEECH_LABEL = 1
|
| 35 |
+
EOU_LABEL = 2
|
| 36 |
+
EOB_LABEL = 3
|
| 37 |
+
EOU_STRING = '<EOU>'
|
| 38 |
+
EOB_STRING = '<EOB>'
|
| 39 |
+
|
| 40 |
+
# These augmentations are not supported yet, since they will need to change the SOU/EOU timestamps
|
| 41 |
+
EOU_INVALID_AUGMENTATIONS = ['random_segment', 'speed', 'time_stretch']
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@dataclass
|
| 45 |
+
class AudioToTextEOUBatch:
|
| 46 |
+
"""
|
| 47 |
+
Data class for ASR-EOU batch.
|
| 48 |
+
"""
|
| 49 |
+
|
| 50 |
+
sample_ids: List | None = None
|
| 51 |
+
audio_filepaths: List | None = None
|
| 52 |
+
audio_signal: torch.Tensor | None = None
|
| 53 |
+
audio_lengths: torch.Tensor | None = None
|
| 54 |
+
text_tokens: torch.Tensor | None = None
|
| 55 |
+
text_token_lengths: torch.Tensor | None = None
|
| 56 |
+
eou_targets: torch.Tensor | None = None
|
| 57 |
+
eou_target_lengths: torch.Tensor | None = None
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@dataclass
|
| 61 |
+
class RandomPaddingConfig:
|
| 62 |
+
prob: float = 0.9 # probability of applying padding
|
| 63 |
+
min_pad_duration: float = 0.0 # minimum duration of pre/post padding in seconds
|
| 64 |
+
max_pad_duration: float = 5.0 # maximum duration of pre/post padding in seconds
|
| 65 |
+
max_total_duration: float = 40.0 # maximum total duration of the padded audio in seconds
|
| 66 |
+
min_pre_pad_duration: float = 0.0 # minimum duration of pre-padding in seconds
|
| 67 |
+
min_post_pad_duration: float = 2.0 # minimum duration of post-padding in seconds
|
| 68 |
+
pad_distribution: str = 'uniform' # distribution of padding duration, 'uniform' or 'normal' or 'constant'
|
| 69 |
+
normal_mean: float = 0.5 # mean of normal distribution for padding duration
|
| 70 |
+
normal_std: float = 2.0 # standard deviation of normal distribution for padding duration
|
| 71 |
+
pre_pad_duration: float = 0.2 # amount of left-padding when pad_distribution='constant'
|
| 72 |
+
post_pad_duration: float = 3.0 # amount of right-padding when pad_distribution='constant'
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class LhotseSpeechToTextBpeEOUDataset(torch.utils.data.Dataset):
|
| 76 |
+
"""
|
| 77 |
+
This dataset processes the audio data and the corresponding text data to generate the ASR labels,
|
| 78 |
+
along with EOU labels for each frame. The audios used in this dataset should only contain speech with
|
| 79 |
+
NO precedding or following silence. The dataset also randomly pads non-speech frames before and after
|
| 80 |
+
the audio signal for training EOU prediction task.
|
| 81 |
+
|
| 82 |
+
To generate EOU labels, the last frame of utterance will be marked as "end of utterance" (labeled as `2`),
|
| 83 |
+
while if it's a backchannel utterance it'll be marked asd "end of backchannel" (labeled as `3`).
|
| 84 |
+
The rest of the speech frames will be marked as "speech" (labeled as `1`).
|
| 85 |
+
The padded non-speech signals will be marked as "non-speech" (labeled as 0).
|
| 86 |
+
|
| 87 |
+
Args:
|
| 88 |
+
cfg: DictConfig object container following keys, usually taken from your `model.train_ds`
|
| 89 |
+
or `model.validation_ds` config:
|
| 90 |
+
```
|
| 91 |
+
sample_rate: # int, Sample rate of the audio signal
|
| 92 |
+
window_stride: # float, Window stride for audio encoder
|
| 93 |
+
subsampling_factor: # Subsampling factor for audio encoder
|
| 94 |
+
random_padding: # Random padding configuration
|
| 95 |
+
prob: 0.9 # probability of applying padding
|
| 96 |
+
min_pad_duration: 0.5 # minimum duration of pre/post padding in seconds
|
| 97 |
+
max_pad_duration: 2.0 # maximum duration of pre/post padding in seconds
|
| 98 |
+
max_total_duration: 30.0 # maximum total duration of the padded audio in seconds
|
| 99 |
+
pad_distribution: 'uniform' # distribution of padding duration, 'uniform' or 'normal' or 'constant'
|
| 100 |
+
normal_mean: 0.5 # mean of normal distribution for padding duration
|
| 101 |
+
normal_std: 2.0 # standard deviation of normal distribution for padding duration
|
| 102 |
+
pre_pad_duration: 0.2 # amount of left-padding when pad_distribution='constant'
|
| 103 |
+
post_pad_duration: 3.0 # amount of right-padding when pad_distribution='constant'
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
Returns:
|
| 107 |
+
audio: torch.Tensor of audio signal
|
| 108 |
+
audio_lens: torch.Tensor of audio signal length
|
| 109 |
+
text_tokens: torch.Tensor of text text_tokens
|
| 110 |
+
text_token_lens: torch.Tensor of text token length
|
| 111 |
+
eou_targets (optional): torch.Tensor of EOU labels
|
| 112 |
+
eou_target_lens (optional): torch.Tensor of EOU label length
|
| 113 |
+
|
| 114 |
+
The input manifest should be a jsonl file where each line is a python dictionary.
|
| 115 |
+
Example manifest sample:
|
| 116 |
+
{
|
| 117 |
+
"audio_filepath": "/path/to/audio.wav",
|
| 118 |
+
"offset": 0.0,
|
| 119 |
+
"duration": 6.0,
|
| 120 |
+
"sou_time": [0.3, 4.0],
|
| 121 |
+
"eou_time": [1.3, 4.5],
|
| 122 |
+
"utterances": ["Tell me a joke", "Ah-ha"],
|
| 123 |
+
"is_backchannel": [False, True],
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
Padding logic:
|
| 127 |
+
0. Don't pad when `random_padding` is None or during validation/test
|
| 128 |
+
1. randomly draw a probability to decide whether to apply padding
|
| 129 |
+
2. if not padding or audio duration is longer than the maximum duration,
|
| 130 |
+
1) return the original audio and EOU labels
|
| 131 |
+
3. if apply padding,
|
| 132 |
+
1) get the max padding duration based on the maximum total duration and the audio duration
|
| 133 |
+
2) randomly draw a total padding duration based on the given distribution
|
| 134 |
+
3) randomly split the total padding duration into pre-padding and post-padding
|
| 135 |
+
4) randomly generate the non-speech signal (audio signal=0) for pre-padding and post-padding
|
| 136 |
+
5) concatenate the pre-padding, audio, and post-padding to get the padded audio signal
|
| 137 |
+
6) update the EOU labels accordingly
|
| 138 |
+
|
| 139 |
+
"""
|
| 140 |
+
|
| 141 |
+
@property
|
| 142 |
+
def output_types(self) -> Optional[Dict[str, NeuralType]]:
|
| 143 |
+
"""Define the output types of the dataset."""
|
| 144 |
+
return {
|
| 145 |
+
'audio': NeuralType(('B', 'T'), AudioSignal()),
|
| 146 |
+
'audio_lens': NeuralType(tuple('B'), LengthsType()),
|
| 147 |
+
'eou_targets': NeuralType(('B', 'T'), LabelsType()),
|
| 148 |
+
'eou_target_lens': NeuralType(tuple('B'), LengthsType()),
|
| 149 |
+
'text_tokens': NeuralType(tuple('B', 'T'), LengthsType(), optional=True),
|
| 150 |
+
'text_token_lens': NeuralType(tuple('B'), LengthsType(), optional=True),
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
def __init__(self, cfg: DictConfig, tokenizer: TokenizerSpec, return_cuts: bool = False):
|
| 154 |
+
super().__init__()
|
| 155 |
+
self.cfg = cfg
|
| 156 |
+
self.return_cuts = return_cuts
|
| 157 |
+
self.eou_string = self.cfg.get('eou_string', EOU_STRING)
|
| 158 |
+
self.eob_string = self.cfg.get('eob_string', EOB_STRING)
|
| 159 |
+
if cfg.get('check_tokenizer', True):
|
| 160 |
+
self._check_special_tokens(tokenizer)
|
| 161 |
+
|
| 162 |
+
self.tokenizer = TokenizerWrapper(tokenizer)
|
| 163 |
+
self.load_audio = AudioSamples(fault_tolerant=True)
|
| 164 |
+
self.sample_rate = self.cfg.get('sample_rate', 16000)
|
| 165 |
+
self.window_stride = self.cfg.get('window_stride', 0.01)
|
| 166 |
+
self.num_sample_per_mel_frame = int(
|
| 167 |
+
self.window_stride * self.sample_rate
|
| 168 |
+
) # 160 samples for every 1ms by default
|
| 169 |
+
self.num_mel_frame_per_target_frame = int(self.cfg.get('subsampling_factor', 8))
|
| 170 |
+
self.add_sep_before_eou = self.cfg.get('add_sep_before_eou', False)
|
| 171 |
+
self.add_eou_to_text = self.cfg.get('add_eou_to_text', True)
|
| 172 |
+
self.pad_eou_label_secs = self.cfg.get('pad_eou_label_secs', 0.0)
|
| 173 |
+
self.padding_cfg = self.cfg.get('random_padding', None)
|
| 174 |
+
if self.padding_cfg is not None:
|
| 175 |
+
self.padding_cfg = OmegaConf.to_container(self.padding_cfg, resolve=True)
|
| 176 |
+
self.padding_cfg = RandomPaddingConfig(**self.padding_cfg)
|
| 177 |
+
self.ignore_eob_label = self.cfg.get('ignore_eob_label', False)
|
| 178 |
+
self.augmentor = None
|
| 179 |
+
if self.cfg.get('augmentor', None) is not None:
|
| 180 |
+
augmentor = {}
|
| 181 |
+
aug_cfg = OmegaConf.to_container(self.cfg.augmentor, resolve=True)
|
| 182 |
+
for k, v in aug_cfg.items():
|
| 183 |
+
if k in EOU_INVALID_AUGMENTATIONS:
|
| 184 |
+
logging.warning(f"EOU dataset does not support {k} augmentation yet, skipping.")
|
| 185 |
+
continue
|
| 186 |
+
augmentor[k] = v
|
| 187 |
+
|
| 188 |
+
if len(augmentor) > 0:
|
| 189 |
+
logging.info(f"EOU dataset will apply augmentations: {augmentor}")
|
| 190 |
+
self.augmentor = process_augmentations(augmentor)
|
| 191 |
+
|
| 192 |
+
def _check_special_tokens(self, tokenizer: TokenizerSpec):
|
| 193 |
+
"""
|
| 194 |
+
Check if the special tokens are in the tokenizer vocab.
|
| 195 |
+
"""
|
| 196 |
+
special_tokens = set([self.eou_string, self.eob_string])
|
| 197 |
+
vocab_size = tokenizer.vocab_size
|
| 198 |
+
special_tokens_in_vocab = set([tokenizer.ids_to_text(vocab_size - 1), tokenizer.ids_to_text(vocab_size - 2)])
|
| 199 |
+
if special_tokens != special_tokens_in_vocab:
|
| 200 |
+
raise ValueError(
|
| 201 |
+
f"Input special tokens {special_tokens} don't match with the tokenizer vocab {special_tokens_in_vocab}. "
|
| 202 |
+
f"Please add them to tokenizer or change input `eou_string` and/or `eob_string` accordingly. "
|
| 203 |
+
"Special tokens should be added as the last two tokens in the new tokenizer. "
|
| 204 |
+
"Please refer to scripts/asr_end_of_utterance/tokenizers/add_special_tokens_to_sentencepiece.py for details."
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
def __getitem__(self, cuts: CutSet) -> AudioToTextEOUBatch:
|
| 208 |
+
audio, audio_lens, cuts = self.load_audio(cuts)
|
| 209 |
+
audio_signals = []
|
| 210 |
+
audio_lengths = []
|
| 211 |
+
eou_targets = []
|
| 212 |
+
text_tokens = []
|
| 213 |
+
sample_ids = []
|
| 214 |
+
audio_filepaths = []
|
| 215 |
+
|
| 216 |
+
for i in range(len(cuts)):
|
| 217 |
+
c = cuts[i]
|
| 218 |
+
if isinstance(c, MixedCut):
|
| 219 |
+
c = c.first_non_padding_cut
|
| 220 |
+
|
| 221 |
+
sample_ids.append(c.id)
|
| 222 |
+
audio_filepaths.append(c.recording.sources[0].source)
|
| 223 |
+
|
| 224 |
+
audio_i = audio[i]
|
| 225 |
+
audio_len_i = audio_lens[i]
|
| 226 |
+
|
| 227 |
+
# Get EOU labels and text tokens
|
| 228 |
+
eou_targets_i = self._get_frame_labels(c, audio_len_i)
|
| 229 |
+
text_tokens_i = self._get_text_tokens(c)
|
| 230 |
+
|
| 231 |
+
# Maybe apply random padding to both sides of the audio
|
| 232 |
+
audio_i, audio_len_i, eou_targets_i = self._random_pad_audio(audio_i, audio_len_i, eou_targets_i)
|
| 233 |
+
|
| 234 |
+
# Maybe apply augmentations to the audio signal after padding
|
| 235 |
+
audio_i, audio_len_i = self._maybe_augment_audio(audio_i, audio_len_i)
|
| 236 |
+
|
| 237 |
+
# Append the processed audio, EOU labels, and text tokens to the lists
|
| 238 |
+
audio_signals.append(audio_i)
|
| 239 |
+
audio_lengths.append(audio_len_i)
|
| 240 |
+
eou_targets.append(eou_targets_i)
|
| 241 |
+
text_tokens.append(text_tokens_i)
|
| 242 |
+
|
| 243 |
+
audio_signals = collate_vectors(audio_signals, padding_value=0)
|
| 244 |
+
audio_lengths = torch.tensor(audio_lengths, dtype=torch.long)
|
| 245 |
+
eou_target_lens = torch.tensor([t.size(0) for t in eou_targets], dtype=torch.long)
|
| 246 |
+
eou_targets = collate_vectors(eou_targets, padding_value=0)
|
| 247 |
+
text_token_lens = torch.tensor([t.size(0) for t in text_tokens], dtype=torch.long)
|
| 248 |
+
text_tokens = collate_vectors(text_tokens, padding_value=0)
|
| 249 |
+
|
| 250 |
+
if self.return_cuts:
|
| 251 |
+
return audio_signals, audio_lengths, cuts
|
| 252 |
+
|
| 253 |
+
return AudioToTextEOUBatch(
|
| 254 |
+
sample_ids=sample_ids,
|
| 255 |
+
audio_filepaths=audio_filepaths,
|
| 256 |
+
audio_signal=audio_signals,
|
| 257 |
+
audio_lengths=audio_lengths,
|
| 258 |
+
text_tokens=text_tokens,
|
| 259 |
+
text_token_lengths=text_token_lens,
|
| 260 |
+
eou_targets=eou_targets,
|
| 261 |
+
eou_target_lengths=eou_target_lens,
|
| 262 |
+
)
|
| 263 |
+
|
| 264 |
+
def _audio_len_to_frame_len(self, num_samples: int):
|
| 265 |
+
"""
|
| 266 |
+
Convert the raw audio length to the number of frames after audio encoder.
|
| 267 |
+
|
| 268 |
+
self.num_sample_per_mel_frame = int(
|
| 269 |
+
self.cfg.get('window_stride', 0.01) * self.cfg.get('sample_rate', 16000)
|
| 270 |
+
) # 160 samples for every 1ms by default
|
| 271 |
+
self.num_mel_frame_per_target_frame = int(self.cfg.get('subsampling_factor', 8))
|
| 272 |
+
"""
|
| 273 |
+
mel_frame_count = math.ceil((num_samples + 1) / self.num_sample_per_mel_frame)
|
| 274 |
+
hidden_length = math.ceil(mel_frame_count / self.num_mel_frame_per_target_frame)
|
| 275 |
+
return hidden_length
|
| 276 |
+
|
| 277 |
+
def _repeat_eou_labels(self, eou_targets: torch.Tensor) -> torch.Tensor:
|
| 278 |
+
"""
|
| 279 |
+
Repeat EOU labels according to self.pad_eou_label_secs
|
| 280 |
+
Args:
|
| 281 |
+
eou_targets: torch.Tensor of EOU labels, shape [T]
|
| 282 |
+
Returns:
|
| 283 |
+
eou_targets: torch.Tensor of padded EOU labels, shape [T]
|
| 284 |
+
"""
|
| 285 |
+
if not self.pad_eou_label_secs or self.pad_eou_label_secs <= 0:
|
| 286 |
+
return eou_targets
|
| 287 |
+
|
| 288 |
+
eou_len = self._audio_len_to_frame_len(int(self.pad_eou_label_secs * self.sample_rate))
|
| 289 |
+
|
| 290 |
+
i = 0
|
| 291 |
+
while i < eou_targets.size(0):
|
| 292 |
+
if eou_targets[i] == EOU_LABEL or eou_targets[i] == EOB_LABEL:
|
| 293 |
+
# repeat the label for the next eou_len samples
|
| 294 |
+
start = i
|
| 295 |
+
end = min(i + eou_len, eou_targets.size(0))
|
| 296 |
+
j = start + 1
|
| 297 |
+
while j < end:
|
| 298 |
+
if eou_targets[j] != NON_SPEECH_LABEL:
|
| 299 |
+
# do not overwrite the label if it's not non-speech
|
| 300 |
+
break
|
| 301 |
+
j += 1
|
| 302 |
+
end = min(j, end)
|
| 303 |
+
# fill the non-speech label with the current EOU/EOB label
|
| 304 |
+
eou_targets[start:end] = eou_targets[i]
|
| 305 |
+
i = end
|
| 306 |
+
else:
|
| 307 |
+
i += 1
|
| 308 |
+
return eou_targets
|
| 309 |
+
|
| 310 |
+
def _get_frame_labels(self, cut: Cut, num_samples: int):
|
| 311 |
+
"""
|
| 312 |
+
Get the frame-level EOU labels for a single audio segment.
|
| 313 |
+
Args:
|
| 314 |
+
cut: Cut object
|
| 315 |
+
num_samples: int, the number of samples in the audio segment
|
| 316 |
+
Returns:
|
| 317 |
+
eou_targets: torch.Tensor of EOU labels, shape [T]
|
| 318 |
+
"""
|
| 319 |
+
hidden_length = self._audio_len_to_frame_len(num_samples)
|
| 320 |
+
if not "sou_time" in cut.custom or not "eou_time" in cut.custom:
|
| 321 |
+
# assume only single speech segment
|
| 322 |
+
text = cut.supervisions[0].text
|
| 323 |
+
if not text:
|
| 324 |
+
# skip empty utterances
|
| 325 |
+
return torch.zeros(hidden_length).long()
|
| 326 |
+
eou_targets = torch.ones(hidden_length).long() # speech label
|
| 327 |
+
eou_targets[-1] = EOU_LABEL # by default it's end of utterance
|
| 328 |
+
if cut.has_custom("is_backchannel") and cut.custom["is_backchannel"] and not self.ignore_eob_label:
|
| 329 |
+
eou_targets[-1] = EOB_LABEL # end of backchannel
|
| 330 |
+
return eou_targets
|
| 331 |
+
|
| 332 |
+
sou_time = cut.custom["sou_time"]
|
| 333 |
+
eou_time = cut.custom["eou_time"]
|
| 334 |
+
if not isinstance(sou_time, list):
|
| 335 |
+
sou_time = [sou_time]
|
| 336 |
+
if not isinstance(eou_time, list):
|
| 337 |
+
eou_time = [eou_time]
|
| 338 |
+
|
| 339 |
+
assert len(sou_time) == len(
|
| 340 |
+
eou_time
|
| 341 |
+
), f"Number of SOU time and EOU time do not match: SOU ({sou_time}) vs EOU ({eou_time})"
|
| 342 |
+
|
| 343 |
+
if cut.has_custom("is_backchannel"):
|
| 344 |
+
is_backchannel = cut.custom["is_backchannel"]
|
| 345 |
+
if not isinstance(is_backchannel, list):
|
| 346 |
+
is_backchannel = [is_backchannel]
|
| 347 |
+
assert len(sou_time) == len(
|
| 348 |
+
is_backchannel
|
| 349 |
+
), f"Number of SOU and backchannel do not match: SOU ({len(sou_time)}) vs backchannel ({len(is_backchannel)})"
|
| 350 |
+
else:
|
| 351 |
+
is_backchannel = [False] * len(sou_time)
|
| 352 |
+
|
| 353 |
+
eou_targets = torch.zeros(hidden_length).long()
|
| 354 |
+
for i in range(len(sou_time)):
|
| 355 |
+
if sou_time[i] is None or eou_time[i] is None or sou_time[i] < 0 or eou_time[i] < 0:
|
| 356 |
+
# skip empty utterances
|
| 357 |
+
continue
|
| 358 |
+
sou_idx = self._audio_len_to_frame_len(int((sou_time[i] - cut.start) * self.sample_rate))
|
| 359 |
+
seg_len_in_secs = eou_time[i] - sou_time[i]
|
| 360 |
+
seg_len = self._audio_len_to_frame_len(int(seg_len_in_secs * self.sample_rate))
|
| 361 |
+
eou_targets[sou_idx : sou_idx + seg_len] = SPEECH_LABEL
|
| 362 |
+
last_idx = min(sou_idx + seg_len - 1, hidden_length - 1)
|
| 363 |
+
if is_backchannel[i] and not self.ignore_eob_label:
|
| 364 |
+
eou_targets[last_idx] = EOB_LABEL # end of backchannel
|
| 365 |
+
else:
|
| 366 |
+
eou_targets[last_idx] = EOU_LABEL # end of utterance
|
| 367 |
+
|
| 368 |
+
return eou_targets
|
| 369 |
+
|
| 370 |
+
def _get_text_tokens(self, cut: Cut):
|
| 371 |
+
"""
|
| 372 |
+
Add EOU labels to the text and get the text tokens for a single audio segment.
|
| 373 |
+
Args:
|
| 374 |
+
cut: Cut object
|
| 375 |
+
Returns:
|
| 376 |
+
text_tokens: torch.Tensor of text tokens, shape [T]
|
| 377 |
+
"""
|
| 378 |
+
if not cut.has_custom("sou_time") or not cut.has_custom("eou_time") or not cut.has_custom("utterances"):
|
| 379 |
+
# assume only single speech segment
|
| 380 |
+
utterances = [cut.supervisions[0].text]
|
| 381 |
+
else:
|
| 382 |
+
utterances = cut.custom["utterances"]
|
| 383 |
+
|
| 384 |
+
if not isinstance(utterances, list):
|
| 385 |
+
utterances = [utterances]
|
| 386 |
+
|
| 387 |
+
if cut.has_custom("is_backchannel"):
|
| 388 |
+
is_backchannel = cut.custom["is_backchannel"]
|
| 389 |
+
if not isinstance(is_backchannel, list):
|
| 390 |
+
is_backchannel = [is_backchannel]
|
| 391 |
+
assert len(utterances) == len(
|
| 392 |
+
is_backchannel
|
| 393 |
+
), f"Number of utterances and backchannel do not match: utterance ({len(utterances)}) vs backchannel ({len(is_backchannel)})"
|
| 394 |
+
else:
|
| 395 |
+
is_backchannel = [False] * len(utterances)
|
| 396 |
+
|
| 397 |
+
total_text = ""
|
| 398 |
+
for i, text in enumerate(utterances):
|
| 399 |
+
if not text:
|
| 400 |
+
# skip empty utterances
|
| 401 |
+
continue
|
| 402 |
+
if self.add_eou_to_text:
|
| 403 |
+
eou_string = self.eob_string if is_backchannel[i] and not self.ignore_eob_label else self.eou_string
|
| 404 |
+
if self.add_sep_before_eou:
|
| 405 |
+
eou_string = " " + eou_string
|
| 406 |
+
else:
|
| 407 |
+
eou_string = ""
|
| 408 |
+
total_text += text + eou_string + " "
|
| 409 |
+
total_text = total_text.strip()
|
| 410 |
+
return torch.as_tensor(self.tokenizer(total_text))
|
| 411 |
+
|
| 412 |
+
def _random_pad_audio(self, audio: torch.Tensor, audio_len: torch.Tensor, eou_targets: torch.Tensor):
|
| 413 |
+
"""
|
| 414 |
+
Randomly pad the audio signal with non-speech signal before and after the audio signal.
|
| 415 |
+
Args:
|
| 416 |
+
audio: torch.Tensor of a single audio signal, shape [T]
|
| 417 |
+
audio_len: torch.Tensor of audio signal length, shape [1]
|
| 418 |
+
eou_targets: torch.Tensor of EOU labels, shape [T]
|
| 419 |
+
Returns:
|
| 420 |
+
padded_audio: torch.Tensor of padded audio signal, shape [T+padding]
|
| 421 |
+
            padded_audio_len: torch.Tensor of padded audio signal length, shape [1]
            padded_eou_targets: torch.Tensor of padded EOU labels, shape [T+padding]
            padded_eou_targets_len: torch.Tensor of padded EOU label length, shape [1]
        """
        p = np.random.rand()
        if self.padding_cfg is None or p > self.padding_cfg.prob:
            # don't apply padding
            eou_targets = self._repeat_eou_labels(eou_targets)
            return audio, audio_len, eou_targets

        duration = audio_len.item() / self.cfg.sample_rate
        # if already longer than the maximum duration, return the original audio
        if duration >= self.padding_cfg.max_total_duration:
            return audio, audio_len, eou_targets

        # apply padding
        audio = audio[:audio_len]

        self.padding_cfg.min_pre_pad_duration = max(
            self.padding_cfg.min_pre_pad_duration, self.padding_cfg.min_pad_duration
        )
        self.padding_cfg.min_post_pad_duration = max(
            self.padding_cfg.min_post_pad_duration, self.padding_cfg.min_pad_duration
        )

        max_padding_duration = max(0, self.padding_cfg.max_total_duration - duration)
        if max_padding_duration <= self.padding_cfg.min_pre_pad_duration + self.padding_cfg.min_post_pad_duration:
            min_padding_duration = 0
        else:
            min_padding_duration = self.padding_cfg.min_pre_pad_duration + self.padding_cfg.min_post_pad_duration

        pre_padding_duration = None
        post_padding_duration = None

        if self.padding_cfg.pad_distribution == 'uniform':
            total_padding_duration = np.random.uniform(min_padding_duration, max_padding_duration)
        elif self.padding_cfg.pad_distribution == 'normal':
            total_padding_duration = np.random.normal(self.padding_cfg.normal_mean, self.padding_cfg.normal_std)
            total_padding_duration = max(min_padding_duration, min(max_padding_duration, total_padding_duration))
        elif self.padding_cfg.pad_distribution == 'constant':
            pass
        else:
            raise ValueError(f"Unknown padding distribution: {self.padding_cfg.pad_distribution}")

        if self.padding_cfg.pad_distribution == 'constant':
            pre_padding_duration = self.padding_cfg.pre_pad_duration
            post_padding_duration = self.padding_cfg.post_pad_duration
        elif min_padding_duration == 0:
            pre_padding_duration = total_padding_duration / 2
            post_padding_duration = total_padding_duration / 2
        else:
            post_padding_duration = np.random.uniform(
                self.padding_cfg.min_post_pad_duration, total_padding_duration - self.padding_cfg.min_pre_pad_duration
            )
            pre_padding_duration = total_padding_duration - post_padding_duration

        if self.padding_cfg.max_pad_duration is not None:
            pre_padding_duration = min(pre_padding_duration, self.padding_cfg.max_pad_duration)
            post_padding_duration = min(post_padding_duration, self.padding_cfg.max_pad_duration)

        pre_padding_len = math.ceil(pre_padding_duration * self.cfg.sample_rate)
        post_padding_len = math.ceil(post_padding_duration * self.cfg.sample_rate)

        # pad the audio signal
        pre_padding = torch.zeros(pre_padding_len, dtype=audio.dtype)
        post_padding = torch.zeros(post_padding_len, dtype=audio.dtype)
        padded_audio = torch.cat((pre_padding, audio, post_padding), dim=0)
        padded_audio_len = audio_len + pre_padding_len + post_padding_len

        # pad the EOU labels
        pre_padding_eou_len = self._audio_len_to_frame_len(pre_padding_len)
        post_padding_eou_len = self._audio_len_to_frame_len(post_padding_len)
        pre_padding_eou = torch.zeros(pre_padding_eou_len, dtype=eou_targets.dtype)
        post_padding_eou = torch.zeros(post_padding_eou_len, dtype=eou_targets.dtype)
        padded_eou_targets = torch.cat((pre_padding_eou, eou_targets, post_padding_eou), dim=0)
        padded_eou_targets = self._repeat_eou_labels(padded_eou_targets)
        return padded_audio, padded_audio_len, padded_eou_targets

    def _maybe_augment_audio(self, audio: torch.Tensor, audio_len: torch.Tensor):
        """
        Apply augmentation to the audio signal if augmentor is provided.
        Args:
            audio: torch.Tensor of a single audio signal, shape [T]
            audio_len: torch.Tensor of audio signal length, shape [1]
        Returns:
            augmented_audio: torch.Tensor of augmented audio signal, shape [T]
            augmented_audio_len: torch.Tensor of augmented audio signal length, shape [1]
        """
        if self.augmentor is None:
            return audio, audio_len

        # Cast to AudioSegment
        audio_segment = AudioSegment(
            samples=audio[:audio_len].numpy(),
            sample_rate=self.sample_rate,
            offset=0,
            duration=audio_len.item() / self.sample_rate,
        )
        # Apply augmentation
        self.augmentor.perturb(audio_segment)
        audio = torch.from_numpy(audio_segment.samples).float()
        audio_len = audio.size(0)

        return audio, audio_len
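For reference, a minimal, self-contained sketch of the same pre/post padding-duration sampling idea used in the method above. The function name, arguments, and default values here are illustrative only (they stand in for the dataset's `padding_cfg` fields) and are not part of the NeMo API.

import numpy as np

def sample_padding_durations(duration, max_total_duration=40.0, min_pre=0.5, min_post=0.5, distribution="uniform"):
    """Sketch: sample pre/post padding durations (seconds) for one utterance."""
    max_pad = max(0.0, max_total_duration - duration)
    # If there is not enough headroom for both minimums, allow zero total padding.
    min_pad = 0.0 if max_pad <= (min_pre + min_post) else (min_pre + min_post)
    if distribution == "uniform":
        total = np.random.uniform(min_pad, max_pad)
    elif distribution == "normal":
        # Mean/std here are made-up example values.
        total = float(np.clip(np.random.normal(2.0, 1.0), min_pad, max_pad))
    else:
        raise ValueError(f"Unknown padding distribution: {distribution}")
    if min_pad == 0.0:
        pre = post = total / 2
    else:
        post = np.random.uniform(min_post, total - min_pre)
        pre = total - post
    return pre, post

# Example: pad a 12.3 s utterance up to at most 40 s of total audio.
pre_sec, post_sec = sample_padding_durations(12.3)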
nemo/collections/asr/data/audio_to_label.py
CHANGED
@@ -16,7 +16,6 @@ import os
 from typing import Dict, List, Optional, Union
 
 import torch
-import webdataset as wds
 
 from nemo.collections.asr.data.audio_to_text import cache_datastore_manifests, expand_sharded_filepaths
 from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
@@ -25,6 +24,7 @@ from nemo.collections.common.parts.preprocessing import collections
 from nemo.core.classes import Dataset, IterableDataset
 from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType, RegressionValuesType
 from nemo.utils import logging
+from nemo.utils import webdataset as wds
 from nemo.utils.distributed import webdataset_split_by_workers
 
 # List of valid file formats (prioritized by order of importance)
@@ -587,6 +587,7 @@ class _TarredAudioLabelDataset(IterableDataset):
             world_size=world_size,
             global_rank=global_rank,
         )
+
         # Put together WebDataset
         self._dataset = wds.DataPipeline(
             wds.SimpleShardList(urls=audio_tar_filepaths),
@@ -1193,6 +1194,7 @@ class TarredAudioToMultiLabelDataset(IterableDataset):
             world_size=world_size,
             global_rank=global_rank,
         )
+
         # Put together WebDataset
         self._dataset = wds.DataPipeline(
             wds.SimpleShardList(urls=audio_tar_filepaths),
nemo/collections/asr/data/audio_to_text.py
CHANGED
@@ -22,7 +22,6 @@ from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
 import braceexpand
 import numpy as np
 import torch
-import webdataset as wds
 from torch.utils.data import ChainDataset
 from tqdm import tqdm
 
@@ -34,14 +33,8 @@ from nemo.collections.common.parts.preprocessing import collections, parsers
 from nemo.core.classes import Dataset, IterableDataset
 from nemo.core.neural_types import *
 from nemo.utils import logging
-from nemo.utils.data_utils import (
-    DataStoreObject,
-    datastore_object_get,
-    datastore_path_to_webdataset_url,
-    is_datastore_cache_shared,
-    is_datastore_path,
-    is_tarred_path,
-)
+from nemo.utils import webdataset as wds
+from nemo.utils.data_utils import DataStoreObject, datastore_object_get, is_datastore_cache_shared, is_datastore_path
 from nemo.utils.decorators import deprecated
 from nemo.utils.distributed import webdataset_split_by_workers
 from nemo.utils.get_rank import is_global_rank_zero
@@ -209,12 +202,6 @@ def expand_sharded_filepaths(sharded_filepaths, shard_strategy: str, world_size:
     # Brace expand, set escape=False for Windows compatibility
     sharded_filepaths = list(braceexpand.braceexpand(sharded_filepaths, escape=False))
 
-    # Expand store paths into WebDataset URLs
-    sharded_filepaths = [
-        datastore_path_to_webdataset_url(p) if is_datastore_path(p) and is_tarred_path(p) else p
-        for p in sharded_filepaths
-    ]
-
     # Check for distributed and partition shards accordingly
     if world_size > 1:
         if shard_strategy == 'scatter':
@@ -949,9 +936,7 @@ class _TarredAudioToTextDataset(IterableDataset):
             self.current_bytes, self.current_fn = next(self.iterator)
             self.offset_id = 0
         else:
-            file_id, _ = os.path.splitext(os.path.basename(self.current_fn))
-            offset_list = self.collection.mapping[file_id]
+            offset_list = self.collection.mapping[self.current_fn]
             if len(offset_list) == self.offset_id + 1:
                 self.current_bytes, self.current_fn = next(self.iterator)
                 self.offset_id = 0
nemo/collections/asr/data/audio_to_text_dali.py
CHANGED
@@ -300,9 +300,7 @@ class _AudioTextDALIDataset(Iterator):
                 f"'clamp'."
             )
 
-        self.log_zero_guard_value = (
-            params['log_zero_guard_value'] if 'log_zero_guard_value' in params else 2 ** -24
-        )
+        self.log_zero_guard_value = params['log_zero_guard_value'] if 'log_zero_guard_value' in params else 2**-24
         if isinstance(self.log_zero_guard_value, str):
             if self.log_zero_guard_value == "tiny":
                 self.log_zero_guard_value = torch.finfo(torch.float32).tiny
@@ -346,8 +344,12 @@ class _AudioTextDALIDataset(Iterator):
 
         elif audio_tar_filepaths is not None and audio_tar_index_filepaths is not None:
             audio_tar_filepaths = expand_sharded_filepaths(
-                audio_tar_filepaths, shard_strategy=shard_strategy, world_size=world_size, global_rank=global_rank
+                audio_tar_filepaths,
+                shard_strategy=shard_strategy,
+                world_size=world_size,
+                global_rank=global_rank,
             )
+
             audio_tar_index_filepaths = expand_sharded_filepaths(
                 audio_tar_index_filepaths,
                 shard_strategy=shard_strategy,
@@ -374,7 +376,10 @@ class _AudioTextDALIDataset(Iterator):
                 pad_last_batch=True,
             )
             audio, _ = dali.fn.decoders.audio(
-                tar_file, dtype=dali.types.FLOAT, downmix=True, sample_rate=float(self.sample_rate)
+                tar_file,
+                dtype=dali.types.FLOAT,
+                downmix=True,
+                sample_rate=float(self.sample_rate),
             )
             indices = dali.fn.get_property(tar_file, key="source_info")
             indices = dali.fn.pad(indices)
@@ -446,7 +451,7 @@ class _AudioTextDALIDataset(Iterator):
             )
 
             # Normalization
-            spec = dali.fn.normalize(spec, axes=self.normalization_axes, epsilon=1e-5 ** 2, ddof=1)
+            spec = dali.fn.normalize(spec, axes=self.normalization_axes, epsilon=1e-5**2, ddof=1)
 
             # Extracting the length of the spectrogram
             spec_len = dali.fn.slice(dali.fn.shapes(spec), 1, 1, axes=(0,))
nemo/collections/asr/data/audio_to_text_dataset.py
CHANGED
@@ -18,7 +18,9 @@ import random
 from math import isclose
 from typing import Any, List, Optional, Union
 
+import numpy as np
 import torch
+from lightning.pytorch import LightningModule
 from lightning.pytorch.callbacks import BasePredictionWriter
 from omegaconf import DictConfig, OmegaConf, open_dict
 from omegaconf.listconfig import ListConfig
@@ -29,8 +31,9 @@ from nemo.collections.asr.data.huggingface.hf_audio_to_text_dataset import (
     get_hf_audio_to_text_bpe_dataset,
     get_hf_audio_to_text_char_dataset,
 )
-from nemo.collections.asr.parts.preprocessing.perturb import process_augmentations
+from nemo.collections.asr.parts.preprocessing.perturb import AudioAugmentor, process_augmentations
 from nemo.collections.common.data.dataset import CodeSwitchedDataset, ConcatDataset
+from nemo.collections.common.tokenizers import TokenizerSpec
 from nemo.utils import logging
 
 
@@ -94,7 +97,7 @@ def get_concat_char_dataset(
         An instance of ConcatDataset containing one or more instances of AudioToCharDataset.
     """
     if 'labels' not in config:
-        logging.warning(
+        logging.warning("dataset does not have explicitly defined labels")
 
     manifest_filepaths = config['manifest_filepath']
     datasets = []
@@ -138,7 +141,7 @@ def get_char_dataset(config: dict, augmentor: Optional['AudioAugmentor'] = None)
         An instance of AudioToCharDataset.
     """
     if 'labels' not in config:
-        logging.warning(
+        logging.warning("dataset does not have explicitly defined labels")
 
     dataset = audio_to_text.AudioToCharDataset(
         manifest_filepath=config['manifest_filepath'],
@@ -332,7 +335,7 @@ def get_tarred_dataset(
     if bucketing_weights:
         for idx, weight in enumerate(bucketing_weights):
             if not isinstance(weight, int) or weight <= 0:
-                raise ValueError(
+                raise ValueError("bucket weights must be positive integers")
 
     if len(manifest_filepaths) != len(tarred_audio_filepaths):
         raise ValueError(
@@ -340,10 +343,10 @@ def get_tarred_dataset(
         )
 
     if 'labels' not in config:
-        logging.warning(
+        logging.warning("dataset does not have explicitly defined labels")
 
     if 'max_utts' in config:
-
+        logging.warning('"max_utts" parameter is not supported for tarred datasets')
 
     for dataset_idx, (tarred_audio_filepath, manifest_filepath) in enumerate(
         zip(tarred_audio_filepaths, manifest_filepaths)
@@ -861,7 +864,7 @@ class ASRPredictionWriter(BasePredictionWriter):
     ):
         import lhotse
 
-        for sample_id,
+        for sample_id, hypotheses in prediction:
             item = {}
             if isinstance(sample_id, lhotse.cut.Cut):
                 sample = sample_id
@@ -876,18 +879,29 @@ class ASRPredictionWriter(BasePredictionWriter):
                 item["text"] = sample.supervisions[0].text or ''
                 if hasattr(sample, 'shard_id'):
                     item["shard_id"] = sample.shard_id
-                item["pred_text"] =
-
-                self.samples_num += 1
+                item["pred_text"] = hypotheses.text
+
             else:
                 sample = self.dataset.get_manifest_sample(sample_id)
                 item["audio_filepath"] = sample.audio_file
                 item["offset"] = sample.offset
                 item["duration"] = sample.duration
                 item["text"] = sample.text_raw
-                item["pred_text"] =
-
-
+                item["pred_text"] = hypotheses.text
+
+            if hasattr(hypotheses, "timestamp") and isinstance(hypotheses.timestamp, dict):
+                for timestamp_type, timestamps in hypotheses.timestamp.items():
+                    if timestamp_type in ['char', 'word', 'segment']:
+                        item[f'{timestamp_type}_timestamps'] = [
+                            {
+                                key: int(value) if isinstance(value, np.int64) else value
+                                for key, value in offset.items()
+                            }
+                            for offset in timestamps
+                        ]
+
+            self.outf.write(json.dumps(item) + "\n")
+            self.samples_num += 1
         return
 
     def close_output_file(self):
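The timestamp serialization added above has one subtle point: char/word/segment offsets coming out of the decoder can carry numpy integer types, which json.dumps rejects. A small stand-alone illustration of that conversion, using made-up offsets rather than the writer class itself:

import json
import numpy as np

# Hypothetical decoder output: offsets may carry numpy integer types.
word_timestamps = [{"word": "hello", "start_offset": np.int64(12), "end_offset": np.int64(20)}]

item = {
    "word_timestamps": [
        # Cast numpy ints to plain ints so the record stays JSON-serializable.
        {key: int(value) if isinstance(value, np.int64) else value for key, value in offset.items()}
        for offset in word_timestamps
    ]
}
print(json.dumps(item))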
nemo/collections/asr/data/audio_to_text_lhotse.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
 from typing import Dict, Optional, Tuple
 
 import torch.utils.data
@@ -31,6 +32,11 @@ class LhotseSpeechToTextBpeDataset(torch.utils.data.Dataset):
     Specifically, it performs tokenization, I/O, augmentation, and feature extraction (if any).
     Managing data, sampling, de-duplication across workers/nodes etc. is all handled
     by Lhotse samplers instead.
+
+    NOTE:
+        If the environment variable ``USE_AIS_GET_BATCH`` is set to ``true`` (case-insensitive),
+        then batch audio loading from AIStore will be enabled for this dataset. This will use the
+        AISBatchLoader to load the audio from AIStore. This can improve data loading efficiency in some setups.
     """
 
     @property
@@ -46,7 +52,22 @@ class LhotseSpeechToTextBpeDataset(torch.utils.data.Dataset):
     def __init__(self, tokenizer: TokenizerSpec, return_cuts: bool = False):
         super().__init__()
         self.tokenizer = TokenizerWrapper(tokenizer)
-        self.load_audio = AudioSamples(fault_tolerant=True)
+        self.use_ais_get_batch = os.environ.get("USE_AIS_GET_BATCH", "False").lower() == "true"
+
+        # Try to use use_batch_loader if available (Lhotse >= 1.32.0)
+        try:
+            self.load_audio = AudioSamples(fault_tolerant=True, use_batch_loader=self.use_ais_get_batch)
+        except TypeError:
+            # Lhotse < 1.32.0 doesn't support use_batch_loader
+            if self.use_ais_get_batch:
+                import logging
+
+                logging.warning(
+                    "AIS batch loading requested but not supported by this Lhotse version. "
+                    "Please upgrade to Lhotse >= 1.32.0"
+                )
+            self.load_audio = AudioSamples(fault_tolerant=True)
+
         self.return_cuts = return_cuts
 
     def __getitem__(self, cuts) -> Tuple[torch.Tensor, ...]:
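In practice the AIStore batch loading above is an environment-variable opt-in. A minimal sketch of how a training script might enable it, mirroring the dataset's own fallback probe (this assumes Lhotse is installed; the variable name and the use_batch_loader keyword are taken from the diff above, everything else is illustrative):

import os

# Opt in to AIStore batch loading before the dataset is constructed.
os.environ["USE_AIS_GET_BATCH"] = "true"

from lhotse.dataset import AudioSamples

# Mirrors the dataset's fallback: older Lhotse versions reject use_batch_loader.
try:
    load_audio = AudioSamples(fault_tolerant=True, use_batch_loader=True)
except TypeError:
    load_audio = AudioSamples(fault_tolerant=True)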
nemo/collections/asr/data/audio_to_text_lhotse_prompt.py
ADDED
@@ -0,0 +1,177 @@
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Dict, Optional, Tuple

import numpy as np
import torch
import torch.utils.data
from lhotse.dataset import AudioSamples
from lhotse.dataset.collation import collate_matrices, collate_vectors

from nemo.collections.common.tokenizers.aggregate_tokenizer import AggregateTokenizer
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType


class LhotseSpeechToTextBpeDatasetWithPrompt(torch.utils.data.Dataset):
    """
    Dataset class for speech-to-text with prompt vectors.
    Supports both language ID and custom prompts.
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        return {
            'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
            'audio_signal_length': NeuralType(tuple('B'), LengthsType()),
            'transcripts': NeuralType(('B', 'T'), LabelsType()),
            'transcript_length': NeuralType(tuple('B'), LengthsType()),
            'prompt': NeuralType(('B', 'T', 'D'), LabelsType()),
        }

    def __init__(self, tokenizer, cfg):
        super().__init__()
        self.tokenizer = TokenizerWrapper(tokenizer)
        self.load_audio = AudioSamples(fault_tolerant=True)
        self.cfg = cfg

        # Calculate num_sample_per_mel_frame from config
        sample_rate = cfg.get('sample_rate', 16000)
        window_stride = cfg.get('window_stride', 0.01)
        self.num_sample_per_mel_frame = int(sample_rate * window_stride)

        self.subsampling_factor = cfg.get('subsampling_factor', 8)

        # Load prompt dictionary from config if provided
        self.prompt_dict = cfg.get('prompt_dictionary')
        if self.prompt_dict:
            # Set num_prompts based on the length of prompt_dictionary or a minimum value
            # This ensures we have enough dimensions in our embedding space to add scale up without changing the model
            self.num_prompts = cfg.get('num_prompts', 128)

        # Field to use for prompt key (default to 'language')
        self.prompt_field = cfg.get('prompt_field', 'language')

    def _get_prompt_index(self, prompt_key: str) -> int:
        """
        Maps prompt keys to indices using the prompt dictionary.
        """
        if not self.prompt_dict:
            raise ValueError("Prompt dictionary is empty. Please provide a valid prompt_dictionary in the config.")

        if prompt_key not in self.prompt_dict:
            available_keys = list(self.prompt_dict.keys())
            raise ValueError(
                f"Unknown prompt key: '{prompt_key}'. Available prompts: {available_keys[:10]}{'...' if len(available_keys) > 10 else ''}"
            )

        return self.prompt_dict[prompt_key]

    def prompt_to_target(self, cut, num_prompts: int, window_stride: int, subsampling_factor: int):
        """
        Create prompt target tensor for the sequence.
        """
        # Calculate encoder output length based on subsampling factor
        encoder_hidden_len = self.get_hidden_length_from_sample_length(cut.num_samples)

        # Initialize prompt target matrix
        mask = np.zeros((num_prompts, encoder_hidden_len))

        # Get prompt index - default to language if prompt not specified
        # revise supervisions to include prompt key
        # prompt_key = getattr(cut.supervisions[0].custom_fields, cut.supervisions[0].language)
        prompt_id = self._get_prompt_index(cut.supervisions[0].language)

        # Set the corresponding prompt ID to 1 for all time steps
        mask[prompt_id, :] = 1

        return mask

    def get_hidden_length_from_sample_length(self, num_samples: int) -> int:
        """
        Calculate the hidden length from the given number of samples.

        Parameters:
            num_samples (int): The total number of audio samples.

        Returns:
            hidden_length (int): The calculated hidden length in terms of the number of frames.
        """
        mel_frame_count = math.ceil((num_samples + 1) / self.num_sample_per_mel_frame)
        hidden_length = math.ceil(mel_frame_count / self.subsampling_factor)
        return int(hidden_length)

    def __getitem__(self, cuts) -> Tuple[torch.Tensor, ...]:
        audio, audio_lens, cuts = self.load_audio(cuts)
        tokens = [torch.as_tensor(self.tokenizer(c.supervisions[0].text, c.supervisions[0].language)) for c in cuts]

        # Create prompt targets
        prompt_targets = [
            torch.transpose(
                torch.as_tensor(
                    self.prompt_to_target(
                        c,
                        self.num_prompts,
                        self.num_sample_per_mel_frame,
                        self.subsampling_factor,
                    ),
                    dtype=torch.float32,
                ),
                0,
                1,
            )
            for c in cuts
        ]

        # Create final tensors
        token_lens = torch.tensor([t.size(0) for t in tokens], dtype=torch.long)
        tokens = collate_vectors(tokens, padding_value=0)
        prompt_targets = collate_matrices(prompt_targets)

        return (
            audio,  # Audio signal
            audio_lens,  # Audio lengths
            tokens,  # Text tokens
            token_lens,  # Token lengths
            prompt_targets,  # Prompt targets
        )


class TokenizerWrapper:
    """
    Provide a unified interface for NeMo Tokenizer, AggregateTokenizer, and (char) Parser.
    """

    def __init__(self, tokenizer):
        self._tokenizer = tokenizer
        if isinstance(tokenizer, AggregateTokenizer):
            self._impl = self._call_agg_tokenizer
        elif isinstance(tokenizer, TokenizerSpec):
            self._impl = self._call_tokenizer
        else:
            self._impl = self._call_parser

    def __call__(self, text: str, lang: Optional[str] = None):
        return self._impl(text, lang)

    def _call_agg_tokenizer(self, text: str, lang: Optional[str] = None):
        assert lang is not None, "Expected 'lang' to be set for AggregateTokenizer."
        return self._tokenizer.text_to_ids(text, lang)

    def _call_tokenizer(self, text: str, lang: Optional[str] = None):
        return self._tokenizer.text_to_ids(text)

    def _call_parser(self, text: str, lang: Optional[str] = None):
        return self._tokenizer(text)
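A short sketch of the config this prompt dataset expects, and of the frame-length arithmetic it performs. The key names follow the cfg.get(...) calls in the file above; the concrete values and the three-language prompt_dictionary are made up for illustration.

import math
from omegaconf import OmegaConf

# Illustrative config for LhotseSpeechToTextBpeDatasetWithPrompt.
cfg = OmegaConf.create(
    {
        "sample_rate": 16000,
        "window_stride": 0.01,
        "subsampling_factor": 8,
        "num_prompts": 128,
        "prompt_field": "language",
        "prompt_dictionary": {"en": 0, "de": 1, "es": 2},
    }
)

# 10 s of 16 kHz audio -> number of encoder frames, as computed by get_hidden_length_from_sample_length.
num_samples = 10 * 16000
mel_frames = math.ceil((num_samples + 1) / int(cfg.sample_rate * cfg.window_stride))
hidden_len = math.ceil(mel_frames / cfg.subsampling_factor)  # 126 frames for this example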
nemo/collections/asr/data/audio_to_text_lhotse_prompted.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c)
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,8 +11,9 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import os
 from dataclasses import dataclass
-from typing import
+from typing import Optional, Union
 
 import torch.utils.data
 from lhotse import CutSet
@@ -21,7 +22,7 @@ from lhotse.dataset import AudioSamples
 from lhotse.dataset.collation import collate_vectors
 
 from nemo.collections.common.data import apply_prompt_format_fn
-from nemo.collections.common.prompts import
+from nemo.collections.common.prompts import PromptFormatter
 from nemo.collections.common.tokenizers import TokenizerSpec
 
 
@@ -61,23 +62,68 @@ class PromptedAudioToTextLhotseDataset(torch.utils.data.Dataset):
     Tokenized utterances will be extended with special prompt tokens according to ``prompt_format_fn`` logic.
     We support cuts with multiple supervision segments -- their tokenized texts will be concatenated before we add the prompt tokens.
     This is useful, for example, in code-switched scenarios where each segment is spoken in a different language.
+
+    Chunking:
+        If `enable_chunking` is True, each audio sample is split into optimally sized chunks
+        (see `find_optimal_chunk_size` and `chunk_waveform`). This is useful for long audio inputs,
+        allowing the model to process them in manageable segments.
+
+    NOTE:
+        If the environment variable `USE_AIS_GET_BATCH` is set to `true` (case-insensitive),
+        then batch audio loading from AIStore will be enabled for this dataset. This will use the
+        AISBatchLoader to load the audio from AIStore. This can improve data loading efficiency in some setups.
     """
 
     def __init__(
         self,
         tokenizer: TokenizerSpec,
         prompt: PromptFormatter,
+        enable_chunking: bool = False,
     ):
         super().__init__()
         self.tokenizer = tokenizer
-        self.load_audio = AudioSamples(fault_tolerant=True)
+        self.use_ais_get_batch = os.environ.get("USE_AIS_GET_BATCH", "False").lower() == "true"
+
+        # Try to use use_batch_loader if available (Lhotse >= 1.32.0)
+        try:
+            self.load_audio = AudioSamples(fault_tolerant=True, use_batch_loader=self.use_ais_get_batch)
+        except TypeError:
+            # Lhotse < 1.32.0 doesn't support use_batch_loader
+            if self.use_ais_get_batch:
+                import logging
+
+                logging.warning(
+                    "AIS batch loading requested but not supported by this Lhotse version. "
+                    "Please upgrade to Lhotse >= 1.32.0"
+                )
+            self.load_audio = AudioSamples(fault_tolerant=True)
+
         self.padding_value = self.tokenizer.pad_id
         self.prompt = prompt
+        self.enable_chunking = enable_chunking
 
     def __getitem__(self, cuts: CutSet) -> PromptedAudioToTextMiniBatch:
+        # Load the audio's from AIS and add them to the CutSet
         audio, audio_lens, cuts = self.load_audio(cuts)
 
-        #
+        # Will work if batch_size is set to 1.
+        if self.enable_chunking:
+            # If dynamic chunking is enabled, split each audio sample into chunks.
+            new_audio = []
+            new_audio_lens = []
+            for i in range(audio.shape[0]):
+                waveform = audio[i, : audio_lens[i]]
+                # Split the waveform into chunks and get their lengths.
+                chunks, chunk_lens = self._chunk_waveform(waveform)
+                new_audio.extend(chunks)
+                new_audio_lens.extend(chunk_lens)
+            # Stack all chunks into a batch.
+            audio = torch.stack(new_audio)
+            audio_lens = torch.tensor(new_audio_lens, dtype=torch.long)
+            # Adding this to allow gathering results of the same audio from different batches
+            if cuts[0].start != 0:
+                cuts[0].id = cuts[0].id + '_cut_segmented'
+        # Fast-path: the tokenization and prompt formatting was already done before sampling.
         attrs = ("input_ids", "context_ids", "answer_ids")
         pre_formatted = all(hasattr(c, a) for c in cuts for a in attrs)
         if pre_formatted:
@@ -110,6 +156,93 @@ class PromptedAudioToTextLhotseDataset(torch.utils.data.Dataset):
         tokens = collate_vectors(tokens, padding_value=self.padding_value)
         return tokens, token_lens
 
+    def _find_optimal_chunk_size(
+        self, total_len: int, min_sec: int = 30, max_sec: int = 40, sample_rate: int = 16000, overlap_sec: float = 1.0
+    ) -> int:
+        """
+        Find the optimal chunk size for audio processing that minimizes paddings to the last chunk.
+
+        Args:
+            total_len (int): Total length of the audio waveform in samples
+            min_sec (int, optional): Minimum chunk size in seconds. Defaults to 30.
+            max_sec (int, optional): Maximum chunk size in seconds. Defaults to 40.
+            sample_rate (int, optional): Audio sample rate in Hz. Defaults to 16000.
+            overlap_sec (float, optional): Overlap duration between consecutive chunks in seconds.
+                Defaults to 1.0.
+
+        Returns:
+            int: Optimal chunk size in samples that maximizes the last chunk length
+        """
+        best_chunk_size = min_sec * sample_rate
+        best_last_chunk_len = 0
+        if total_len < max_sec * sample_rate:
+            return total_len
+        # Try each possible chunk duration in the range
+        for sec in range(min_sec, max_sec + 1):
+            chunk_size = sec * sample_rate
+            overlap_size = int(overlap_sec * sample_rate)
+            step_size = chunk_size - overlap_size
+
+            if step_size <= 0:  # Invalid overlap
+                continue
+            if chunk_size > total_len:
+                continue
+
+            # Calculate how many chunks we'd need and the last chunk's length
+            n_chunks = (total_len + step_size - 1) // step_size
+            last_chunk_len = total_len - step_size * (n_chunks - 1)
+
+            if last_chunk_len > best_last_chunk_len:
+                best_last_chunk_len = last_chunk_len
+                best_chunk_size = chunk_size
+
+        return best_chunk_size
+
+    def _chunk_waveform(
+        self, waveform: torch.Tensor, chunk_size: int = None, overlap_sec: float = 1.0, sample_rate: int = 16000
+    ) -> tuple[list[torch.Tensor], list[int]]:
+        """
+        Split a waveform tensor into overlapping chunks.
+
+        Args:
+            waveform (torch.Tensor): Input audio waveform tensor of shape (time_samples,)
+            chunk_size (int, optional): Size of each chunk in samples. If None, automatically
+                determines optimal chunk size using find_optimal_chunk_size(). Defaults to None.
+            sample_rate (int, optional): Audio sample rate in Hz. Defaults to 16000.
+            overlap_sec (float, optional): Overlap duration between consecutive chunks in seconds.
+                Used to calculate step size. Defaults to 1.0.
+
+        Returns:
+            tuple[list[torch.Tensor], list[int]]: A tuple containing:
+                - List of chunk tensors, each of shape (chunk_size,)
+                - List of original lengths for each chunk before padding (useful for masking
+                  padded regions during processing)
+        """
+        # If chunk_size is None, find the optimal chunk size for this waveform
+        total_len = waveform.shape[0]
+        if chunk_size is None:
+            chunk_size = self._find_optimal_chunk_size(total_len, overlap_sec=overlap_sec)
+        if chunk_size >= total_len:
+            return [waveform], [total_len]
+        overlap_size = int(overlap_sec * sample_rate)
+        step_size = chunk_size - overlap_size
+        chunks = []
+        chunk_lens = []
+        start = 0
+        while start + overlap_size < total_len:
+            end = min(start + chunk_size, total_len)
+            chunk = waveform[start:end]
+            length = chunk.shape[0]
+            if length < chunk_size:
+                pad = torch.zeros(chunk_size - length, dtype=chunk.dtype, device=chunk.device)
+                chunk = torch.cat([chunk, pad], dim=0)
+            chunks.append(chunk)
+            chunk_lens.append(length)
+            start += step_size
+
+        return chunks, chunk_lens
+
 
 class ProbablyIncorrectLanguageKeyError(RuntimeError):
     pass
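To see why the chunk-size search above helps, here is a small stand-alone check of the same stepping arithmetic on a synthetic 95-second waveform; it compares how full the last chunk is for a fixed 30 s chunk versus a 35 s chunk with 1 s of overlap. The helper below re-derives the math from the methods above and is not part of the dataset class.

import math

def n_chunks_and_last_len(total_len, chunk_size, overlap=16000):
    # Same stepping as _chunk_waveform: consecutive chunks overlap by `overlap` samples.
    step = chunk_size - overlap
    n = math.ceil(total_len / step)
    return n, total_len - step * (n - 1)

# 95 s of 16 kHz audio: a 30 s chunk leaves an 8 s last chunk (4 chunks),
# while a 35 s chunk leaves a 27 s last chunk (3 chunks), so 35 s wins the search.
total = 95 * 16000
for sec in (30, 35):
    n, last = n_chunks_and_last_len(total, sec * 16000)
    print(f"{sec}s chunks -> {n} chunks, last chunk {last / 16000:.1f}s")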
nemo/collections/asr/data/audio_to_text_lhotse_speaker.py
ADDED
@@ -0,0 +1,97 @@
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
from typing import Dict, Optional, Tuple

import torch.utils.data
from lhotse.dataset import AudioSamples
from lhotse.dataset.collation import collate_vectors

from nemo.collections.asr.data.audio_to_text_lhotse import TokenizerWrapper
from nemo.collections.asr.parts.utils.asr_multispeaker_utils import speaker_to_target
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.core.neural_types import AudioSignal, LabelsType, LengthsType, NeuralType


class LhotseSpeechToTextSpkBpeDataset(torch.utils.data.Dataset):
    """
    This dataset is based on BPE datasets from audio_to_text.py. It has the same functionality of LhotseSpeechToTextBpeDataset but also yield speaker target tensor.
    Unlike native NeMo datasets, Lhotse dataset defines only the mapping from
    a CutSet (meta-data) to a mini-batch with PyTorch tensors.
    Specifically, it performs tokenization, I/O, augmentation, and feature extraction (if any).
    Managing data, sampling, de-duplication across workers/nodes etc. is all handled
    by Lhotse samplers instead.
    """

    @property
    def output_types(self) -> Optional[Dict[str, NeuralType]]:
        return {
            'audio_signal': NeuralType(('B', 'T'), AudioSignal()),
            'a_sig_length': NeuralType(tuple('B'), LengthsType()),
            'transcripts': NeuralType(('B', 'T'), LabelsType()),
            'transcript_length': NeuralType(tuple('B'), LengthsType()),
            'spk_targets': NeuralType(('B', 'T'), LabelsType()),
            'bg_spk_targets': NeuralType(('B', 'T'), LabelsType()),
        }

    def __init__(self, cfg, tokenizer: TokenizerSpec):
        super().__init__()
        self.tokenizer = TokenizerWrapper(tokenizer)
        self.load_audio = AudioSamples(fault_tolerant=True)
        self.cfg = cfg
        self.num_speakers = self.cfg.get('num_speakers', 4)
        self.num_sample_per_mel_frame = self.cfg.get('num_sample_per_mel_frame', 160)
        self.num_mel_frame_per_asr_frame = self.cfg.get('num_mel_frame_per_asr_frame', 8)
        self.fixed_spk_id = self.cfg.get('fixed_spk_id', None)
        self.inference_mode = self.cfg.get('inference_mode', False)

    def __getitem__(self, cuts) -> Tuple[torch.Tensor, ...]:

        audio, audio_lens, cuts = self.load_audio(cuts)

        tokens = []
        spk_targets = []
        bg_spk_targets = []

        if self.inference_mode:
            return audio, audio_lens, None, None, None, None

        for idx, cut in enumerate(cuts):

            speaker_targets, texts = speaker_to_target(
                a_cut=cut,
                num_speakers=self.num_speakers,
                num_sample_per_mel_frame=self.num_sample_per_mel_frame,
                num_mel_frame_per_asr_frame=self.num_mel_frame_per_asr_frame,
                return_text=True,
            )
            speaker_targets = speaker_targets.transpose(0, 1)[: len(texts)]

            target_speaker_id = random.choice(range(len(texts)))
            non_target_speaker_ids = [i for i in range(len(texts)) if i != target_speaker_id]
            text = texts[target_speaker_id]
            speaker_target = speaker_targets[target_speaker_id]
            bg_speaker_target = speaker_targets[non_target_speaker_ids].sum(dim=0) > 0

            tokens.append(torch.as_tensor(self.tokenizer(text, cut.supervisions[0].language)))
            spk_targets.append(speaker_target)
            bg_spk_targets.append(bg_speaker_target)

        token_lens = torch.tensor([t.size(0) for t in tokens], dtype=torch.long)
        tokens = collate_vectors(tokens, padding_value=0)
        spk_targets = collate_vectors(spk_targets, padding_value=0)
        bg_spk_targets = collate_vectors(bg_spk_targets, padding_value=0)

        return audio, audio_lens, tokens, token_lens, spk_targets, bg_spk_targets
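The target/background split in the speaker dataset above reduces to a simple mask operation over frame-level activity. A toy illustration with a made-up 3-speaker, 8-frame activity tensor (not real diarization output):

import torch

# Toy frame-level speaker activity for 3 speakers over 8 frames (1 = speaking).
spk_targets = torch.tensor(
    [
        [1, 1, 0, 0, 0, 0, 1, 1],
        [0, 0, 1, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 1, 0, 0],
    ],
    dtype=torch.float32,
)

target_speaker_id = 0
non_target = [i for i in range(spk_targets.size(0)) if i != target_speaker_id]

speaker_target = spk_targets[target_speaker_id]
bg_speaker_target = spk_targets[non_target].sum(dim=0) > 0  # any other speaker active per frame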
nemo/collections/asr/data/data_simulation.py
CHANGED
|
@@ -67,122 +67,86 @@ class MultiSpeakerSimulator(object):
|
|
| 67 |
Multispeaker Audio Session Simulator - Simulates multispeaker audio sessions using single-speaker audio files and
|
| 68 |
corresponding word alignments.
|
| 69 |
|
| 70 |
-
Change Log:
|
| 71 |
-
v1.0: Dec 2022
|
| 72 |
-
- First working verison, supports multispeaker simulation with overlaps, silence and RIR
|
| 73 |
-
v1.0.1: Feb 2023
|
| 74 |
-
- Multi-GPU support for speed up
|
| 75 |
-
- Faster random sampling routine
|
| 76 |
-
- Fixed sentence duration bug
|
| 77 |
-
- Silence and overlap length sampling algorithms are updated to guarantee `mean_silence` approximation
|
| 78 |
-
v1.0.2: March 2023
|
| 79 |
-
- Added support for segment-level gain perturbation and session-level white-noise perturbation
|
| 80 |
-
- Modified speaker sampling mechanism to include as many speakers as possible in each data-generation run
|
| 81 |
-
- Added chunking mechanism to avoid freezing in multiprocessing processes
|
| 82 |
-
|
| 83 |
-
v1.1.0 March 2023
|
| 84 |
-
- Faster audio-file loading with maximum audio duration parameter
|
| 85 |
-
- Re-organized MultiSpeakerSimulator class and moved util functions to util files.
|
| 86 |
-
v1.1.1 March 2023
|
| 87 |
-
- Changed `silence_mean` to use exactly the same sampling equation as `overlap_mean`.
|
| 88 |
-
|
| 89 |
-
|
| 90 |
Args:
|
| 91 |
cfg: OmegaConf configuration loaded from yaml file.
|
| 92 |
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
snr_min (int): Min random SNR for background noise (using average speaker power), set `null` to use fixed SNR
|
| 158 |
-
snr_max (int): Max random SNR for background noise (using average speaker power), set `null` to use fixed SNR
|
| 159 |
-
|
| 160 |
-
segment_augmentor:
|
| 161 |
-
add_seg_aug (bool): Set True to enable augmentation on each speech segment (Default: False)
|
| 162 |
-
segmentor:
|
| 163 |
-
gain:
|
| 164 |
-
prob (float): Probability range (uniform distribution) gain augmentation for individual segment
|
| 165 |
-
min_gain_dbfs (float): minimum gain in terms of dB
|
| 166 |
-
max_gain_dbfs (float): maximum gain in terms of dB
|
| 167 |
-
|
| 168 |
-
session_augmentor:
|
| 169 |
-
add_sess_aug: (bool) set True to enable audio augmentation on the whole session (Default: False)
|
| 170 |
-
segmentor:
|
| 171 |
-
white_noise:
|
| 172 |
prob (float): Probability of adding white noise (Default: 1.0)
|
| 173 |
-
min_level (float): minimum gain in
|
| 174 |
-
max_level (float): maximum gain in
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
deci (int): Rounding decimals for segment manifest file
|
| 186 |
"""
|
| 187 |
|
| 188 |
def __init__(self, cfg):
|
|
@@ -629,7 +593,7 @@ class MultiSpeakerSimulator(object):
|
|
| 629 |
if num_missing != 0:
|
| 630 |
warnings.warn(
|
| 631 |
f"{self._params.data_simulator.session_config.num_speakers - num_missing}"
|
| 632 |
-
|
| 633 |
f"{self._params.data_simulator.session_config.num_speakers}"
|
| 634 |
)
|
| 635 |
|
|
@@ -1117,7 +1081,7 @@ class MultiSpeakerSimulator(object):
|
|
| 1117 |
)
|
| 1118 |
self.annotator.annote_lists['json'].append(new_json_entry)
|
| 1119 |
|
| 1120 |
-
new_ctm_entries = self.annotator.create_new_ctm_entry(
|
| 1121 |
words=self._words,
|
| 1122 |
alignments=self._alignments,
|
| 1123 |
session_name=filename,
|
|
@@ -1148,7 +1112,7 @@ class MultiSpeakerSimulator(object):
|
|
| 1148 |
if self._params.data_simulator.background_noise.add_bg:
|
| 1149 |
if len(self._noise_samples) > 0:
|
| 1150 |
avg_power_array = torch.mean(array[is_speech == 1] ** 2)
|
| 1151 |
-
bg, snr = get_background_noise(
|
| 1152 |
len_array=len(array),
|
| 1153 |
power_array=avg_power_array,
|
| 1154 |
noise_samples=self._noise_samples,
|
|
@@ -1190,7 +1154,7 @@ class MultiSpeakerSimulator(object):
|
|
| 1190 |
Args:
|
| 1191 |
random_seed (int): random seed for reproducibility
|
| 1192 |
"""
|
| 1193 |
-
logging.info(
|
| 1194 |
if random_seed is None:
|
| 1195 |
random_seed = self._params.data_simulator.random_seed
|
| 1196 |
np.random.seed(random_seed)
|
|
@@ -1286,29 +1250,25 @@ class RIRMultiSpeakerSimulator(MultiSpeakerSimulator):
|
|
| 1286 |
Args:
|
| 1287 |
cfg: OmegaConf configuration loaded from yaml file.
|
| 1288 |
|
| 1289 |
-
|
| 1290 |
-
|
| 1291 |
-
|
| 1292 |
-
|
| 1293 |
-
|
| 1294 |
-
|
| 1295 |
-
|
| 1296 |
-
|
| 1297 |
-
|
| 1298 |
-
|
| 1299 |
-
|
| 1300 |
-
|
| 1301 |
-
|
| 1302 |
-
|
| 1303 |
-
|
| 1304 |
-
|
| 1305 |
-
|
| 1306 |
-
|
| 1307 |
-
|
| 1308 |
-
T60 (float): Room reverberation time (`T60` is the time it takes for the RIR to decay by 60DB)
|
| 1309 |
-
att_diff (float): Starting attenuation (if this is different than att_max, the diffuse reverberation model is
|
| 1310 |
-
used by gpuRIR)
|
| 1311 |
-
att_max (float): End attenuation when using the diffuse reverberation model (gpuRIR)
|
| 1312 |
"""
|
| 1313 |
|
| 1314 |
def __init__(self, cfg):
|
|
@@ -1466,6 +1426,8 @@ class RIRMultiSpeakerSimulator(MultiSpeakerSimulator):
|
|
| 1466 |
if self._params.data_simulator.rir_generation.mic_config.mic_pattern == 'omni':
|
| 1467 |
mic_pattern = DirectivityPattern.OMNI
|
| 1468 |
dir_vec = DirectionVector(azimuth=0, colatitude=90, degrees=True)
|
|
|
|
|
|
|
| 1469 |
dir_obj = CardioidFamily(
|
| 1470 |
orientation=dir_vec,
|
| 1471 |
pattern_enum=mic_pattern,
|
|
@@ -1509,6 +1471,8 @@ class RIRMultiSpeakerSimulator(MultiSpeakerSimulator):
|
|
| 1509 |
out_channel = convolve(input, RIR[speaker_turn, channel, : len(input)]).tolist()
|
| 1510 |
elif self._params.data_simulator.rir_generation.toolkit == 'pyroomacoustics':
|
| 1511 |
out_channel = convolve(input, RIR[channel][speaker_turn][: len(input)]).tolist()
|
|
|
|
|
|
|
| 1512 |
if len(out_channel) > length:
|
| 1513 |
length = len(out_channel)
|
| 1514 |
output_sound.append(torch.tensor(out_channel))
|
|
@@ -1643,8 +1607,12 @@ class RIRMultiSpeakerSimulator(MultiSpeakerSimulator):
|
|
| 1643 |
)
|
| 1644 |
self.annotator.annote_lists['json'].append(new_json_entry)
|
| 1645 |
|
| 1646 |
-
new_ctm_entries = self.annotator.create_new_ctm_entry(
|
| 1647 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1648 |
)
|
| 1649 |
self.annotator.annote_lists['ctm'].extend(new_ctm_entries)
|
| 1650 |
|
|
@@ -1659,23 +1627,21 @@ class RIRMultiSpeakerSimulator(MultiSpeakerSimulator):
|
|
| 1659 |
array = perturb_audio(array, self._params.data_simulator.sr, self.session_augmentor)
|
| 1660 |
|
| 1661 |
# Step 7-2: Additive background noise from noise manifest files
|
| 1662 |
-
if self._params.data_simulator.background_noise.add_bg:
|
| 1663 |
-
|
| 1664 |
-
|
| 1665 |
-
|
| 1666 |
-
|
| 1667 |
-
|
| 1668 |
-
|
| 1669 |
-
|
| 1670 |
-
|
| 1671 |
-
|
| 1672 |
-
|
| 1673 |
-
|
| 1674 |
-
|
| 1675 |
-
|
| 1676 |
-
array += bg
|
| 1677 |
length = array.shape[0]
|
| 1678 |
-
bg, snr = self._get_background(length, avg_power_array)
|
| 1679 |
augmented_bg, _ = self._convolve_rir(bg, -1, RIR)
|
| 1680 |
for channel in range(self._params.data_simulator.rir_generation.mic_config.num_channels):
|
| 1681 |
array[:, channel] += augmented_bg[channel][:length]
|
|
|
|
| 67 |
    Multispeaker Audio Session Simulator - Simulates multispeaker audio sessions using single-speaker audio files and
    corresponding word alignments.

    Args:
        cfg: OmegaConf configuration loaded from yaml file.

+   Configuration parameters (YAML)::
+
+       Parameters:
+           manifest_filepath (str): Manifest file with paths to single speaker audio files
+           sr (int): Sampling rate of the input audio files from the manifest
+           random_seed (int): Seed to random number generator
+
+       session_config:
+           num_speakers (int): Number of unique speakers per multispeaker audio session
+           num_sessions (int): Number of sessions to simulate
+           session_length (int): Length of each simulated multispeaker audio session (seconds)
+
+       session_params:
+           max_audio_read_sec (int): Max audio length in seconds when loading an audio file
+           sentence_length_params (list): k,p values for a negative_binomial distribution
+           dominance_var (float): Variance in speaker dominance
+           min_dominance (float): Minimum percentage of speaking time per speaker
+           turn_prob (float): Probability of switching speakers after each utterance
+           mean_silence (float): Mean proportion of silence to speaking time [0, 1)
+           mean_silence_var (float): Variance for mean silence in all audio sessions
+           per_silence_var (float): Variance for each silence in an audio session
+           per_silence_min (float): Minimum duration for each silence (default: 0)
+           per_silence_max (float): Maximum duration for each silence (default: -1, no max)
+           mean_overlap (float): Mean proportion of overlap in non-silence duration [0, 1)
+           mean_overlap_var (float): Variance for mean overlap in all audio sessions
+           per_overlap_var (float): Variance for per overlap in each session
+           per_overlap_min (float): Minimum per overlap duration in seconds
+           per_overlap_max (float): Maximum per overlap duration in seconds (-1 for no max)
+           start_window (bool): Whether to window the start of sentences
+           window_type (str): Type of windowing ("hamming", "hann", "cosine")
+           window_size (float): Length of window at start/end of segmented utterance (seconds)
+           start_buffer (float): Buffer of silence before the start of the sentence
+           split_buffer (float): Split RTTM labels if greater than twice this amount of silence
+           release_buffer (float): Buffer before window at end of sentence
+           normalize (bool): Normalize speaker volumes
+           normalization_type (str): "equal" or "var" volume per speaker
+           normalization_var (str): Variance in speaker volume
+           min_volume (float): Minimum speaker volume (variable normalization only)
+           max_volume (float): Maximum speaker volume (variable normalization only)
+           end_buffer (float): Buffer at the end of the session to leave blank
+
+       outputs:
+           output_dir (str): Output directory for audio sessions and label files
+           output_filename (str): Output filename for the wav and RTTM files
+           overwrite_output (bool): If true, delete the output directory if it exists
+           output_precision (int): Number of decimal places in output files
+
+       background_noise:
+           add_bg (bool): Add ambient background noise if true
+           background_manifest (str): Path to background noise manifest file
+           snr (int): SNR for background noise (using average speaker power)
+           snr_min (int): Min random SNR (set null to use fixed SNR)
+           snr_max (int): Max random SNR (set null to use fixed SNR)
+
+       segment_augmentor:
+           add_seg_aug (bool): Enable augmentation on each speech segment (Default: False)
+           segmentor.gain:
+               prob (float): Probability of gain augmentation
+               min_gain_dbfs (float): minimum gain in dB
+               max_gain_dbfs (float): maximum gain in dB
+
+       session_augmentor:
+           add_sess_aug (bool): Enable audio augmentation on the whole session (Default: False)
+           segmentor.white_noise:
+               prob (float): Probability of adding white noise (Default: 1.0)
+               min_level (float): minimum gain in dB
+               max_level (float): maximum gain in dB
+
+       speaker_enforcement:
+           enforce_num_speakers (bool): Enforce all requested speakers are present
+           enforce_time (list): Percentage through session that enforcement triggers
+
+       segment_manifest:
+           window (float): Window length for segmentation
+           shift (float): Shift length for segmentation
+           step_count (int): Number of unit segments per utterance
+           deci (int): Rounding decimals for segment manifest file
    """

    def __init__(self, cfg):

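The new docstring above documents the simulator's YAML configuration tree. As a rough, non-authoritative sketch of how such a config might be assembled in code, the snippet below builds a minimal OmegaConf object with a few of the documented fields; every value is an illustrative placeholder, and only field names taken from the docstring are used.

```python
# Minimal sketch (not part of the commit): assembling a small simulator-style config
# with OmegaConf. All values are illustrative placeholders.
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "data_simulator": {
            "manifest_filepath": "train_manifest.json",  # placeholder path
            "sr": 16000,
            "random_seed": 42,
            "session_config": {"num_speakers": 4, "num_sessions": 10, "session_length": 600},
            "session_params": {"mean_silence": 0.15, "mean_overlap": 0.1, "turn_prob": 0.875},
            "outputs": {"output_dir": "./simulated_sessions", "output_filename": "multispeaker_session"},
            "background_noise": {"add_bg": False},
        }
    }
)
print(OmegaConf.to_yaml(cfg))  # prints the nested config in the YAML layout the docstring describes
```

With the remaining fields filled in from the full YAML file, an object of this shape is what the `__init__(self, cfg)` shown above expects to receive.
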
        if num_missing != 0:
            warnings.warn(
                f"{self._params.data_simulator.session_config.num_speakers - num_missing}"
+               "speakers were included in the clip instead of the requested amount of "
                f"{self._params.data_simulator.session_config.num_speakers}"
            )

        )
        self.annotator.annote_lists['json'].append(new_json_entry)

+       new_ctm_entries, _ = self.annotator.create_new_ctm_entry(
            words=self._words,
            alignments=self._alignments,
            session_name=filename,

        if self._params.data_simulator.background_noise.add_bg:
            if len(self._noise_samples) > 0:
                avg_power_array = torch.mean(array[is_speech == 1] ** 2)
+               bg, snr, _ = get_background_noise(
                    len_array=len(array),
                    power_array=avg_power_array,
                    noise_samples=self._noise_samples,

    Args:
        random_seed (int): random seed for reproducibility
    """
+   logging.info("Generating Diarization Sessions")
    if random_seed is None:
        random_seed = self._params.data_simulator.random_seed
    np.random.seed(random_seed)

    Args:
        cfg: OmegaConf configuration loaded from yaml file.

+   Additional configuration parameters (on top of ``MultiSpeakerSimulator``)::
+
+       rir_generation:
+           use_rir (bool): Whether to generate synthetic RIR
+           toolkit (str): Which toolkit to use ("pyroomacoustics", "gpuRIR")
+           room_config:
+               room_sz (list): Size of the shoebox room environment
+               pos_src (list): Positions of the speakers in the simulated room
+               noise_src_pos (list): Position in room for background noise source
+           mic_config:
+               num_channels (int): Number of output audio channels
+               pos_rcv (list): Microphone positions in the simulated room
+               orV_rcv (list or null): Microphone orientations
+               mic_pattern (str): Microphone type ("omni")
+           absorbtion_params:
+               abs_weights (list): Absorption coefficient ratios for each surface
+               T60 (float): Room reverberation time (decay by 60dB)
+               att_diff (float): Starting attenuation for diffuse reverberation model
+               att_max (float): End attenuation for diffuse reverberation model (gpuRIR)
    """

    def __init__(self, cfg):

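Continuing the same pattern, the `rir_generation` block described above would typically be layered on top of a base simulator config. The sketch below is purely illustrative (placeholder room geometry and reverberation values) and assumes plain OmegaConf merging rather than any particular NeMo helper.

```python
# Illustrative sketch only: an rir_generation block merged over a base config.
from omegaconf import OmegaConf

rir_overrides = OmegaConf.create(
    {
        "data_simulator": {
            "rir_generation": {
                "use_rir": True,
                "toolkit": "pyroomacoustics",
                "room_config": {"room_sz": [6.0, 5.0, 3.0], "pos_src": [[1.0, 1.0, 1.5], [4.0, 3.5, 1.5]]},
                "mic_config": {"num_channels": 1, "pos_rcv": [[3.0, 2.5, 1.5]], "mic_pattern": "omni"},
                "absorbtion_params": {"T60": 0.4, "att_diff": 15.0, "att_max": 60.0},
            }
        }
    }
)
# base_cfg would be the MultiSpeakerSimulator config from the previous sketch:
# merged_cfg = OmegaConf.merge(base_cfg, rir_overrides)
```
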
        if self._params.data_simulator.rir_generation.mic_config.mic_pattern == 'omni':
            mic_pattern = DirectivityPattern.OMNI
            dir_vec = DirectionVector(azimuth=0, colatitude=90, degrees=True)
+       else:
+           raise Exception("Currently, microphone pattern must be omni. Aborting RIR generation.")
        dir_obj = CardioidFamily(
            orientation=dir_vec,
            pattern_enum=mic_pattern,

            out_channel = convolve(input, RIR[speaker_turn, channel, : len(input)]).tolist()
        elif self._params.data_simulator.rir_generation.toolkit == 'pyroomacoustics':
            out_channel = convolve(input, RIR[channel][speaker_turn][: len(input)]).tolist()
+       else:
+           raise Exception("Toolkit must be pyroomacoustics or gpuRIR. Aborting RIR convolution.")
        if len(out_channel) > length:
            length = len(out_channel)
        output_sound.append(torch.tensor(out_channel))

        )
        self.annotator.annote_lists['json'].append(new_json_entry)

+       new_ctm_entries, _ = self.annotator.create_new_ctm_entry(
+           words=self._text,
+           alignments=self._alignments,
+           session_name=filename,
+           speaker_id=speaker_ids[speaker_turn],
+           start=start / self._params.data_simulator.sr,
        )
        self.annotator.annote_lists['ctm'].extend(new_ctm_entries)

        array = perturb_audio(array, self._params.data_simulator.sr, self.session_augmentor)

        # Step 7-2: Additive background noise from noise manifest files
+       if self._params.data_simulator.background_noise.add_bg and len(self._noise_samples) > 0:
+           avg_power_array = torch.mean(array[is_speech == 1] ** 2)
+           bg, snr, _ = get_background_noise(
+               len_array=len(array),
+               power_array=avg_power_array,
+               noise_samples=self._noise_samples,
+               audio_read_buffer_dict=self._audio_read_buffer_dict,
+               snr_min=self._params.data_simulator.background_noise.snr_min,
+               snr_max=self._params.data_simulator.background_noise.snr_max,
+               background_noise_snr=self._params.data_simulator.background_noise.snr,
+               seed=(random_seed + idx),
+               device=self._device,
+           )
+           array += bg
        length = array.shape[0]
        augmented_bg, _ = self._convolve_rir(bg, -1, RIR)
        for channel in range(self._params.data_simulator.rir_generation.mic_config.num_channels):
            array[:, channel] += augmented_bg[channel][:length]

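In both simulators the sampled background noise is scaled against the average speech power so the mixture hits the configured SNR before it is added (and, in the RIR case, convolved with the room response). The snippet below is a self-contained sketch of that scaling relationship only; `get_background_noise` itself also handles manifest reading, buffering, and seeding, so this is not the NeMo implementation.

```python
# Stand-alone sketch of SNR-based noise scaling (not the NeMo implementation).
import torch

def scale_noise_to_snr(noise: torch.Tensor, speech_power: torch.Tensor, snr_db: float) -> torch.Tensor:
    """Scale `noise` so that 10 * log10(speech_power / noise_power) == snr_db."""
    noise_power = torch.mean(noise**2)
    target_noise_power = speech_power / (10.0 ** (snr_db / 10.0))
    return noise * torch.sqrt(target_noise_power / noise_power)

speech = torch.randn(16000)  # one second of placeholder "speech" at 16 kHz
noise = torch.randn(16000)   # placeholder noise
bg = scale_noise_to_snr(noise, torch.mean(speech**2), snr_db=20.0)
noisy = speech + bg          # mixture at roughly 20 dB SNR
```
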
nemo/collections/asr/data/ssl_dataset.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (c)
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -103,12 +103,8 @@ def _audio_noise_collate_fn(batch: List[AudioNoiseItem], batch_augmentor: Any =
    noises = [x.noise for x in batch]
    noise_lengths = [x.noise_len for x in batch]

-   noisy_audios = [x.noisy_audio for x in batch]
-   noisy_audio_lengths = [x.noisy_audio_len for x in batch]
-
    audio_signal_list = []
    noise_signal_list = []
-   noisy_audio_signal_list = []
    for i, audio in enumerate(audios):
        audio_len = audio.size(0)
        if audio_len < max_audio_len:

@@ -123,31 +119,23 @@ def _audio_noise_collate_fn(batch: List[AudioNoiseItem], batch_augmentor: Any =
            noise = torch.nn.functional.pad(noise, pad)
        noise_signal_list.append(noise[:max_audio_len])

-       noisy_audio = noisy_audios[i]
-       noisy_audio_len = noisy_audio.size(0)
-       if noisy_audio_len < max_audio_len:
-           pad = (0, max_audio_len - noisy_audio_len)
-           noisy_audio = torch.nn.functional.pad(noisy_audio, pad)
-       noisy_audio_signal_list.append(noisy_audio[:max_audio_len])
-
    audio_signal = torch.stack(audio_signal_list).float()
    audio_lengths = torch.stack(audio_lengths).long()
    noise_signal = torch.stack(noise_signal_list).float()
    noise_lengths = torch.stack(noise_lengths).long()
-   noisy_audio_signal = torch.stack(noisy_audio_signal_list).float()
-   noisy_audio_lengths = torch.stack(noisy_audio_lengths).long()

    output = AudioNoiseBatch(
        audio=audio_signal,
        audio_len=audio_lengths,
        noise=noise_signal,
        noise_len=noise_lengths,
-       noisy_audio=noisy_audio_signal,
-       noisy_audio_len=noisy_audio_lengths,
    )

    if batch_augmentor is not None:
        output = batch_augmentor(output)

    return output

@@ -344,8 +332,6 @@ class AudioNoiseDataset(audio_to_text.AudioToCharDataset):
            audio_len=audio_len,
            noise=noise,
            noise_len=noise_len,
-           noisy_audio=audio + noise,
-           noisy_audio_len=audio_len,
        )
        return item

@@ -421,8 +407,6 @@ class TarredAudioNoiseDataset(audio_to_text.TarredAudioToCharDataset):
            audio_len=audio_len,
            noise=noise,
            noise_len=noise_len,
-           noisy_audio=audio + noise,
-           noisy_audio_len=audio_len,
        )
        return item

@@ -460,21 +444,29 @@ class LhotseAudioNoiseDataset(torch.utils.data.Dataset):
    def __getitem__(self, cuts):
        audios, audio_lens, cuts = self.load_audio(cuts)
-       ...

def get_audio_noise_dataset(

+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

    noises = [x.noise for x in batch]
    noise_lengths = [x.noise_len for x in batch]

    audio_signal_list = []
    noise_signal_list = []
    for i, audio in enumerate(audios):
        audio_len = audio.size(0)
        if audio_len < max_audio_len:

            noise = torch.nn.functional.pad(noise, pad)
        noise_signal_list.append(noise[:max_audio_len])

    audio_signal = torch.stack(audio_signal_list).float()
    audio_lengths = torch.stack(audio_lengths).long()
    noise_signal = torch.stack(noise_signal_list).float()
    noise_lengths = torch.stack(noise_lengths).long()

    output = AudioNoiseBatch(
        audio=audio_signal,
        audio_len=audio_lengths,
        noise=noise_signal,
        noise_len=noise_lengths,
    )

    if batch_augmentor is not None:
        output = batch_augmentor(output)
+   else:
+       output.noisy_audio = output.audio + output.noise
+       output.noisy_audio_len = output.audio_len

    return output

            audio_len=audio_len,
            noise=noise,
            noise_len=noise_len,
        )
        return item

            audio_len=audio_len,
            noise=noise,
            noise_len=noise_len,
        )
        return item

    def __getitem__(self, cuts):
        audios, audio_lens, cuts = self.load_audio(cuts)
+       if len(self.noise_data) > 0:
+           sampled_noises = [sample_noise(self.noise_data, cut.sampling_rate, cut.num_samples) for cut in cuts]
+           sampled_noises, sampled_noises_lens = zip(*sampled_noises)
+           sampled_noises = torch.stack(sampled_noises).float()
+           sampled_noises_lens = torch.tensor(sampled_noises_lens).long()
+       else:
+           sampled_noises = torch.zeros_like(audios)
+           sampled_noises_lens = audio_lens
+
+       output = AudioNoiseBatch(
+           audio=audios,
+           audio_len=audio_lens,
+           noise=sampled_noises,
+           noise_len=sampled_noises_lens,
+       )
+
+       if self.batch_augmentor is not None:
+           output = self.batch_augmentor(output)
+       else:
+           output.noisy_audio = output.audio + output.noise
+           output.noisy_audio_len = output.audio_len
+
+       return output

def get_audio_noise_dataset(

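The net effect of the ssl_dataset.py changes is that `noisy_audio` is no longer precomputed per example: batches carry clean audio and noise separately, and the mixture is formed only when no batch augmentor is supplied. The sketch below illustrates that fallback with a stand-in dataclass; it is not NeMo's `AudioNoiseBatch`, just a placeholder to show the control flow.

```python
# Placeholder illustration of the "mix only when no augmentor is given" fallback.
from dataclasses import dataclass
from typing import Callable, Optional
import torch

@dataclass
class FakeBatch:  # stand-in for AudioNoiseBatch, not the NeMo class
    audio: torch.Tensor
    noise: torch.Tensor
    noisy_audio: Optional[torch.Tensor] = None

def finalize(batch: FakeBatch, augmentor: Optional[Callable[[FakeBatch], FakeBatch]] = None) -> FakeBatch:
    if augmentor is not None:
        return augmentor(batch)  # an augmentor is expected to produce noisy_audio itself
    batch.noisy_audio = batch.audio + batch.noise
    return batch

batch = finalize(FakeBatch(audio=torch.randn(2, 16000), noise=torch.randn(2, 16000)))
assert batch.noisy_audio.shape == (2, 16000)
```
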
nemo/collections/asr/data/text_to_text.py
CHANGED
@@ -335,7 +335,9 @@ class TextToTextDatasetBase:
        tts_tokenizer_global = copy.deepcopy(tokenizer)

        with concurrent.futures.ProcessPoolExecutor(
-           initializer=_init_tts_tokenize_process,
        ) as pool:
            # chunk size for pool map is empirically chosen as a trade-off between speed and responsiveness
            for i, tokenized_text in enumerate(

@@ -373,7 +375,7 @@ class TextToTextDatasetBase:
class TextToTextDataset(TextToTextDatasetBase, Dataset):
-   """Text-to-Text Map-style Dataset

    def __init__(
        self,

@@ -418,8 +420,8 @@ class TextToTextDataset(TextToTextDatasetBase, Dataset):
class TextToTextIterableDataset(TextToTextDatasetBase, IterableDataset):
    """
-   Text-to-Text Iterable Dataset
-   Only part necessary for current process should be loaded and stored
    """

    def __init__(

        tts_tokenizer_global = copy.deepcopy(tokenizer)

        with concurrent.futures.ProcessPoolExecutor(
+           initializer=_init_tts_tokenize_process,
+           initargs=(tts_parser,),
+           max_workers=tokenizer_workers,
        ) as pool:
            # chunk size for pool map is empirically chosen as a trade-off between speed and responsiveness
            for i, tokenized_text in enumerate(

class TextToTextDataset(TextToTextDatasetBase, Dataset):
+   """Text-to-Text Map-style Dataset."""

    def __init__(
        self,

class TextToTextIterableDataset(TextToTextDatasetBase, IterableDataset):
    """
+   Text-to-Text Iterable Dataset.
+   Only part necessary for current process should be loaded and stored.
    """

    def __init__(

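The executor change above follows the standard `concurrent.futures` pattern for per-worker initialization: `initializer` runs once in each worker process with `initargs`, so heavyweight state (here the TTS parser) is built once per worker instead of being pickled with every task. Below is a generic, self-contained example of the pattern; the function and variable names are illustrative and unrelated to NeMo.

```python
# Generic per-worker initialization with ProcessPoolExecutor (illustrative names only).
import concurrent.futures

_worker_state = {}

def init_worker(config_name: str) -> None:
    # Runs once in every worker process; imagine loading a tokenizer or parser here.
    _worker_state["config"] = config_name

def tokenize(text: str) -> str:
    return f"[{_worker_state['config']}] {text.lower()}"

if __name__ == "__main__":
    with concurrent.futures.ProcessPoolExecutor(
        max_workers=2, initializer=init_worker, initargs=("demo-parser",)
    ) as pool:
        print(list(pool.map(tokenize, ["Hello", "World"], chunksize=1)))
```
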
nemo/collections/asr/inference/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

nemo/collections/asr/inference/factory/__init__.py
ADDED
@@ -0,0 +1,13 @@
+# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.