| """WebSocket ASR server for Nemotron-Speech with true incremental streaming and timestamps.""" |
|
|
| import asyncio |
| import argparse |
| import hashlib |
| import json |
| import os |
| from dataclasses import dataclass, field |
| from typing import Any, Optional, Tuple |
|
|
| import numpy as np |
| import torch |
| from aiohttp import web, WSMsgType |
| from loguru import logger |
|
|
| from nemo.collections.asr.parts.utils.transcribe_utils import normalize_timestamp_output |
| from nemo.collections.asr.parts.utils.timestamp_utils import process_timestamp_outputs |
|
|
| |
# When the DEBUG_ASR env var is "1", log per-chunk sizes and audio hashes.
DEBUG_ASR = os.environ.get("DEBUG_ASR", "0") == "1"
|
|
|
|
| def _hash_audio(audio: np.ndarray) -> str: |
| """Get short hash of audio array for debugging.""" |
| if audio is None or len(audio) == 0: |
| return "empty" |
| return hashlib.md5(audio.tobytes()).hexdigest()[:8] |
|
|
|
|
| |
| |
# Default checkpoint: a local .nemo file (Japanese FastConformer RNNT with EOU).
DEFAULT_MODEL = "results/NeMo_Ja_FastConformer_Transducer_RNNT_EOU/checkpoints/NeMo_Ja_FastConformer_Transducer_RNNT_EOU.nemo"


# Supported --right-context values (encoder right-context frames) and the
# approximate latency each implies; used for logging in load_model().
RIGHT_CONTEXT_OPTIONS = {
    0: "~80ms ultra-low latency",
    1: "~160ms low latency (recommended)",
    6: "~560ms balanced",
    13: "~1.12s highest accuracy",
}
|
|
|
|
@dataclass
class ASRSession:
    """Per-connection session state with caches for true incremental streaming."""

    # Short unique id used in log messages.
    id: str
    # aiohttp WebSocketResponse for this client (typed Any to avoid a hard dep here).
    websocket: Any

    # Raw float32 mono audio accumulated since the last hard reset.
    accumulated_audio: Optional[np.ndarray] = None

    # Number of mel frames already fed to the streaming encoder.
    emitted_frames: int = 0

    # Conformer streaming cache tensors (from encoder.get_initial_cache_state).
    cache_last_channel: Optional[torch.Tensor] = None
    cache_last_time: Optional[torch.Tensor] = None
    cache_last_channel_len: Optional[torch.Tensor] = None

    # Decoder state carried across conformer_stream_step calls.
    previous_hypotheses: Any = None
    pred_out_stream: Any = None

    # Latest decoded transcript text and its timestamps (if any).
    current_text: str = ""

    current_timestamps: Optional[dict] = None

    # Text already emitted as final; used to compute the delta on finalize.
    last_emitted_text: str = ""

    # Audio to prepend after a reset so the encoder keeps pre-encode context
    # (consumed by ASRServer._init_session).
    overlap_buffer: Optional[np.ndarray] = None
|
|
|
|
| class ASRServer: |
| """WebSocket server for streaming ASR with true incremental processing.""" |
|
|
| def __init__( |
| self, |
| model: str, |
| host: str = "0.0.0.0", |
| port: int = 8080, |
| right_context: int = 1, |
| ): |
| self.model_name_or_path = model |
| self.host = host |
| self.port = port |
| self.right_context = right_context |
| self.model = None |
| self.sample_rate = 16000 |
|
|
| |
| self.inference_lock = asyncio.Lock() |
|
|
| |
| self.sessions: dict[str, ASRSession] = {} |
|
|
| |
| self.model_loaded = False |
|
|
| |
| self.shift_frames = None |
| self.pre_encode_cache_size = None |
| self.hop_samples = None |
|
|
| |
| self.overlap_samples = None |
|
|
    def load_model(self):
        """Load the NeMo ASR model and derive streaming geometry.

        Restores the checkpoint (local ``.nemo`` file or HuggingFace id),
        configures limited attention context and greedy decoding, then caches
        the shift / pre-encode-cache / padding sizes used by the chunked
        streaming path. Ends with a warmup inference pass.
        """
        import nemo.collections.asr as nemo_asr
        from omegaconf import OmegaConf

        # Anything ending in .nemo, or that exists on disk, is a local checkpoint.
        is_local_file = (
            self.model_name_or_path.endswith('.nemo') or
            os.path.exists(self.model_name_or_path)
        )

        if is_local_file:
            logger.info(f"Loading model from local file: {self.model_name_or_path}")
            self.model = nemo_asr.models.ASRModel.restore_from(
                self.model_name_or_path, map_location='cpu'
            )
        else:
            logger.info(f"Loading model from HuggingFace: {self.model_name_or_path}")
            self.model = nemo_asr.models.ASRModel.from_pretrained(
                self.model_name_or_path, map_location='cpu'
            )
        self.model = self.model.cuda()

        # Fixed 70-frame left context; right context controls latency (see
        # RIGHT_CONTEXT_OPTIONS). Guarded: not all encoders expose the setter.
        logger.info(f"Setting att_context_size=[70, {self.right_context}] ({RIGHT_CONTEXT_OPTIONS.get(self.right_context, 'custom')})")
        if hasattr(self.model.encoder, "set_default_att_context_size"):
            self.model.encoder.set_default_att_context_size([70, self.right_context])

        logger.info("Configuring greedy decoding for Blackwell compatibility and enabling timestamps...")

        # Models with a joint network are RNNT; for those, preserve alignments
        # so timestamps can be reconstructed later (see _decode_stream_output).
        preserve_alignments = False
        if hasattr(self.model, 'joint'):
            preserve_alignments = True

        decoding_cfg_dict = {
            'strategy': 'greedy',
            'greedy': {
                'max_symbols': 10,
                # loop_labels / cuda-graph decoder disabled — presumably for
                # Blackwell GPU compatibility per the log line above.
                'loop_labels': False,
                'use_cuda_graph_decoder': False,
            },
            'compute_timestamps': True
        }

        if preserve_alignments:
            decoding_cfg_dict['preserve_alignments'] = True

        self.model.change_decoding_strategy(
            decoding_cfg=OmegaConf.create(decoding_cfg_dict)
        )

        # Re-assert flags on the live decoding object; attribute layout varies
        # across model types, hence the hasattr guards.
        if hasattr(self.model, 'decoding'):
            if hasattr(self.model.decoding, 'compute_timestamps'):
                self.model.decoding.compute_timestamps = True
            if hasattr(self.model.decoding, 'preserve_alignments'):
                self.model.decoding.preserve_alignments = preserve_alignments
            if hasattr(self.model.decoding, 'ctc_decoder') and hasattr(self.model.decoding.ctc_decoder, 'compute_timestamps'):
                self.model.decoding.ctc_decoder.compute_timestamps = True
                self.model.decoding.ctc_decoder.return_hypotheses = True

        # RNNT-specific overrides: disable inline timestamp computation on the
        # streaming decode path but keep alignments and full hypotheses.
        # NOTE(review): presumably timestamps are derived from alignments
        # downstream instead — confirm against the NeMo version in use.
        if hasattr(self.model, 'joint'):
            if hasattr(self.model.decoding, 'rnnt_decoder_predictions_tensor'):
                if hasattr(self.model.decoding, 'compute_timestamps'):
                    self.model.decoding.compute_timestamps = False
                if hasattr(self.model.decoding, 'preserve_alignments'):
                    self.model.decoding.preserve_alignments = True
                if hasattr(self.model.decoding, 'return_hypotheses'):
                    self.model.decoding.return_hypotheses = True

        self.model.eval()

        # Disable dither so identical audio always yields identical features.
        self.model.preprocessor.featurizer.dither = 0.0

        scfg = self.model.encoder.streaming_cfg
        logger.info(f"Streaming config: chunk_size={scfg.chunk_size}, shift_size={scfg.shift_size}")

        # Samples per mel frame (window_stride seconds * sample rate).
        preprocessor_cfg = self.model.cfg.preprocessor
        hop_length_sec = preprocessor_cfg.get('window_stride', 0.01)
        self.hop_samples = int(hop_length_sec * self.sample_rate)

        # shift_size / pre_encode_cache_size may be [first-chunk, steady-state]
        # pairs; index [1] picks the steady-state value.
        self.shift_frames = scfg.shift_size[1] if isinstance(scfg.shift_size, list) else scfg.shift_size

        pre_cache = scfg.pre_encode_cache_size
        self.pre_encode_cache_size = pre_cache[1] if isinstance(pre_cache, list) else pre_cache

        # Frames to drop from the encoder output when we re-feed cached context.
        self.drop_extra = scfg.drop_extra_pre_encoded

        # Silence appended on finalize so the last spoken frames get the full
        # right context: (right_context + 1) shifts worth of frames.
        self.final_padding_frames = (self.right_context + 1) * self.shift_frames
        padding_ms = self.final_padding_frames * hop_length_sec * 1000

        # Audio (in samples) that would need to be carried across a session
        # reset to preserve the encoder's pre-encode context.
        self.overlap_samples = self.pre_encode_cache_size * self.hop_samples
        overlap_ms = self.overlap_samples * 1000 / self.sample_rate

        shift_ms = self.shift_frames * hop_length_sec * 1000
        logger.info(f"Model loaded: {type(self.model).__name__}")
        logger.info(f"Shift size: {shift_ms:.0f}ms ({self.shift_frames} frames)")
        logger.info(f"Pre-encode cache: {self.pre_encode_cache_size} frames")
        logger.info(f"Final chunk padding: {padding_ms:.0f}ms ({self.final_padding_frames} frames)")
        logger.info(f"Audio overlap for resets: {overlap_ms:.0f}ms ({self.overlap_samples} samples)")

        self._warmup()
|
|
| def _warmup(self): |
| """Run warmup inference using streaming API to claim GPU memory.""" |
| import time |
|
|
| logger.info("Running warmup inference (streaming API) to claim GPU memory...") |
| start = time.perf_counter() |
|
|
| |
| warmup_samples = self.sample_rate + (self.final_padding_frames * self.hop_samples) |
| warmup_audio = np.zeros(warmup_samples, dtype=np.float32) |
|
|
| |
| with torch.inference_mode(): |
| audio_tensor = torch.from_numpy(warmup_audio).unsqueeze(0).cuda() |
| audio_len = torch.tensor([len(warmup_audio)], device='cuda') |
|
|
| |
| mel, mel_len = self.model.preprocessor(input_signal=audio_tensor, length=audio_len) |
|
|
| |
| cache = self.model.encoder.get_initial_cache_state(batch_size=1) |
|
|
| |
| _ = self.model.conformer_stream_step( |
| processed_signal=mel, |
| processed_signal_length=mel_len, |
| cache_last_channel=cache[0], |
| cache_last_time=cache[1], |
| cache_last_channel_len=cache[2], |
| keep_all_outputs=True, |
| previous_hypotheses=None, |
| previous_pred_out=None, |
| drop_extra_pre_encoded=0, |
| return_transcription=True, |
| ) |
|
|
| elapsed = (time.perf_counter() - start) * 1000 |
| logger.info(f"Warmup complete in {elapsed:.0f}ms - GPU memory claimed") |
|
|
| def _init_session(self, session: ASRSession): |
| """Initialize a fresh session.""" |
| |
| cache = self.model.encoder.get_initial_cache_state(batch_size=1) |
| session.cache_last_channel = cache[0] |
| session.cache_last_time = cache[1] |
| session.cache_last_channel_len = cache[2] |
|
|
| |
| if session.overlap_buffer is not None and len(session.overlap_buffer) > 0: |
| session.accumulated_audio = session.overlap_buffer.copy() |
| overlap_ms = len(session.overlap_buffer) * 1000 / self.sample_rate |
| logger.debug( |
| f"Session {session.id}: prepending {len(session.overlap_buffer)} samples " |
| f"({overlap_ms:.0f}ms) of overlap audio" |
| ) |
| session.overlap_buffer = None |
| else: |
| session.accumulated_audio = np.array([], dtype=np.float32) |
|
|
| session.emitted_frames = 0 |
|
|
| |
| session.previous_hypotheses = None |
| session.pred_out_stream = None |
| session.current_text = "" |
| session.current_timestamps = None |
|
|
| async def websocket_handler(self, request: web.Request) -> web.WebSocketResponse: |
| """Handle a WebSocket client connection.""" |
| import uuid |
|
|
| ws = web.WebSocketResponse(max_msg_size=10 * 1024 * 1024) |
| await ws.prepare(request) |
|
|
| session_id = str(uuid.uuid4())[:8] |
| session = ASRSession(id=session_id, websocket=ws) |
| self.sessions[session_id] = session |
|
|
| logger.info(f"Client {session_id} connected") |
|
|
| try: |
| async with self.inference_lock: |
| await asyncio.get_event_loop().run_in_executor( |
| None, self._init_session, session |
| ) |
|
|
| await ws.send_str(json.dumps({"type": "ready"})) |
| logger.debug(f"Client {session_id}: sent ready") |
|
|
| async for msg in ws: |
| if msg.type == WSMsgType.BINARY: |
| await self._handle_audio(session, msg.data) |
| elif msg.type == WSMsgType.TEXT: |
| try: |
| data = json.loads(msg.data) |
| msg_type = data.get("type") |
|
|
| if msg_type == "reset" or msg_type == "end": |
| finalize = data.get("finalize", True) |
| await self._reset_session(session, finalize=finalize) |
| else: |
| logger.warning(f"Client {session_id}: unknown message type: {msg_type}") |
|
|
| except json.JSONDecodeError: |
| logger.warning(f"Client {session_id}: invalid JSON") |
| elif msg.type == WSMsgType.ERROR: |
| logger.error(f"Client {session_id} WebSocket error: {ws.exception()}") |
| break |
|
|
| logger.info(f"Client {session_id} disconnected") |
|
|
| except Exception as e: |
| logger.error(f"Client {session_id} error: {e}") |
| import traceback |
| logger.error(traceback.format_exc()) |
| try: |
| await ws.send_str(json.dumps({ |
| "type": "error", |
| "message": str(e) |
| })) |
| except: |
| pass |
| finally: |
| if session_id in self.sessions: |
| del self.sessions[session_id] |
|
|
| return ws |
|
|
| async def _handle_audio(self, session: ASRSession, audio_bytes: bytes): |
| """Accumulate audio and process when enough frames available.""" |
| audio_np = np.frombuffer(audio_bytes, dtype=np.int16).astype(np.float32) / 32768.0 |
|
|
| if DEBUG_ASR: |
| chunk_hash = hashlib.md5(audio_bytes).hexdigest()[:8] |
| logger.debug(f"Session {session.id}: recv chunk {len(audio_bytes)}B hash={chunk_hash}") |
|
|
| session.accumulated_audio = np.concatenate([session.accumulated_audio, audio_np]) |
|
|
| |
| min_audio_for_chunk = (session.emitted_frames + self.shift_frames + 1) * self.hop_samples |
|
|
| while len(session.accumulated_audio) >= min_audio_for_chunk: |
| async with self.inference_lock: |
| result = await asyncio.get_event_loop().run_in_executor( |
| None, self._process_chunk, session |
| ) |
|
|
| if result is not None: |
| text, timestamps = result |
| if text is not None and text != session.current_text: |
| session.current_text = text |
| session.current_timestamps = timestamps |
| logger.debug(f"Session {session.id} interim: {text[-50:] if len(text) > 50 else text}") |
| |
| formatted_timestamps = [] |
| if timestamps: |
| if isinstance(timestamps, dict): |
| for key, val in timestamps.items(): |
| if key != 'timestep': |
| formatted_timestamps.append({key: normalize_timestamp_output(val)}) |
| elif isinstance(timestamps, list): |
| |
| formatted_timestamps = timestamps |
| |
| await session.websocket.send_str(json.dumps({ |
| "type": "transcript", |
| "text": text, |
| "timestamps": formatted_timestamps if formatted_timestamps else None, |
| "is_final": False |
| })) |
|
|
| |
| min_audio_for_chunk = (session.emitted_frames + self.shift_frames + 1) * self.hop_samples |
|
|
    def _decode_stream_output(self, session, pred_out_stream):
        """Manually decode model outputs to retrieve timestamps.

        In this file it is invoked only on the non-joint (CTC) path of
        _process_chunk / _process_final_chunk; the RNNT branch below exists
        for models that do have a joint network.
        """
        if hasattr(self.model, 'joint'):
            # RNNT: run the decoding object directly on each output tensor.
            decoding = self.model.decoding
            transcribed_texts = []
            for preds_idx, preds_concat in enumerate(pred_out_stream):
                # Ensure a batch dimension: (T, D) -> (1, T, D).
                if preds_concat.dim() == 2:
                    preds_tensor = preds_concat.unsqueeze(0)
                else:
                    preds_tensor = preds_concat

                encoded_len = torch.tensor([preds_tensor.size(1)], device=preds_tensor.device)

                hypotheses_list = decoding(
                    encoder_output=preds_tensor,
                    encoded_lengths=encoded_len,
                    partial_hypotheses=session.previous_hypotheses
                )

                # Unwrap the (hypotheses, ...) tuple, then take the best hyp.
                hypotheses_list = hypotheses_list[0]

                if isinstance(hypotheses_list[0], list):
                    transcribed_texts.append(hypotheses_list[0][0])
                else:
                    transcribed_texts.append(hypotheses_list[0])

        else:
            # CTC: pick whichever decoding object this model exposes.
            if hasattr(self.model, 'ctc_decoder'):
                decoding = self.model.ctc_decoding
            else:
                decoding = self.model.decoding

            transcribed_texts = []
            for preds_idx, preds_concat in enumerate(pred_out_stream):
                encoded_len = torch.tensor([len(preds_concat)], device=preds_concat.device)
                decoded_out = decoding.ctc_decoder_predictions_tensor(
                    decoder_outputs=preds_concat.unsqueeze(0),
                    decoder_lengths=encoded_len,
                    return_hypotheses=True,
                )
                if isinstance(decoded_out[0], list):
                    transcribed_texts.append(decoded_out[0][0])
                else:
                    transcribed_texts.append(decoded_out[0])

        # Parameters needed to convert frame indices to seconds.
        if hasattr(self.model.cfg, 'preprocessor'):
            window_stride = self.model.cfg.preprocessor.get('window_stride', 0.01)
        else:
            window_stride = 0.01

        subsampling_factor = 1
        if hasattr(self.model, 'encoder') and hasattr(self.model.encoder, 'subsampling_factor'):
            subsampling_factor = self.model.encoder.subsampling_factor
        elif hasattr(self.model, 'encoder') and hasattr(self.model.encoder, 'conv_subsampling_factor'):
            subsampling_factor = self.model.encoder.conv_subsampling_factor

        # RNNT hypotheses may come back without timestamps; rebuild them from
        # the preserved alignments.
        if hasattr(self.model, 'joint'):
            import copy
            timestamp_type = 'all'
            if hasattr(decoding, 'cfg'):
                timestamp_type = decoding.cfg.get('rnnt_timestamp_type', 'all')

            for i in range(len(transcribed_texts)):
                if hasattr(transcribed_texts[i], 'timestamp') and not transcribed_texts[i].timestamp:
                    if hasattr(transcribed_texts[i], 'y_sequence'):
                        prediction = transcribed_texts[i].y_sequence
                        if type(prediction) != list:
                            prediction = prediction.tolist()

                        # Strip blank symbols; id layout differs for multi-blank
                        # and TDT models, hence the three branches.
                        if decoding.big_blank_durations is not None and decoding.big_blank_durations != []:
                            num_extra_outputs = len(decoding.big_blank_durations)
                            prediction = [p for p in prediction if p < decoding.blank_id - num_extra_outputs]
                        elif hasattr(decoding, '_is_tdt') and decoding._is_tdt:
                            prediction = [p for p in prediction if p < decoding.blank_id]
                        else:
                            prediction = [p for p in prediction if p != decoding.blank_id]

                        alignments = copy.deepcopy(transcribed_texts[i].alignments)
                        token_repetitions = [1] * len(alignments)

                        # Pack into the (tokens, alignments, repetitions) shape
                        # compute_rnnt_timestamps expects in .text.
                        transcribed_texts[i].text = (prediction, alignments, token_repetitions)

                        transcribed_texts[i] = decoding.compute_rnnt_timestamps(transcribed_texts[i], timestamp_type)

        # NOTE(review): the return value of process_timestamp_outputs is
        # discarded here — verify it mutates the hypotheses in place, otherwise
        # the frame->seconds conversion is lost.
        process_timestamp_outputs(transcribed_texts, subsampling_factor=subsampling_factor, window_stride=window_stride)
        return transcribed_texts
|
|
    def _process_chunk(self, session: ASRSession) -> Optional[Tuple[str, Optional[dict]]]:
        """Process accumulated audio, extract new mel frames, run streaming inference.

        Runs synchronously on an executor thread (under inference_lock).
        Returns (text, timestamps) on success, the unchanged current state if
        not enough new frames are available, or None on error.
        """
        try:
            # Recompute mel features over the whole accumulated audio so frame
            # indices stay consistent; only the new slice is fed to the encoder.
            audio_tensor = torch.from_numpy(session.accumulated_audio).unsqueeze(0).cuda()
            audio_len = torch.tensor([len(session.accumulated_audio)], device='cuda')

            with torch.inference_mode():
                mel, mel_len = self.model.preprocessor(
                    input_signal=audio_tensor,
                    length=audio_len
                )

            # -1: the last frame may still change as more audio arrives.
            available_frames = mel.shape[-1] - 1
            new_frame_count = available_frames - session.emitted_frames

            # Not a full shift of new frames yet: report current state as-is.
            if new_frame_count < self.shift_frames:
                return session.current_text, session.current_timestamps

            # First chunk has no cached context; later chunks are prefixed with
            # pre_encode_cache_size frames of already-seen context, which the
            # encoder drops again via drop_extra_pre_encoded.
            if session.emitted_frames == 0:
                chunk_start = 0
                chunk_end = self.shift_frames
                drop_extra = 0
            else:
                chunk_start = session.emitted_frames - self.pre_encode_cache_size
                chunk_end = session.emitted_frames + self.shift_frames
                drop_extra = self.drop_extra

            chunk_mel = mel[:, :, chunk_start:chunk_end]
            chunk_len = torch.tensor([chunk_mel.shape[-1]], device='cuda')

            # One cache-aware streaming step; all streaming state lives on the
            # session and is threaded back in on the next call.
            (
                session.pred_out_stream,
                transcribed_texts,
                session.cache_last_channel,
                session.cache_last_time,
                session.cache_last_channel_len,
                session.previous_hypotheses,
            ) = self.model.conformer_stream_step(
                processed_signal=chunk_mel,
                processed_signal_length=chunk_len,
                cache_last_channel=session.cache_last_channel,
                cache_last_time=session.cache_last_time,
                cache_last_channel_len=session.cache_last_channel_len,
                keep_all_outputs=False,
                previous_hypotheses=session.previous_hypotheses,
                previous_pred_out=session.pred_out_stream,
                drop_extra_pre_encoded=drop_extra,
                return_transcription=True,
            )

            session.emitted_frames += self.shift_frames

            # RNNT (joint) models: use the transcription returned by the stream
            # step directly. CTC models: decode manually to get timestamps.
            if hasattr(self.model, 'joint'):
                pass
            else:
                transcribed_texts = self._decode_stream_output(session, session.pred_out_stream)

            if transcribed_texts and transcribed_texts[0]:
                hyp = transcribed_texts[0]
                text = hyp.text if hasattr(hyp, 'text') else str(hyp)
                timestamps = hyp.timestamp if hasattr(hyp, 'timestamp') else None
                return text, timestamps

            return session.current_text, session.current_timestamps

        except Exception as e:
            logger.error(f"Session {session.id} chunk processing error: {e}")
            import traceback
            logger.error(traceback.format_exc())
            return None
|
|
| async def _reset_session(self, session: ASRSession, finalize: bool = True): |
| """Handle reset with soft or hard finalization.""" |
| import time |
|
|
| |
| audio_samples = len(session.accumulated_audio) if session.accumulated_audio is not None else 0 |
| audio_duration_ms = (audio_samples * 1000) // self.sample_rate |
| logger.debug( |
| f"Session {session.id} {'hard' if finalize else 'soft'} reset: " |
| f"accumulated={audio_samples} samples ({audio_duration_ms}ms), " |
| f"emitted={session.emitted_frames} frames" |
| ) |
|
|
| if not finalize: |
| text = session.current_text |
| timestamps = session.current_timestamps |
| |
| formatted_timestamps = [] |
| if timestamps: |
| if isinstance(timestamps, dict): |
| for key, val in timestamps.items(): |
| if key != 'timestep': |
| formatted_timestamps.append({key: normalize_timestamp_output(val)}) |
| elif isinstance(timestamps, list): |
| formatted_timestamps = timestamps |
|
|
| await session.websocket.send_str(json.dumps({ |
| "type": "transcript", |
| "text": text, |
| "timestamps": formatted_timestamps if formatted_timestamps else None, |
| "is_final": True, |
| "finalize": False |
| })) |
|
|
| logger.debug(f"Session {session.id} soft reset: '{text[-50:] if len(text) > 50 else text}'") |
| return |
|
|
| |
| original_audio_length = len(session.accumulated_audio) if session.accumulated_audio is not None else 0 |
|
|
| if original_audio_length > 0: |
| padding_samples = self.final_padding_frames * self.hop_samples |
| silence_padding = np.zeros(padding_samples, dtype=np.float32) |
| session.accumulated_audio = np.concatenate([session.accumulated_audio, silence_padding]) |
|
|
| |
| final_text = session.current_text |
| final_timestamps = session.current_timestamps |
| if session.accumulated_audio is not None and len(session.accumulated_audio) > 0: |
| start_time = time.perf_counter() |
| async with self.inference_lock: |
| result = await asyncio.get_event_loop().run_in_executor( |
| None, self._process_final_chunk, session |
| ) |
| if result is not None: |
| final_text, final_timestamps = result |
| session.current_text = final_text |
| session.current_timestamps = final_timestamps |
| elapsed_ms = (time.perf_counter() - start_time) * 1000 |
| logger.debug(f"Session {session.id} final chunk processed in {elapsed_ms:.1f}ms: '{final_text[-50:] if len(final_text) > 50 else final_text}'") |
|
|
| |
| if final_text.startswith(session.last_emitted_text): |
| delta_text = final_text[len(session.last_emitted_text):].lstrip() |
| else: |
| delta_text = final_text |
|
|
| session.last_emitted_text = final_text |
| |
| formatted_timestamps = [] |
| if final_timestamps: |
| if isinstance(final_timestamps, dict): |
| for key, val in final_timestamps.items(): |
| if key != 'timestep': |
| formatted_timestamps.append({key: normalize_timestamp_output(val)}) |
| elif isinstance(final_timestamps, list): |
| formatted_timestamps = final_timestamps |
|
|
| |
| await session.websocket.send_str(json.dumps({ |
| "type": "transcript", |
| "text": delta_text, |
| "timestamps": formatted_timestamps if formatted_timestamps else None, |
| "is_final": True, |
| "finalize": True |
| })) |
|
|
| session.last_emitted_text = "" |
| session.overlap_buffer = None |
| self._init_session(session) |
|
|
    def _process_final_chunk(self, session: ASRSession) -> Optional[Tuple[str, Optional[dict]]]:
        """Process all remaining audio with keep_all_outputs=True.

        Like _process_chunk, but consumes every remaining mel frame (no -1
        slack, no upper bound) so the utterance tail is decoded. Runs on an
        executor thread under inference_lock. Returns (text, timestamps),
        the unchanged current state if nothing remains, or None on error.
        """
        try:
            if len(session.accumulated_audio) == 0:
                return session.current_text, session.current_timestamps

            audio_tensor = torch.from_numpy(session.accumulated_audio).unsqueeze(0).cuda()
            audio_len = torch.tensor([len(session.accumulated_audio)], device='cuda')

            with torch.inference_mode():
                mel, mel_len = self.model.preprocessor(
                    input_signal=audio_tensor,
                    length=audio_len
                )

            total_mel_frames = mel.shape[-1]
            remaining_frames = total_mel_frames - session.emitted_frames

            if remaining_frames <= 0:
                return session.current_text, session.current_timestamps

            # Same context handling as _process_chunk: prefix cached frames
            # except on the very first chunk.
            if session.emitted_frames == 0:
                chunk_start = 0
                drop_extra = 0
            else:
                chunk_start = session.emitted_frames - self.pre_encode_cache_size
                drop_extra = self.drop_extra

            # Open-ended slice: everything from chunk_start to the end.
            chunk_mel = mel[:, :, chunk_start:]
            chunk_len = torch.tensor([chunk_mel.shape[-1]], device='cuda')

            (
                session.pred_out_stream,
                transcribed_texts,
                session.cache_last_channel,
                session.cache_last_time,
                session.cache_last_channel_len,
                session.previous_hypotheses,
            ) = self.model.conformer_stream_step(
                processed_signal=chunk_mel,
                processed_signal_length=chunk_len,
                cache_last_channel=session.cache_last_channel,
                cache_last_time=session.cache_last_time,
                cache_last_channel_len=session.cache_last_channel_len,
                keep_all_outputs=True,
                previous_hypotheses=session.previous_hypotheses,
                previous_pred_out=session.pred_out_stream,
                drop_extra_pre_encoded=drop_extra,
                return_transcription=True,
            )

            # RNNT models: use the returned transcription; CTC models: decode
            # manually for timestamps (mirrors _process_chunk).
            if hasattr(self.model, 'joint'):
                pass
            else:
                transcribed_texts = self._decode_stream_output(session, session.pred_out_stream)

            if transcribed_texts and transcribed_texts[0]:
                hyp = transcribed_texts[0]
                text = hyp.text if hasattr(hyp, 'text') else str(hyp)
                timestamps = hyp.timestamp if hasattr(hyp, 'timestamp') else None
                return text, timestamps

            return session.current_text, session.current_timestamps

        except Exception as e:
            logger.error(f"Session {session.id} final chunk error: {e}")
            import traceback
            logger.error(traceback.format_exc())
            return None
|
|
| async def health_handler(self, request: web.Request) -> web.Response: |
| """Health check endpoint.""" |
| return web.json_response({ |
| "status": "healthy" if self.model_loaded else "loading", |
| "model_loaded": self.model_loaded, |
| }) |
|
|
| async def start(self): |
| """Start the HTTP + WebSocket server.""" |
| self.load_model() |
| self.model_loaded = True |
|
|
| logger.info(f"Starting streaming ASR server on ws://{self.host}:{self.port}") |
|
|
| app = web.Application() |
| app.router.add_get("/health", self.health_handler) |
| app.router.add_get("/", self.websocket_handler) |
|
|
| runner = web.AppRunner(app) |
| await runner.setup() |
| site = web.TCPSite(runner, self.host, self.port) |
| await site.start() |
|
|
| logger.info(f"ASR server listening on ws://{self.host}:{self.port}") |
| logger.info(f"Health check available at http://{self.host}:{self.port}/health") |
| await asyncio.Future() |
|
|
|
|
def main():
    """Parse CLI arguments and run the streaming ASR server."""
    arg_parser = argparse.ArgumentParser(description="Nemotron Streaming ASR WebSocket Server")
    arg_parser.add_argument("--host", default="0.0.0.0", help="Host to bind to")
    arg_parser.add_argument("--port", type=int, default=8080, help="Port to bind to")
    arg_parser.add_argument(
        "--model",
        default=DEFAULT_MODEL,
        help="HuggingFace model name or path to local .nemo file"
    )
    arg_parser.add_argument(
        "--right-context",
        type=int,
        default=1,
        choices=[0, 1, 6, 13],
        help="Right context frames: 0=80ms, 1=160ms, 6=560ms, 13=1.12s latency"
    )
    opts = arg_parser.parse_args()

    asyncio.run(
        ASRServer(
            model=opts.model,
            host=opts.host,
            port=opts.port,
            right_context=opts.right_context,
        ).start()
    )
|
|
|
|
# Script entry point.
if __name__ == "__main__":
    main()
|
|