"""Shared structured logger for NeuroBridge pipelines.
All modules in `src/` must obtain their logger via `get_logger(__name__)`
instead of using `print()`. This guarantees consistent format and INFO-level
traceability across pipelines (per AGENTS.md §4).
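
Typical usage (an illustrative sketch; assumes this module has been imported
by the pipeline module in question):

    logger = get_logger(__name__)
    logger.info("ingest step finished")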
"""

from __future__ import annotations

import logging
import sys

_LOG_FORMAT = "%(asctime)s | %(levelname)-7s | %(name)s | %(message)s"
_DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"


def get_logger(name: str, level: int = logging.INFO) -> logging.Logger:
    """Return a process-wide singleton logger for the given name.

    Idempotent on handler attachment: repeated calls with the same name
    return the same Logger instance and never stack duplicate stdout
    StreamHandlers. The most recent call wins on `level`, so callers can
    raise or lower verbosity at runtime without rebuilding the logger.

    Note on `propagate=False`: records do NOT bubble up to the root logger.
    If a framework (FastAPI, Uvicorn, MLflow) needs to capture records via
    a root handler in week-2 work, this default will need to be revisited.

    Args:
        name: Dotted logger name, conventionally `__name__`.
        level: Logging level (default `logging.INFO`).

    Returns:
        Configured `logging.Logger` writing to stdout.
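
    Example (illustrative; the logger name and messages are placeholders):

        log = get_logger("neurobridge.ingest")
        log.info("started")
        same = get_logger("neurobridge.ingest", level=logging.DEBUG)
        assert same is log and len(log.handlers) == 1  # same instance, one handler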
"""
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.propagate = False  # keep records out of the root logger (see note above)

    # Attach a stdout handler only once per logger so repeated calls stay idempotent.
    if not any(
        isinstance(h, logging.StreamHandler) and h.stream is sys.stdout
        for h in logger.handlers
    ):
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setFormatter(logging.Formatter(_LOG_FORMAT, datefmt=_DATE_FORMAT))
        logger.addHandler(handler)

    return logger
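

# Minimal smoke demo (an illustrative sketch, not part of the pipeline contract):
# running this module directly emits two records through the shared format and
# shows that repeated get_logger() calls do not stack duplicate handlers.
if __name__ == "__main__":
    demo = get_logger("neurobridge.demo")  # "neurobridge.demo" is an illustrative name
    demo.info("structured logging online")
    demo_again = get_logger("neurobridge.demo", level=logging.DEBUG)
    demo.debug("verbosity raised by the most recent call")
    assert demo is demo_again and len(demo.handlers) == 1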