File size: 4,969 Bytes
881f9f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
"""
Shared data-loading stubs for all four Smart Grid MCP servers.

Each server imports from here to get a consistent view of the processed
datasets.  Each loader reads its CSV from data/processed/ and raises
FileNotFoundError if the corresponding Kaggle-derived file has not been
generated yet.

Dataset → server mapping:
  Power Transformers FDD & RUL  →  IoT, TSFM
  DGA Fault Classification       →  FMSR
  Smart Grid Fault Records       →  WO
  Transformer Health Index       →  FMSR (supplemental)
  Current & Voltage Monitoring   →  IoT, TSFM (supplemental)
"""

from __future__ import annotations

from pathlib import Path

import pandas as pd

# Repository root, derived from this file's own location so the data paths
# resolve correctly no matter what the current working directory is.
REPO_ROOT = Path(__file__).resolve().parent.parent
DATA_DIR = REPO_ROOT / "data" / "processed"


# ---------------------------------------------------------------------------
# IoT domain
# ---------------------------------------------------------------------------


def load_asset_metadata() -> pd.DataFrame:
    """
    Return static asset metadata as a DataFrame (transformer ID, location,
    manufacturer, installation date, rated capacity, etc.).

    Source CSV: data/processed/asset_metadata.csv
    Synthesized from: Power Transformers FDD & RUL dataset.

    Raises FileNotFoundError if the CSV has not been generated yet.
    """
    csv_path = DATA_DIR / "asset_metadata.csv"
    _require(csv_path)
    return pd.read_csv(csv_path)


def load_sensor_readings() -> pd.DataFrame:
    """
    Return time-series sensor readings keyed by (transformer_id, timestamp).

    Source CSV: data/processed/sensor_readings.csv
    Synthesized from: Power Transformers FDD & RUL + Current & Voltage
    Monitoring datasets.

    Expected columns:
        transformer_id, timestamp, sensor_id, value, unit, source

    Raises FileNotFoundError if the CSV has not been generated yet.
    """
    csv_path = DATA_DIR / "sensor_readings.csv"
    _require(csv_path)
    # Parse the timestamp column eagerly so callers get datetime64 values.
    return pd.read_csv(csv_path, parse_dates=["timestamp"])


# ---------------------------------------------------------------------------
# FMSR domain
# ---------------------------------------------------------------------------


def load_failure_modes() -> pd.DataFrame:
    """
    Return failure mode descriptions with their associated sensor signatures.

    Source CSV: data/processed/failure_modes.csv
    Synthesized from: DGA Fault Classification + Transformer Health Index.

    Expected columns:
        failure_mode_id, name, dga_label, description, severity, iec_code,
        key_gases, recommended_action

    Raises FileNotFoundError if the CSV has not been generated yet.
    """
    csv_path = DATA_DIR / "failure_modes.csv"
    _require(csv_path)
    return pd.read_csv(csv_path)


def load_dga_records() -> pd.DataFrame:
    """
    Return dissolved gas analysis (DGA) records used for fault classification.

    Source CSV: data/processed/dga_records.csv
    Synthesized from: DGA Fault Classification dataset.

    Expected columns:
        transformer_id, sample_date, dissolved_h2_ppm, dissolved_ch4_ppm,
        dissolved_c2h2_ppm, dissolved_c2h4_ppm, dissolved_c2h6_ppm,
        dissolved_co_ppm, dissolved_co2_ppm, fault_label, source_dataset

    Raises FileNotFoundError if the CSV has not been generated yet.
    """
    csv_path = DATA_DIR / "dga_records.csv"
    _require(csv_path)
    # Parse sample_date eagerly so callers get datetime64 values.
    return pd.read_csv(csv_path, parse_dates=["sample_date"])


# ---------------------------------------------------------------------------
# TSFM domain
# ---------------------------------------------------------------------------


def load_rul_labels() -> pd.DataFrame:
    """
    Return remaining-useful-life (RUL) ground-truth labels per transformer.

    Source CSV: data/processed/rul_labels.csv
    Synthesized from: Power Transformers FDD & RUL dataset.

    Expected columns:
        transformer_id, timestamp, rul_days, health_index, fdd_category

    Raises FileNotFoundError if the CSV has not been generated yet.
    """
    csv_path = DATA_DIR / "rul_labels.csv"
    _require(csv_path)
    # Parse the timestamp column eagerly so callers get datetime64 values.
    return pd.read_csv(csv_path, parse_dates=["timestamp"])


# ---------------------------------------------------------------------------
# WO domain
# ---------------------------------------------------------------------------


def load_fault_records() -> pd.DataFrame:
    """
    Return historical fault / maintenance event records.

    Source CSV: data/processed/fault_records.csv
    Synthesized from: Smart Grid Fault Records dataset.

    Expected columns:
        transformer_id, fault_id, fault_type, location, voltage_v, current_a,
        power_load_mw, temperature_c, wind_speed_kmh, weather_condition,
        maintenance_status, component_health, duration_hrs, downtime_hrs

    Raises FileNotFoundError if the CSV has not been generated yet.
    """
    csv_path = DATA_DIR / "fault_records.csv"
    _require(csv_path)
    return pd.read_csv(csv_path)


# ---------------------------------------------------------------------------
# Internal helpers
# ---------------------------------------------------------------------------


def _require(path: Path) -> None:
    """Fail fast with a helpful message when *path* has not been generated."""
    if path.exists():
        return
    raise FileNotFoundError(
        f"Processed data file not found: {path}\n"
        "Run the data pipeline (data/processed/) to generate it first."
    )