"""
Generate Denmark_data .mat files from the weather-denmark CSV
to match the format expected by the EQL-Wind-Speed-Forecasting repo.
"""
import numpy as np
import pandas as pd
from scipy.io import savemat
import os
def _hourly_tensor(df, cities, features):
    """Resample each city's records onto an hourly grid and stack them.

    Returns a float array of shape (time, n_cities, n_features); rows where
    any city is missing after the forward/backward fill are dropped so all
    cities share one aligned hourly index.
    """
    per_city = []
    for city in cities:
        city_df = df[df['City'] == city].set_index('DateTime').sort_index()
        # Hourly mean, then fill gaps forward (and backward for leading NaNs).
        hourly = city_df[features].resample('h').mean().ffill().bfill()
        per_city.append(hourly)
    # Column-wise concat with a (city, feature) MultiIndex aligns the indices.
    merged = pd.concat(per_city, axis=1, keys=cities).dropna()
    print(f"Total hourly samples after merge: {len(merged)}")
    return np.stack([merged[city][features].to_numpy() for city in cities],
                    axis=1)


def _lagged_windows(arr, n_lags, steps_ahead):
    """Build the lagged-input tensor X of shape (valid_len, cities, n_lags, features).

    X[t, :, lag, :] == arr[t + n_lags - 1 - lag], i.e. lag 0 is the most
    recent hour. valid_len leaves room for the history and the horizon.
    """
    valid_len = len(arr) - steps_ahead - (n_lags - 1)
    n_cities, n_feats = arr.shape[1], arr.shape[2]
    X = np.zeros((valid_len, n_cities, n_lags, n_feats))
    for lag in range(n_lags):
        start = n_lags - 1 - lag
        X[:, :, lag, :] = arr[start:start + valid_len]
    return X, valid_len


def _minmax_scale_x(Xtr, Xtest):
    """Min/max-scale inputs per (city, feature), fit on the training split only.

    Degenerate (constant) features get a range of 1 so they map to 0 rather
    than dividing by ~0. Returns scaled copies plus the fitted min/max.
    """
    x_min = Xtr.min(axis=(0, 2))            # (cities, features)
    x_max = Xtr.max(axis=(0, 2))
    rng = np.where((x_max - x_min) < 1e-8, 1.0, x_max - x_min)
    Xtr = (Xtr - x_min[None, :, None, :]) / rng[None, :, None, :]
    Xtest = (Xtest - x_min[None, :, None, :]) / rng[None, :, None, :]
    return Xtr, Xtest, x_min, x_max


def main(csv_path='/tmp/weather-denmark/weather-denmark.csv',
         out_dir='Denmark_data'):
    """Generate Denmark_data .mat files from the weather-denmark CSV
    in the format expected by the EQL-Wind-Speed-Forecasting repo.

    Parameters
    ----------
    csv_path : str
        Path to weather-denmark.csv (columns: DateTime, City, Temp,
        Pressure, WindSpeed, WindDir). Defaults to the historical path.
    out_dir : str
        Root output directory (default 'Denmark_data').

    Writes <out_dir>/wind_speed/step1.mat and <out_dir>/temp/step1.mat,
    each holding Xtr/Xtest/Ytr/Ytest plus the training-split target
    min/max (y_min_tr/y_max_tr) used for scaling, and a top-level
    scaling_params.npz.

    Bug fix vs. the previous version: the loop variable `label` was
    never used, so temp/step1.mat contained WindSpeed targets; the
    'temp' directory now genuinely holds Temp targets.
    """
    cities = ['Aalborg', 'Aarhus', 'Esbjerg', 'Odense', 'Roskilde']
    features = ['Temp', 'Pressure', 'WindSpeed', 'WindDir']
    # Output subdirectory -> feature column used as the forecast target.
    targets = {'wind_speed': 'WindSpeed', 'temp': 'Temp'}
    n_lags = 4         # hours of history per sample
    steps_ahead = 6    # forecast horizon in hours
    train_ratio = 0.9

    df = pd.read_csv(csv_path)
    df['DateTime'] = pd.to_datetime(df['DateTime'])

    data_array = _hourly_tensor(df, cities, features)
    X, valid_len = _lagged_windows(data_array, n_lags, steps_ahead)
    n_train = int(valid_len * train_ratio)
    Xtr, Xtest, x_min, x_max = _minmax_scale_x(X[:n_train].copy(),
                                               X[n_train:].copy())

    y_params = {}
    for subdir, feat in targets.items():
        # Target for sample t is the chosen feature steps_ahead hours past
        # the newest lag (index t + n_lags - 1 + steps_ahead).
        start = n_lags - 1 + steps_ahead
        Y = data_array[start:start + valid_len, :, features.index(feat)].copy()
        Ytr, Ytest = Y[:n_train], Y[n_train:]
        # Per-city min/max fit on the training targets; guard zero ranges.
        y_min = Ytr.min(axis=0, keepdims=True)   # shape (1, cities)
        y_max = Ytr.max(axis=0, keepdims=True)
        rng = np.where((y_max - y_min) < 1e-8, 1.0, y_max - y_min)
        Ytr = (Ytr - y_min) / rng
        Ytest = (Ytest - y_min) / rng            # test uses training statistics
        y_params[subdir] = (y_min, y_max)

        os.makedirs(os.path.join(out_dir, subdir), exist_ok=True)
        savemat(os.path.join(out_dir, subdir, 'step1.mat'), {
            'Xtr': Xtr, 'Xtest': Xtest, 'Ytr': Ytr, 'Ytest': Ytest,
            'y_min_tr': y_min, 'y_max_tr': y_max,
        }, do_compression=True)

    # Keep the historical key names (wind-speed target) and expose the
    # temperature target's statistics under new, additive keys.
    np.savez(os.path.join(out_dir, 'scaling_params.npz'),
             x_min=x_min, x_max=x_max,
             y_min_tr=y_params['wind_speed'][0],
             y_max_tr=y_params['wind_speed'][1],
             y_min_temp=y_params['temp'][0],
             y_max_temp=y_params['temp'][1],
             cities=cities, features=features)
    print(f"Saved {out_dir}. Xtr:{Xtr.shape}")


if __name__ == '__main__':
    main()