showeed committed
Commit 4ab167f · verified · 1 Parent(s): f244443

Upload 4 files

.gitattributes CHANGED
@@ -58,3 +58,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
 gensan.pdf filter=lfs diff=lfs merge=lfs -text
+gensan.xlsx filter=lfs diff=lfs merge=lfs -text
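The added line extends the existing patterns: gensan.xlsx is now tracked by Git LFS, so commits store a small pointer file in place of the binary, as already done for *.mp4, *.webm, and gensan.pdf.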
gensan.xlsx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b98b60fc4fce790114bd117bf5634a1e79dd9aa2f1ce3920a5cee9ed01e7cd3f
+size 1014473
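This pointer is what actually lives in the repository: the version line names the LFS pointer spec, oid is the SHA-256 of the real content, and size is its length in bytes (about 1.0 MB here). Git LFS fetches the spreadsheet itself from LFS storage on checkout.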
name_address_pred/inference.py ADDED
@@ -0,0 +1,133 @@
+"""
+===============================================================================
+Inference script
+Predicts country and name/address type from text using the trained model.
+===============================================================================
+"""
+
+import json
+import torch
+import torch.nn as nn
+from transformers import XLMRobertaTokenizer, XLMRobertaModel
+
+
+# --- Model definition (identical to train_pipeline.py) ---
+class MultiTaskClassifier(nn.Module):
+    def __init__(self, model_name, num_countries, dropout=0.3):
+        super().__init__()
+        self.xlmr = XLMRobertaModel.from_pretrained(model_name)
+        hidden_size = self.xlmr.config.hidden_size
+        self.shared_layer = nn.Sequential(
+            nn.Linear(hidden_size, 512),
+            nn.ReLU(),
+            nn.Dropout(dropout),
+        )
+        self.country_head = nn.Linear(512, num_countries)
+        self.type_head = nn.Linear(512, 1)
+
+    def forward(self, input_ids, attention_mask):
+        outputs = self.xlmr(input_ids=input_ids, attention_mask=attention_mask)
+        cls_output = outputs.last_hidden_state[:, 0, :]
+        shared = self.shared_layer(cls_output)
+        return self.country_head(shared), self.type_head(shared).squeeze(-1)
+
+
+class Predictor:
+    def __init__(
+        self,
+        model_path="output/best_model.pt",
+        encoder_path="output/country_encoder.json",
+        model_name="xlm-roberta-base",
+        device=None,
+    ):
+        self.device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        # Country encoder
+        with open(encoder_path, "r", encoding="utf-8") as f:
+            self.classes = json.load(f)["classes"]
+        self.num_countries = len(self.classes)
+
+        # Tokenizer
+        self.tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)
+
+        # Model
+        self.model = MultiTaskClassifier(model_name, self.num_countries).to(self.device)
+        checkpoint = torch.load(model_path, map_location=self.device)
+        self.model.load_state_dict(checkpoint["model_state_dict"])
+        self.model.eval()
+
+    @torch.no_grad()
+    def predict(self, texts: list[str], top_k: int = 3) -> list[dict]:
+        """
+        Take a list of texts and return a prediction result for each.
+
+        Returns:
+            list of dict:
+                - type: "name" or "address"
+                - type_confidence: float
+                - top_countries: list of (country, probability)
+        """
+        encoding = self.tokenizer(
+            texts,
+            max_length=128,
+            padding=True,
+            truncation=True,
+            return_tensors="pt",
+        )
+        input_ids = encoding["input_ids"].to(self.device)
+        attention_mask = encoding["attention_mask"].to(self.device)
+
+        country_logits, type_logit = self.model(input_ids, attention_mask)
+
+        # Country probabilities
+        country_probs = torch.softmax(country_logits, dim=1).cpu().numpy()
+        # Type probabilities
+        type_probs = torch.sigmoid(type_logit).cpu().numpy()
+
+        results = []
+        for i in range(len(texts)):
+            # Top-K countries
+            top_indices = country_probs[i].argsort()[::-1][:top_k]
+            top_countries = [
+                {"country": self.classes[idx], "probability": float(country_probs[i][idx])}
+                for idx in top_indices
+            ]
+
+            # Type
+            t_prob = float(type_probs[i])
+            text_type = "address" if t_prob > 0.5 else "name"
+
+            results.append({
+                "input": texts[i],
+                "type": text_type,
+                "type_confidence": t_prob if text_type == "address" else 1 - t_prob,
+                "top_countries": top_countries,
+            })
+
+        return results
+
+
+# =============================================================================
+# Usage example
+# =============================================================================
+if __name__ == "__main__":
+    predictor = Predictor()
+
+    test_texts = [
+        "田中太郎",
+        "東京都渋谷区神宮前1-2-3",
+        "John Smith",
+        "1600 Pennsylvania Avenue NW, Washington, DC",
+        "محمد أحمد",
+        "Müller",
+        "Via Roma 15, 00100 Roma",
+    ]
+
+    results = predictor.predict(test_texts, top_k=5)
+
+    for r in results:
+        print(f"\nInput: {r['input']}")
+        print(f"  Type: {r['type']} (confidence: {r['type_confidence']:.3f})")
+        print("  Top countries:")
+        for c in r["top_countries"]:
+            print(f"    {c['country']}: {c['probability']:.4f}")
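For reference, a minimal batch-inference sketch over the uploaded spreadsheet. This is not part of the commit: it assumes gensan.xlsx has a text column named "text" (the actual column layout is not documented here), that the trained artifacts exist under output/, and that an Excel engine such as openpyxl is installed (it is not in requirements.txt). Chunking matters because predict() pads the whole input list into a single forward pass.

# Hypothetical sketch (not in the commit): run the Predictor over gensan.xlsx.
# Assumes a "text" column and output/best_model.pt + output/country_encoder.json.
import pandas as pd
from inference import Predictor  # run from inside name_address_pred/

df = pd.read_excel("gensan.xlsx")  # needs an Excel engine such as openpyxl
texts = df["text"].astype(str).tolist()

predictor = Predictor()
results = []
for i in range(0, len(texts), 64):  # chunk to keep memory bounded
    results.extend(predictor.predict(texts[i : i + 64], top_k=3))

df["pred_type"] = [r["type"] for r in results]
df["pred_country"] = [r["top_countries"][0]["country"] for r in results]
df.to_csv("predictions.csv", index=False)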
name_address_pred/requirements.txt ADDED
@@ -0,0 +1,5 @@
+torch>=2.0.0
+transformers>=4.30.0
+scikit-learn>=1.3.0
+pandas>=2.0.0
+tqdm>=4.65.0
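These pin minimum versions only; installation would be pip install -r name_address_pred/requirements.txt. Python 3.9+ is assumed, since inference.py uses built-in generic hints such as list[str], and reading gensan.xlsx with pandas additionally needs an Excel engine (e.g. openpyxl), which is not listed here.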
name_address_pred/train_pipeline.py ADDED
@@ -0,0 +1,522 @@
+"""
+===============================================================================
+XLM-RoBERTa Multi-Task Classification Pipeline
+- Task 1: Country prediction (multi-class)
+- Task 2: Name/Address type prediction (binary)
+===============================================================================
+"""
+
+import os
+import random
+import numpy as np
+import pandas as pd
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
+from transformers import XLMRobertaTokenizer, XLMRobertaModel
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import LabelEncoder
+from sklearn.metrics import classification_report, accuracy_score, f1_score
+from collections import Counter
+from tqdm import tqdm
+import json
+import logging
+
+logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
+logger = logging.getLogger(__name__)
+
+
+# =============================================================================
+# 0. Config
+# =============================================================================
+class Config:
+    # Paths
+    DATA_PATH = "data.csv"  # Path to the input CSV (columns: text, country, label)
+    OUTPUT_DIR = "output"  # Where the model and results are saved
+    MODEL_NAME = "xlm-roberta-base"  # HuggingFace model name
+
+    # Data split
+    TEST_SIZE = 0.15
+    VAL_SIZE = 0.10  # as a fraction of the full dataset
+    RANDOM_SEED = 42
+
+    # Training
+    MAX_LENGTH = 128  # maximum token length
+    BATCH_SIZE = 64
+    NUM_EPOCHS = 5
+    LEARNING_RATE = 2e-5
+    WEIGHT_DECAY = 0.01
+    WARMUP_RATIO = 0.1
+    MAX_GRAD_NORM = 1.0
+
+    # Loss
+    TYPE_LOSS_WEIGHT = 0.3  # weight of the binary type loss (country classification is the main task)
+    CLASS_WEIGHT_CLAMP = 50.0  # upper bound on minority-class weights
+
+    # XLM-R fine-tuning
+    FREEZE_EMBEDDINGS = True  # freeze the embedding layer to stabilize training
+    FREEZE_LOWER_LAYERS = 0  # freeze the lower N layers (0 = train all layers)
+
+    # Device
+    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+
+
+# =============================================================================
+# 1. Data Preparation
+# =============================================================================
+def load_and_split_data(cfg: Config):
+    """Load the CSV, encode labels, and split into train/val/test."""
+
+    logger.info(f"Loading data from {cfg.DATA_PATH}")
+    df = pd.read_csv(cfg.DATA_PATH)
+    logger.info(f"Total samples: {len(df)}")
+
+    # --- Label encoding ---
+    # Country → integer
+    country_encoder = LabelEncoder()
+    df["country_id"] = country_encoder.fit_transform(df["country"])
+    num_countries = len(country_encoder.classes_)
+    logger.info(f"Number of countries: {num_countries}")
+
+    # Label (name/address) → binary
+    label_map = {"name": 0, "address": 1}
+    df["type_id"] = df["label"].map(label_map)
+
+    # --- Check distributions ---
+    country_counts = df["country"].value_counts()
+    logger.info(f"Country distribution (top 10):\n{country_counts.head(10)}")
+    logger.info(f"Country distribution (bottom 10):\n{country_counts.tail(10)}")
+    logger.info(f"Type distribution:\n{df['label'].value_counts()}")
+
+    # --- Stratified split ---
+    # First split off the test set (stratified by country)
+    train_val_df, test_df = train_test_split(
+        df,
+        test_size=cfg.TEST_SIZE,
+        random_state=cfg.RANDOM_SEED,
+        stratify=df["country_id"],
+    )
+
+    # Then split train and val
+    relative_val_size = cfg.VAL_SIZE / (1 - cfg.TEST_SIZE)
+    train_df, val_df = train_test_split(
+        train_val_df,
+        test_size=relative_val_size,
+        random_state=cfg.RANDOM_SEED,
+        stratify=train_val_df["country_id"],
+    )
+
+    train_df = train_df.reset_index(drop=True)
+    val_df = val_df.reset_index(drop=True)
+    test_df = test_df.reset_index(drop=True)
+
+    logger.info(f"Split sizes - Train: {len(train_df)}, Val: {len(val_df)}, Test: {len(test_df)}")
+
+    return train_df, val_df, test_df, country_encoder, num_countries
+
+
+# =============================================================================
+# 2. Dataset
+# =============================================================================
+class TextClassificationDataset(Dataset):
+    def __init__(self, df, tokenizer, max_length):
+        self.texts = df["text"].tolist()
+        self.country_ids = df["country_id"].tolist()
+        self.type_ids = df["type_id"].tolist()
+        self.tokenizer = tokenizer
+        self.max_length = max_length
+
+    def __len__(self):
+        return len(self.texts)
+
+    def __getitem__(self, idx):
+        text = str(self.texts[idx])
+        encoding = self.tokenizer(
+            text,
+            max_length=self.max_length,
+            padding="max_length",
+            truncation=True,
+            return_tensors="pt",
+        )
+        return {
+            "input_ids": encoding["input_ids"].squeeze(0),
+            "attention_mask": encoding["attention_mask"].squeeze(0),
+            "country_id": torch.tensor(self.country_ids[idx], dtype=torch.long),
+            "type_id": torch.tensor(self.type_ids[idx], dtype=torch.float),
+        }
+
+
+# =============================================================================
+# 3. Model
+# =============================================================================
+class MultiTaskClassifier(nn.Module):
+    def __init__(self, model_name, num_countries, dropout=0.3):
+        super().__init__()
+        self.xlmr = XLMRobertaModel.from_pretrained(model_name)
+        hidden_size = self.xlmr.config.hidden_size  # 768
+
+        self.shared_layer = nn.Sequential(
+            nn.Linear(hidden_size, 512),
+            nn.ReLU(),
+            nn.Dropout(dropout),
+        )
+        self.country_head = nn.Linear(512, num_countries)
+        self.type_head = nn.Linear(512, 1)
+
+    def forward(self, input_ids, attention_mask):
+        outputs = self.xlmr(input_ids=input_ids, attention_mask=attention_mask)
+        cls_output = outputs.last_hidden_state[:, 0, :]  # [CLS] token
+
+        shared = self.shared_layer(cls_output)
+        country_logits = self.country_head(shared)
+        type_logit = self.type_head(shared).squeeze(-1)
+
+        return country_logits, type_logit
+
+    def freeze_embeddings(self):
+        """Freeze the embedding layer."""
+        for param in self.xlmr.embeddings.parameters():
+            param.requires_grad = False
+        logger.info("Froze XLM-R embedding layer")
+
+    def freeze_lower_layers(self, num_layers):
+        """Freeze the lower N transformer layers."""
+        if num_layers <= 0:
+            return
+        for i in range(num_layers):
+            for param in self.xlmr.encoder.layer[i].parameters():
+                param.requires_grad = False
+        logger.info(f"Froze lower {num_layers} transformer layers")
+
+
+# =============================================================================
+# 4. Loss
+# =============================================================================
+class MultiTaskLoss(nn.Module):
+    def __init__(self, country_weights, type_loss_weight=0.3):
+        super().__init__()
+        self.country_loss_fn = nn.CrossEntropyLoss(weight=country_weights)
+        self.type_loss_fn = nn.BCEWithLogitsLoss()
+        self.type_loss_weight = type_loss_weight
+
+    def forward(self, country_logits, type_logit, country_label, type_label):
+        loss_country = self.country_loss_fn(country_logits, country_label)
+        loss_type = self.type_loss_fn(type_logit, type_label)
+        total_loss = loss_country + self.type_loss_weight * loss_type
+
+        return total_loss, loss_country.item(), loss_type.item()
+
+
+def compute_class_weights(train_df, num_countries, clamp_max=50.0):
+    """Compute inverse-frequency class weights."""
+    counts = Counter(train_df["country_id"].tolist())
+    total = len(train_df)
+    weights = torch.zeros(num_countries)
+    for cls_id in range(num_countries):
+        count = counts.get(cls_id, 1)
+        weights[cls_id] = total / (num_countries * count)
+    weights = weights.clamp(max=clamp_max)
+    logger.info(f"Class weights range: [{weights.min():.2f}, {weights.max():.2f}]")
+    return weights
+
+
+# =============================================================================
+# 5. Sampler (Optional: WeightedRandomSampler)
+# =============================================================================
+def create_weighted_sampler(train_df):
+    """Create a sampler that oversamples minority classes."""
+    counts = Counter(train_df["country_id"].tolist())
+    sample_weights = [1.0 / counts[cid] for cid in train_df["country_id"]]
+    sampler = WeightedRandomSampler(
+        weights=sample_weights,
+        num_samples=len(train_df),
+        replacement=True,
+    )
+    return sampler
+
+
+# =============================================================================
+# 6. Training Loop
+# =============================================================================
+def train_one_epoch(model, dataloader, loss_fn, optimizer, scheduler, cfg):
+    model.train()
+    total_loss = 0
+    total_country_loss = 0
+    total_type_loss = 0
+    all_country_preds = []
+    all_country_labels = []
+
+    pbar = tqdm(dataloader, desc="Training", leave=False)
+    for batch in pbar:
+        input_ids = batch["input_ids"].to(cfg.DEVICE)
+        attention_mask = batch["attention_mask"].to(cfg.DEVICE)
+        country_labels = batch["country_id"].to(cfg.DEVICE)
+        type_labels = batch["type_id"].to(cfg.DEVICE)
+
+        optimizer.zero_grad()
+        country_logits, type_logit = model(input_ids, attention_mask)
+        loss, l_country, l_type = loss_fn(country_logits, type_logit, country_labels, type_labels)
+
+        loss.backward()
+        nn.utils.clip_grad_norm_(model.parameters(), cfg.MAX_GRAD_NORM)
+        optimizer.step()
+        scheduler.step()
+
+        total_loss += loss.item()
+        total_country_loss += l_country
+        total_type_loss += l_type
+
+        preds = country_logits.argmax(dim=1).cpu().numpy()
+        all_country_preds.extend(preds)
+        all_country_labels.extend(country_labels.cpu().numpy())
+
+        pbar.set_postfix(loss=f"{loss.item():.4f}")
+
+    n = len(dataloader)
+    acc = accuracy_score(all_country_labels, all_country_preds)
+    return {
+        "loss": total_loss / n,
+        "country_loss": total_country_loss / n,
+        "type_loss": total_type_loss / n,
+        "country_acc": acc,
+    }
+
+
+@torch.no_grad()
+def evaluate(model, dataloader, loss_fn, cfg):
+    model.eval()
+    total_loss = 0
+    total_country_loss = 0
+    total_type_loss = 0
+    all_country_preds = []
+    all_country_labels = []
+    all_type_preds = []
+    all_type_labels = []
+
+    for batch in tqdm(dataloader, desc="Evaluating", leave=False):
+        input_ids = batch["input_ids"].to(cfg.DEVICE)
+        attention_mask = batch["attention_mask"].to(cfg.DEVICE)
+        country_labels = batch["country_id"].to(cfg.DEVICE)
+        type_labels = batch["type_id"].to(cfg.DEVICE)
+
+        country_logits, type_logit = model(input_ids, attention_mask)
+        loss, l_country, l_type = loss_fn(country_logits, type_logit, country_labels, type_labels)
+
+        total_loss += loss.item()
+        total_country_loss += l_country
+        total_type_loss += l_type
+
+        all_country_preds.extend(country_logits.argmax(dim=1).cpu().numpy())
+        all_country_labels.extend(country_labels.cpu().numpy())
+        all_type_preds.extend((torch.sigmoid(type_logit) > 0.5).int().cpu().numpy())
+        all_type_labels.extend(type_labels.int().cpu().numpy())
+
+    n = len(dataloader)
+    country_acc = accuracy_score(all_country_labels, all_country_preds)
+    country_f1_macro = f1_score(all_country_labels, all_country_preds, average="macro", zero_division=0)
+    country_f1_weighted = f1_score(all_country_labels, all_country_preds, average="weighted", zero_division=0)
+    type_acc = accuracy_score(all_type_labels, all_type_preds)
+    type_f1 = f1_score(all_type_labels, all_type_preds, average="binary", zero_division=0)
+
+    return {
+        "loss": total_loss / n,
+        "country_loss": total_country_loss / n,
+        "type_loss": total_type_loss / n,
+        "country_acc": country_acc,
+        "country_f1_macro": country_f1_macro,
+        "country_f1_weighted": country_f1_weighted,
+        "type_acc": type_acc,
+        "type_f1": type_f1,
+        "country_preds": all_country_preds,
+        "country_labels": all_country_labels,
+        "type_preds": all_type_preds,
+        "type_labels": all_type_labels,
+    }
+
+
+# =============================================================================
+# 7. Main
+# =============================================================================
+def main():
+    cfg = Config()
+    set_seed(cfg.RANDOM_SEED)
+    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
+
+    # ---- Data ----
+    train_df, val_df, test_df, country_encoder, num_countries = load_and_split_data(cfg)
+
+    # Save the encoder (needed at inference time)
+    encoder_path = os.path.join(cfg.OUTPUT_DIR, "country_encoder.json")
+    with open(encoder_path, "w", encoding="utf-8") as f:
+        json.dump(
+            {"classes": country_encoder.classes_.tolist()},
+            f,
+            ensure_ascii=False,
+            indent=2,
+        )
+    logger.info(f"Saved country encoder to {encoder_path}")
+
+    # ---- Tokenizer & Datasets ----
+    logger.info(f"Loading tokenizer: {cfg.MODEL_NAME}")
+    tokenizer = XLMRobertaTokenizer.from_pretrained(cfg.MODEL_NAME)
+
+    train_dataset = TextClassificationDataset(train_df, tokenizer, cfg.MAX_LENGTH)
+    val_dataset = TextClassificationDataset(val_df, tokenizer, cfg.MAX_LENGTH)
+    test_dataset = TextClassificationDataset(test_df, tokenizer, cfg.MAX_LENGTH)
+
+    # ---- Sampler & DataLoader ----
+    sampler = create_weighted_sampler(train_df)
+    train_loader = DataLoader(
+        train_dataset,
+        batch_size=cfg.BATCH_SIZE,
+        sampler=sampler,
+        num_workers=4,
+        pin_memory=True,
+    )
+    val_loader = DataLoader(val_dataset, batch_size=cfg.BATCH_SIZE * 2, shuffle=False, num_workers=4, pin_memory=True)
+    test_loader = DataLoader(test_dataset, batch_size=cfg.BATCH_SIZE * 2, shuffle=False, num_workers=4, pin_memory=True)
+
+    # ---- Model ----
+    logger.info("Building model")
+    model = MultiTaskClassifier(cfg.MODEL_NAME, num_countries).to(cfg.DEVICE)
+
+    if cfg.FREEZE_EMBEDDINGS:
+        model.freeze_embeddings()
+    if cfg.FREEZE_LOWER_LAYERS > 0:
+        model.freeze_lower_layers(cfg.FREEZE_LOWER_LAYERS)
+
+    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+    total_params = sum(p.numel() for p in model.parameters())
+    logger.info(f"Parameters: {trainable_params:,} trainable / {total_params:,} total")
+
+    # ---- Loss ----
+    country_weights = compute_class_weights(train_df, num_countries, cfg.CLASS_WEIGHT_CLAMP).to(cfg.DEVICE)
+    loss_fn = MultiTaskLoss(country_weights, type_loss_weight=cfg.TYPE_LOSS_WEIGHT)
+
+    # ---- Optimizer & Scheduler ----
+    optimizer = torch.optim.AdamW(
+        filter(lambda p: p.requires_grad, model.parameters()),
+        lr=cfg.LEARNING_RATE,
+        weight_decay=cfg.WEIGHT_DECAY,
+    )
+    total_steps = len(train_loader) * cfg.NUM_EPOCHS
+    # Warmup is handled by OneCycleLR below via pct_start (= WARMUP_RATIO)
+
+    scheduler = torch.optim.lr_scheduler.OneCycleLR(
+        optimizer,
+        max_lr=cfg.LEARNING_RATE,
+        total_steps=total_steps,
+        pct_start=cfg.WARMUP_RATIO,
+        anneal_strategy="cos",
+    )
+
+    # ---- Training ----
+    best_val_f1 = 0.0
+    history = []
+
+    for epoch in range(1, cfg.NUM_EPOCHS + 1):
+        logger.info(f"\n{'='*60}")
+        logger.info(f"Epoch {epoch}/{cfg.NUM_EPOCHS}")
+        logger.info(f"{'='*60}")
+
+        train_metrics = train_one_epoch(model, train_loader, loss_fn, optimizer, scheduler, cfg)
+        logger.info(
+            f"[Train] Loss: {train_metrics['loss']:.4f} | "
+            f"Country Loss: {train_metrics['country_loss']:.4f} | "
+            f"Type Loss: {train_metrics['type_loss']:.4f} | "
+            f"Country Acc: {train_metrics['country_acc']:.4f}"
+        )
+
+        val_metrics = evaluate(model, val_loader, loss_fn, cfg)
+        logger.info(
+            f"[Val] Loss: {val_metrics['loss']:.4f} | "
+            f"Country Acc: {val_metrics['country_acc']:.4f} | "
+            f"Country F1(macro): {val_metrics['country_f1_macro']:.4f} | "
+            f"Country F1(weighted): {val_metrics['country_f1_weighted']:.4f} | "
+            f"Type Acc: {val_metrics['type_acc']:.4f}"
+        )
+
+        history.append({
+            "epoch": epoch,
+            "train_loss": train_metrics["loss"],
+            "val_loss": val_metrics["loss"],
+            "val_country_acc": val_metrics["country_acc"],
+            "val_country_f1_macro": val_metrics["country_f1_macro"],
+            "val_country_f1_weighted": val_metrics["country_f1_weighted"],
+            "val_type_acc": val_metrics["type_acc"],
+            "val_type_f1": val_metrics["type_f1"],
+        })
+
+        # Save the best model (selected by macro F1)
+        if val_metrics["country_f1_macro"] > best_val_f1:
+            best_val_f1 = val_metrics["country_f1_macro"]
+            save_path = os.path.join(cfg.OUTPUT_DIR, "best_model.pt")
+            torch.save({
+                "epoch": epoch,
+                "model_state_dict": model.state_dict(),
+                "optimizer_state_dict": optimizer.state_dict(),
+                "val_f1_macro": best_val_f1,
+                "num_countries": num_countries,
+            }, save_path)
+            logger.info(f"*** New best model saved (F1 macro: {best_val_f1:.4f}) ***")
+
+    # ---- Save training history ----
+    history_path = os.path.join(cfg.OUTPUT_DIR, "training_history.json")
+    with open(history_path, "w") as f:
+        json.dump(history, f, indent=2)
+
+    # ---- Test Evaluation ----
+    logger.info(f"\n{'='*60}")
+    logger.info("Final Evaluation on Test Set")
+    logger.info(f"{'='*60}")
+
+    # Load the best model
+    checkpoint = torch.load(os.path.join(cfg.OUTPUT_DIR, "best_model.pt"), map_location=cfg.DEVICE)
+    model.load_state_dict(checkpoint["model_state_dict"])
+    logger.info(f"Loaded best model from epoch {checkpoint['epoch']}")
+
+    test_metrics = evaluate(model, test_loader, loss_fn, cfg)
+
+    logger.info("\n--- Country Classification ---")
+    logger.info(f"Accuracy: {test_metrics['country_acc']:.4f}")
+    logger.info(f"F1 (macro): {test_metrics['country_f1_macro']:.4f}")
+    logger.info(f"F1 (weighted): {test_metrics['country_f1_weighted']:.4f}")
+    logger.info("\n--- Type Classification (Name/Address) ---")
+    logger.info(f"Accuracy: {test_metrics['type_acc']:.4f}")
+    logger.info(f"F1: {test_metrics['type_f1']:.4f}")
+
+    # Detailed report
+    report = classification_report(
+        test_metrics["country_labels"],
+        test_metrics["country_preds"],
+        target_names=country_encoder.classes_,
+        output_dict=True,
+        zero_division=0,
+    )
+    report_path = os.path.join(cfg.OUTPUT_DIR, "test_classification_report.json")
+    with open(report_path, "w", encoding="utf-8") as f:
+        json.dump(report, f, ensure_ascii=False, indent=2)
+    logger.info(f"Saved detailed report to {report_path}")
+
+    # Also print the plain-text report
+    print("\n" + classification_report(
+        test_metrics["country_labels"],
+        test_metrics["country_preds"],
+        target_names=country_encoder.classes_,
+        zero_division=0,
+    ))
+
+    logger.info("Done!")
+
+
+if __name__ == "__main__":
+    main()
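train_pipeline.py expects data.csv with three columns: text, country, and label, where label is either "name" or "address" (see Config.DATA_PATH and the label_map in load_and_split_data). A minimal illustrative sketch of that input, with made-up rows; the format of the country values (ISO codes vs. full names) is an assumption, since the pipeline treats them as opaque labels either way:

# Hypothetical example rows, not the actual dataset.
import pandas as pd

pd.DataFrame(
    [
        {"text": "田中太郎", "country": "JP", "label": "name"},
        {"text": "東京都渋谷区神宮前1-2-3", "country": "JP", "label": "address"},
        {"text": "John Smith", "country": "US", "label": "name"},
        {"text": "1600 Pennsylvania Avenue NW, Washington, DC", "country": "US", "label": "address"},
    ]
).to_csv("data.csv", index=False)

Note that the two stratified train_test_split calls need several rows per country; with as few examples per class as shown here, the splits would raise an error.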