# ba-agent-posttrain-scripts / data_adapter.py
# Uploaded by richingme ("Upload BA agent post-training scripts", commit f8f0e4e, verified).
import json
import os
import re
from pathlib import Path
from typing import Any, Dict, Iterable, Optional, Tuple
from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
from utils import SYSTEM_PROMPT
# Maps a local data-file suffix to the `datasets` builder name used to load it.
# Both .json and .jsonl are handled by the "json" builder.
_LOCAL_DATASET_LOADERS = {
    ".json": "json",
    ".jsonl": "json",
    ".csv": "csv",
    ".parquet": "parquet",
}
def load_sft_dataset(
    dataset_name: str,
    dataset_sub_name: str = "",
    split: str = "train",
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dataset:
    """Load *split* of an SFT dataset and normalize each row.

    Every row is mapped to {"system", "prompt", "response"}; all of the
    original columns are dropped.
    """
    raw_dataset = load_dataset_split(dataset_name, dataset_sub_name=dataset_sub_name, split=split)

    def _normalize(example):
        return normalize_sft_example(example, default_system_prompt=default_system_prompt)

    return raw_dataset.map(_normalize, remove_columns=raw_dataset.column_names)
def load_preference_dataset(
    dataset_name: str,
    dataset_sub_name: str = "",
    split: str = "train",
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dataset:
    """Load *split* of a preference dataset and normalize each row.

    Every row is mapped to {"system", "prompt", "chosen", "rejected"}; all
    of the original columns are dropped.
    """
    raw_dataset = load_dataset_split(dataset_name, dataset_sub_name=dataset_sub_name, split=split)

    def _normalize(example):
        return normalize_preference_example(example, default_system_prompt=default_system_prompt)

    return raw_dataset.map(_normalize, remove_columns=raw_dataset.column_names)
def load_prompt_dataset(
    dataset_name: str,
    dataset_sub_name: str = "",
    split: str = "train",
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dataset:
    """Load *split* of a prompt-only dataset and normalize each row.

    Every row is mapped to {"system", "prompt"}; all of the original
    columns are dropped.
    """
    raw_dataset = load_dataset_split(dataset_name, dataset_sub_name=dataset_sub_name, split=split)

    def _normalize(example):
        return normalize_prompt_example(example, default_system_prompt=default_system_prompt)

    return raw_dataset.map(_normalize, remove_columns=raw_dataset.column_names)
def load_dataset_split(dataset_name: str, dataset_sub_name: str = "", split: str = "train") -> Dataset:
    """Load *split* of a dataset from a local path or the Hugging Face Hub.

    A `dataset_name` that exists on disk (after `~` expansion) is loaded
    locally; otherwise it is treated as a Hub dataset id, with
    `dataset_sub_name` passed through as the config name when non-empty.
    """
    local_path = os.path.expanduser(dataset_name)
    if os.path.exists(local_path):
        local_dataset = _load_local_dataset(Path(local_path), split=split)
        return _select_split(local_dataset, split)
    if dataset_sub_name:
        return load_dataset(dataset_name, dataset_sub_name, split=split)
    return load_dataset(dataset_name, split=split)
def normalize_sft_example(example: Dict[str, Any], default_system_prompt: str = SYSTEM_PROMPT) -> Dict[str, str]:
    """Convert one SFT row into {"system", "prompt", "response"}.

    Supported schemas: chat-style ``messages``/``conversations`` payloads,
    prompt/response-style columns, and Alpaca-style instruction/output.

    Raises:
        ValueError: when none of the supported schemas match.
    """
    if "messages" in example or "conversations" in example:
        chat_messages = _coerce_messages(example.get("messages", example.get("conversations")))
        if chat_messages:
            system_prompt, prompt, response = _messages_to_sft_fields(chat_messages, default_system_prompt)
            return {"system": system_prompt, "prompt": prompt, "response": response}
    prompt = _get_first_value(example, ("prompt", "question"))
    response = _get_first_value(example, ("response", "answer", "output", "completion"))
    if prompt is None and "instruction" in example and "output" in example:
        # Alpaca schema: the optional "input" column is appended to the instruction.
        instruction_text = _stringify_text(example["instruction"])
        input_text = _stringify_text(example.get("input"))
        prompt = " ".join(part for part in (instruction_text, input_text) if part)
        response = _stringify_text(example["output"])
    if prompt is None or response is None:
        raise ValueError(
            "Unsupported SFT dataset schema. Expected prompt/response, question/answer, instruction/output, "
            f"or messages. Found columns: {sorted(example.keys())}"
        )
    return {
        "system": _extract_system_prompt(example, default_system_prompt),
        "prompt": _stringify_text(prompt),
        "response": _stringify_text(response),
    }
def normalize_preference_example(
    example: Dict[str, Any],
    default_system_prompt: str = SYSTEM_PROMPT,
) -> Dict[str, str]:
    """Convert one preference row into {"system", "prompt", "chosen", "rejected"}.

    Supported schemas: HH-RLHF chosen/rejected transcripts, SafeRLHF
    pairwise columns, and plain prompt/chosen/rejected columns.

    Raises:
        ValueError: when none of the supported schemas match.
    """
    columns = example.keys()
    has_hh_pair = "chosen" in columns and "rejected" in columns
    if has_hh_pair and "prompt" not in columns and "question" not in columns:
        # HH-RLHF style: both fields are full transcripts sharing the prompt.
        system_prompt, chosen_prompt, chosen = _extract_hh_prompt_and_response(
            _stringify_text(example["chosen"]),
            default_system_prompt=default_system_prompt,
        )
        _, rejected_prompt, rejected = _extract_hh_prompt_and_response(
            _stringify_text(example["rejected"]),
            default_system_prompt=default_system_prompt,
        )
        return {
            "system": system_prompt,
            "prompt": chosen_prompt or rejected_prompt,
            "chosen": chosen,
            "rejected": rejected,
        }
    pairwise_columns = ("prompt", "response_0", "response_1", "better_response_id")
    if all(column in columns for column in pairwise_columns):
        # SafeRLHF style: two candidate responses plus a preference label.
        chosen, rejected = _select_pairwise_responses(example)
        return {
            "system": _extract_system_prompt(example, default_system_prompt),
            "prompt": _stringify_text(example["prompt"]),
            "chosen": chosen,
            "rejected": rejected,
        }
    prompt = _get_first_value(example, ("prompt", "question"))
    chosen = _get_first_value(example, ("chosen", "response_chosen", "preferred"))
    rejected = _get_first_value(example, ("rejected", "response_rejected", "dispreferred"))
    if prompt is None or chosen is None or rejected is None:
        raise ValueError(
            "Unsupported preference dataset schema. Expected prompt/chosen/rejected, "
            "question/response_chosen/response_rejected, SafeRLHF columns, or HH-RLHF chosen/rejected. "
            f"Found columns: {sorted(example.keys())}"
        )
    return {
        "system": _extract_system_prompt(example, default_system_prompt),
        "prompt": _stringify_text(prompt),
        "chosen": _stringify_text(chosen),
        "rejected": _stringify_text(rejected),
    }
def normalize_prompt_example(example: Dict[str, Any], default_system_prompt: str = SYSTEM_PROMPT) -> Dict[str, str]:
    """Convert one prompt-only row into {"system", "prompt"}.

    Supported schemas: chat-style messages, HH-RLHF chosen transcripts, and
    prompt/question/instruction columns.

    Raises:
        ValueError: when none of the supported schemas match.
    """
    if "messages" in example or "conversations" in example:
        chat_messages = _coerce_messages(example.get("messages", example.get("conversations")))
        if chat_messages:
            system_prompt, prompt, _ = _messages_to_sft_fields(chat_messages, default_system_prompt)
            return {"system": system_prompt, "prompt": prompt}
    columns = example.keys()
    has_hh_pair = "chosen" in columns and "rejected" in columns
    if has_hh_pair and "prompt" not in columns and "question" not in columns:
        # HH-RLHF style: the prompt is embedded in the chosen transcript.
        system_prompt, prompt, _ = _extract_hh_prompt_and_response(
            _stringify_text(example["chosen"]),
            default_system_prompt=default_system_prompt,
        )
        return {"system": system_prompt, "prompt": prompt}
    prompt = _get_first_value(example, ("prompt", "question"))
    if prompt is None and "instruction" in example:
        # Alpaca schema: the optional "input" column is appended to the instruction.
        instruction_text = _stringify_text(example["instruction"])
        input_text = _stringify_text(example.get("input"))
        prompt = " ".join(part for part in (instruction_text, input_text) if part)
    if prompt is None:
        raise ValueError(
            "Unsupported prompt dataset schema. Expected prompt/question/instruction, messages, or HH-RLHF chosen. "
            f"Found columns: {sorted(example.keys())}"
        )
    return {
        "system": _extract_system_prompt(example, default_system_prompt),
        "prompt": _stringify_text(prompt),
    }
def _load_local_dataset(dataset_path: Path, split: str):
    """Load a dataset from a local file or directory.

    A file is loaded with the builder matching its suffix. A directory is
    first treated as a `save_to_disk` layout; if that fails, it must
    contain a single discoverable data file for *split*.
    """
    if dataset_path.is_file():
        builder = _loader_name_from_suffix(dataset_path.suffix)
        return load_dataset(builder, data_files={split: str(dataset_path)})
    try:
        return load_from_disk(str(dataset_path))
    except (FileNotFoundError, ValueError):
        # Not an Arrow directory; fall back to a raw data file inside it.
        discovered = _discover_local_data_file(dataset_path, split=split)
        builder = _loader_name_from_suffix(discovered.suffix)
        return load_dataset(builder, data_files={split: str(discovered)})
def _select_split(dataset, split: str) -> Dataset:
    """Return the requested split of *dataset*, falling back gracefully.

    A plain Dataset is returned unchanged. For a DatasetDict the requested
    split is used when present; otherwise the first available split is
    returned (local single-split files are often keyed differently).
    """
    if not isinstance(dataset, DatasetDict):
        return dataset
    if split in dataset:
        return dataset[split]
    return dataset[next(iter(dataset))]
def _discover_local_data_file(dataset_dir: Path, split: str) -> Path:
    """Locate the data file for *split* inside *dataset_dir*.

    Prefers an exact ``<split>.<ext>`` match for any supported extension;
    otherwise accepts a directory containing exactly one supported file.

    Raises:
        ValueError: when no unambiguous data file can be found.
    """
    for extension in _LOCAL_DATASET_LOADERS:
        named_candidate = dataset_dir / (split + extension)
        if named_candidate.exists():
            return named_candidate
    matches = []
    for extension in _LOCAL_DATASET_LOADERS:
        matches.extend(sorted(dataset_dir.glob("*" + extension)))
    if len(matches) == 1:
        return matches[0]
    raise ValueError(
        f"Could not infer dataset file under {dataset_dir}. "
        f"Expected {split}.jsonl/.json/.csv/.parquet or exactly one supported file."
    )
def _loader_name_from_suffix(suffix: str) -> str:
    """Map a file suffix (e.g. ``.jsonl``) to a `datasets` builder name.

    The lookup is case-insensitive so files like ``DATA.JSONL`` load too
    (``Path.suffix`` preserves the original casing).

    Raises:
        ValueError: if the suffix is not a supported local format.
    """
    loader_name = _LOCAL_DATASET_LOADERS.get(suffix.lower())
    if loader_name is None:
        raise ValueError(f"Unsupported local dataset format: {suffix}")
    return loader_name
def _extract_system_prompt(example: Dict[str, Any], default_system_prompt: str) -> str:
    """Return the example's own system prompt if present, else the default.

    Only the first non-None system-style column is considered; if it
    stringifies to empty text, the default is used.
    """
    raw_value = _get_first_value(example, ("system", "system_prompt", "system_message"))
    text = _stringify_text(raw_value)
    return text if text else default_system_prompt
def _get_first_value(example: Dict[str, Any], keys: Iterable[str]):
for key in keys:
if key in example and example[key] is not None:
return example[key]
return None
def _stringify_text(value: Any) -> str:
if value is None:
return ""
if isinstance(value, str):
return value.strip()
if isinstance(value, list):
parts = [_stringify_text(item) for item in value]
return "\n".join(part for part in parts if part).strip()
if isinstance(value, dict):
text_value = value.get("text")
if text_value is not None:
return _stringify_text(text_value)
content_value = value.get("content")
if content_value is not None:
return _stringify_text(content_value)
return json.dumps(value, ensure_ascii=False, sort_keys=True)
return str(value).strip()
def _coerce_messages(raw_messages: Any):
if raw_messages is None:
return None
if isinstance(raw_messages, str):
raw_messages = json.loads(raw_messages)
if not isinstance(raw_messages, list):
raise ValueError(f"Unsupported messages payload: {type(raw_messages)}")
return raw_messages
def _messages_to_sft_fields(messages, default_system_prompt: str) -> Tuple[str, str, str]:
    """Flatten a chat transcript into (system, prompt, response) strings.

    The final assistant turn becomes the response; earlier non-system turns
    are rendered as "Role: content" prompt lines; system turns are joined
    into the system prompt (falling back to *default_system_prompt*).

    Raises:
        ValueError: for an empty transcript, a transcript with no assistant
            turn, or an empty final assistant message.
    """
    if not messages:
        raise ValueError("Empty messages payload.")

    def _message_role(message) -> str:
        # Single role-key fallback chain used everywhere. The previous
        # assistant-index scan ignored the "speaker" key, so a turn labelled
        # {"speaker": "assistant"} rendered as an assistant prompt line but
        # could never be selected as the response.
        return _normalize_role(message.get("role", message.get("from", message.get("speaker"))))

    assistant_indexes = [
        index for index, message in enumerate(messages) if _message_role(message) == "assistant"
    ]
    if not assistant_indexes:
        raise ValueError("messages must contain at least one assistant turn.")
    final_assistant_index = assistant_indexes[-1]
    system_parts = []
    prompt_lines = []
    response = ""
    for index, message in enumerate(messages):
        role = _message_role(message)
        content = _stringify_text(message.get("content", message.get("value", message.get("text"))))
        if not content:
            continue  # empty turns contribute nothing
        if role == "system":
            system_parts.append(content)
            continue
        if index == final_assistant_index:
            response = content
            continue
        prompt_lines.append(f"{_render_role(role)}: {content}")
    if not response:
        raise ValueError("The final assistant turn is empty.")
    system_prompt = "\n".join(system_parts).strip() or default_system_prompt
    prompt = "\n".join(prompt_lines).strip()
    return system_prompt, prompt, response
def _normalize_role(role: Optional[str]) -> str:
normalized_role = (role or "").strip().lower()
role_map = {
"human": "user",
"user": "user",
"assistant": "assistant",
"gpt": "assistant",
"bot": "assistant",
"system": "system",
"tool": "tool",
"function": "tool",
}
return role_map.get(normalized_role, normalized_role or "user")
def _render_role(role: str) -> str:
label_map = {
"user": "User",
"assistant": "Assistant",
"tool": "Tool",
}
return label_map.get(role, role.title())
def _extract_hh_prompt_and_response(text: str, default_system_prompt: str) -> Tuple[str, str, str]:
cleaned_text = text.lstrip()
chunks = re.split(r"\n\nAssistant:", cleaned_text)
if len(chunks) < 2:
raise ValueError("Invalid HH-RLHF transcript: missing assistant response.")
prompt_part = "\n\nAssistant:".join(chunks[:-1]).strip()
response = chunks[-1].strip()
prompt_lines = []
for block in re.split(r"\n\n", prompt_part):
current_block = block.strip()
if current_block.startswith("Human:"):
prompt_lines.append(f"User: {current_block[len('Human:'):].strip()}")
elif current_block.startswith("Assistant:"):
prompt_lines.append(f"Assistant: {current_block[len('Assistant:'):].strip()}")
return default_system_prompt, "\n".join(prompt_lines).strip(), response
def _select_pairwise_responses(example: Dict[str, Any]) -> Tuple[str, str]:
    """Order a SafeRLHF-style response pair as (chosen, rejected).

    When safety labels are present and exactly one response is safe, the
    safe one wins regardless of the preference label; otherwise
    ``better_response_id`` decides.
    """
    responses = [
        _stringify_text(example["response_0"]),
        _stringify_text(example["response_1"]),
    ]
    # Convert eagerly so a malformed label fails even on the safety path.
    better_id = int(example["better_response_id"])
    safety_keys = ("is_response_0_safe", "is_response_1_safe")
    if all(key in example for key in safety_keys):
        safe_0, safe_1 = (bool(example[key]) for key in safety_keys)
        if safe_0 != safe_1:
            winner = 0 if safe_0 else 1
            return responses[winner], responses[1 - winner]
    winner = 0 if better_id == 0 else 1
    return responses[winner], responses[1 - winner]