Coding datasets
Collection
Datasets for high-quality small language model pre-training. • 3 items • Updated
repo_name stringlengths 4 135 | repo_url stringlengths 23 154 | snapshot_id float64 | revision_id float64 | directory_id float64 | branch_name float64 | visit_date stringclasses 755
values | revision_date float64 | committer_date float64 | github_id int64 131 1.05B | star_events_count int64 0 118k | fork_events_count int64 0 42.1k | gha_license_id stringclasses 34
values | gha_created_at stringlengths 25 25 ⌀ | gha_updated_at stringdate 2013-01-02 19:34:41 2025-09-10 18:41:34 | gha_pushed_at stringdate 2009-04-27 16:14:12 2025-09-10 19:09:58 ⌀ | gha_language stringclasses 284
values | files listlengths 2 3.2k | num_files int64 2 3.2k | __index_level_0__ int64 0 189k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
pericdario/remote-nunchuk-keyboard | https://github.com/pericdario/remote-nunchuk-keyboard | null | null | null | null | null | null | null | 236,235,108 | 0 | 0 | null | null | 2020-01-26T00:20:14+00:00 | 2020-01-26T00:20:12+00:00 | null | [
{
"alpha_fraction": 0.757548032936871,
"alphanum_fraction": 0.7740164684354987,
"avg_line_length": 25.658536585365855,
"blob_id": "776a2cd3d70dfc5307e2204ab6c5adc6ab94438a",
"content_id": "70315ec6658d29d2af8456eb08c6c2a9883030c2299acf54e3d81cb98408d1a4",
"detected_licenses": [],
"is_gen... | 8 | 0 |
w3c/wai-dynamic-planning | https://github.com/w3c/wai-dynamic-planning | null | null | null | null | null | null | null | 29,198,001 | 5 | 0 | null | null | 2025-07-16T22:53:48+00:00 | 2025-07-16T22:53:19+00:00 | null | [
{
"alpha_fraction": 0.7795698924731183,
"alphanum_fraction": 0.7903225806451613,
"avg_line_length": 61,
"blob_id": "55bba6c86b12e7b6abeaf71b8d0053efed976457",
"content_id": "a54ffd77d8909760575f9930430586fe63979f75de29fa5f8e07d3afa4f0ba25",
"detected_licenses": [],
"is_generated": false,... | 14 | 1 |
yihanYozikua/distance-measure | https://github.com/yihanYozikua/distance-measure | null | null | null | null | null | null | null | 313,556,027 | 1 | 0 | null | null | 2021-06-23T12:41:45+00:00 | 2021-04-08T08:46:36+00:00 | null | [
{
"alpha_fraction": 0.7458703939008895,
"alphanum_fraction": 0.7484116899618806,
"avg_line_length": 25.233333333333334,
"blob_id": "47fe6ac1012d739c94ffaa6d4f0e1a1326579754",
"content_id": "65eafab2f1d18bcb729ce923084c1385c6a6f5c0b37129b682e98df288b20e84",
"detected_licenses": [],
"is_ge... | 5 | 2 |
AuroransSolis/osu-db-manager | https://github.com/AuroransSolis/osu-db-manager | null | null | null | null | null | null | null | 184,883,096 | 1 | 0 | null | null | 2020-08-16T10:01:09+00:00 | 2020-08-16T10:01:07+00:00 | null | [
{
"alpha_fraction": 0.6397694524495677,
"alphanum_fraction": 0.6844380403458213,
"avg_line_length": 15.162790697674419,
"blob_id": "e9a9f2431d9282dfcaeb278cd35295424b1b4ff2",
"content_id": "9d8932b3dc1f5577fd852bb401ef727a01e2adf6766b02f508bd4bcd7609f197",
"detected_licenses": [],
"is_ge... | 46 | 3 |
lucasfloripa/Elciess | https://github.com/lucasfloripa/Elciess | null | null | null | null | null | null | null | 252,508,866 | 1 | 0 | null | null | 2021-06-03T18:23:32+00:00 | 2022-12-22T15:17:33+00:00 | null | [{"alpha_fraction":0.705078125,"alphanum_fraction":0.7141927083333334,"avg_line_length":29.72,"blob_(...TRUNCATED) | 592 | 4 |
niceoutput/job-board-scraper | https://github.com/niceoutput/job-board-scraper | null | null | null | null | null | null | null | 297,316,597 | 1 | 0 | null | null | 2023-03-09T00:48:04+00:00 | 2020-09-21T21:42:54+00:00 | null | [{"alpha_fraction":0.7924528301886793,"alphanum_fraction":0.7924528301886793,"avg_line_length":25.5,(...TRUNCATED) | 5 | 5 |
Chandrshekhar-coder/Shop-Now | https://github.com/Chandrshekhar-coder/Shop-Now | null | null | null | null | null | null | null | 384,398,311 | 1 | 0 | null | null | 2021-07-31T16:04:21+00:00 | 2021-07-09T10:27:18+00:00 | null | [{"alpha_fraction":0.7336561743341404,"alphanum_fraction":0.738498789346247,"avg_line_length":23.812(...TRUNCATED) | 30 | 6 |
akagupta9/CodeCoverageByFunctionalTesting | https://github.com/akagupta9/CodeCoverageByFunctionalTesting | null | null | null | null | null | null | null | 391,085,505 | 0 | 0 | null | null | 2022-03-22T14:37:19+00:00 | 2021-07-30T15:34:16+00:00 | null | [{"alpha_fraction":0.7279411764705882,"alphanum_fraction":0.7279411764705882,"avg_line_length":33.25(...TRUNCATED) | 9 | 7 |
xebialabs-community/xlr-cherwell-plugin | https://github.com/xebialabs-community/xlr-cherwell-plugin | null | null | null | null | null | null | null | 202,187,190 | 1 | 0 | null | null | 2022-04-02T15:22:10+00:00 | 2022-04-08T16:08:35+00:00 | null | [{"alpha_fraction":0.8028571428571428,"alphanum_fraction":0.8028571428571428,"avg_line_length":149.0(...TRUNCATED) | 33 | 8 |
jprudent/didactic-happiness | https://github.com/jprudent/didactic-happiness | null | null | null | null | null | null | null | 49,062,950 | 3 | 0 | null | null | 2020-06-10T14:39:05+00:00 | 2019-11-06T08:40:19+00:00 | null | [{"alpha_fraction":0.6713947990543735,"alphanum_fraction":0.7801418439716312,"avg_line_length":69.5,(...TRUNCATED) | 47 | 9 |
Update on The Stack V2 dataset: https://huggingface.co/datasets/bigcode/the-stack-v2-train-smol-ids
All repos from the original dataset were parsed with the GitHub API and re-downloaded, so the respective updates are kept and the metadata is refreshed. This took 10+ days to process due to GraphQL rate limits.
In addition, during parsing I applied a set of heuristics from the original paper, including: binary_or_size_check, length_checks, autogen_check, alpha_check, encoded_check, and language-specific filters.
All filtered files are removed from the files list field, so repos might not be complete.
As a result, there are 408,716 repos with a total of 30M files.
Code for filtering
from collections.abc import Iterable
from dataclasses import dataclass
from html import unescape
from pathlib import Path
import regex as re
# Allowlist of languages kept in the dataset; files detected as any other
# language are dropped by language_specific_filters().
ALLOWED_LANGUAGES = {
    "Ant Build System",
    "AsciiDoc",
    "C",
    "C#",
    "C++",
    "CMake",
    "Dockerfile",
    "Go",
    "Go Module",
    "Gradle",
    "Groovy",
    "HTML",
    "INI",
    "Java",
    "Java Properties",
    "JavaScript",
    "JSON",
    "JSON with Comments",
    "Kotlin",
    "Lua",
    "M4Sugar",
    "Makefile",
    "Markdown",
    "Maven POM",
    "PHP",
    "Python",
    "R",
    "RDoc",
    "reStructuredText",
    "RMarkdown",
    "Ruby",
    "Rust",
    "Shell",
    "SQL",
    "Swift",
    "Text",
    "TOML",
    "TypeScript",
    "YAML",
}
# Lowercase phrases that mark a file as machine-generated; matched against
# the lowercased first lines of a file in is_generated_content().
AUTOGEN_PHRASES = (
    "auto-generated",
    "autogenerated",
    "automatically generated",
    "generated automatically",
    "this file is generated",
)
# 64+ consecutive base64-alphabet characters (newlines included so
# line-wrapped blobs still match as one run).
RE_BASE64 = re.compile(r"[a-zA-Z0-9+/\n=]{64,}")
# 8+ two-digit hex values, optionally 0x/\x prefixed, separated by commas
# or whitespace (e.g. embedded byte arrays).
RE_HEXSEQ = re.compile(r"(?:\b(?:0x|\\x)?[0-9a-fA-F]{2}(?:,|\b\s*)){8,}")
# 8+ consecutive \uXXXX escape sequences.
RE_UNICODE = re.compile(r"(?:\\u[0-9a-fA-F]{4}){8,}")
@dataclass(slots=True)
class FileStats:
    """Per-file statistics consumed by the filtering heuristics."""
    # Repo-relative path of the file.
    path: str
    # Size in bytes of the UTF-8 encoding of the text.
    size: int
    # Number of lines (at least 1, even for empty text).
    nlines: int
    # Mean line length in characters.
    avg_line_len: float
    # Length of the longest line in characters.
    max_line_len: int
    # Fraction of characters that are alphabetic.
    alpha_ratio: float
    # Naive binary sniff result (NUL bytes or control chars below \t).
    is_binary: bool
    # Detected language name, or None when unknown.
    language: str | None
# Maps file extensions (".py") and a few exact basenames ("Makefile",
# "Dockerfile", "pom.xml", "CMakeLists.txt") to language names; consumed
# by detect_language(). Keys are matched case-sensitively there.
EXT_TO_LANGUAGE = {
    # minimal mapping; can be extended, or replaced by enry if available
    ".c": "C",
    ".h": "C",
    ".cc": "C++",
    ".cpp": "C++",
    ".hpp": "C++",
    ".cs": "C#",
    ".cmake": "CMake",
    "CMakeLists.txt": "CMake",
    "Dockerfile": "Dockerfile",
    ".go": "Go",
    ".mod": "Go Module",
    ".gradle": "Gradle",
    ".groovy": "Groovy",
    ".html": "HTML",
    ".ini": "INI",
    ".java": "Java",
    ".properties": "Java Properties",
    ".js": "JavaScript",
    ".mjs": "JavaScript",
    ".ts": "TypeScript",
    ".json": "JSON",
    ".jsonc": "JSON with Comments",
    ".kt": "Kotlin",
    ".lua": "Lua",
    ".m4": "M4Sugar",
    "Makefile": "Makefile",
    ".md": "Markdown",
    "pom.xml": "Maven POM",
    ".php": "PHP",
    ".py": "Python",
    ".r": "R",
    ".rdoc": "RDoc",
    ".rst": "reStructuredText",
    ".rmd": "RMarkdown",
    ".rb": "Ruby",
    ".rs": "Rust",
    ".sh": "Shell",
    ".sql": "SQL",
    ".swift": "Swift",
    ".txt": "Text",
    ".toml": "TOML",
    ".yaml": "YAML",
    ".yml": "YAML",
}
def detect_language(path: str) -> str | None:
    """Best-effort language detection from a file's basename or extension.

    Exact basename matches (e.g. "Makefile", "Dockerfile", "pom.xml")
    take precedence over extension matches.  Returns None when the file
    type is unknown.
    """
    name = Path(path).name
    if name in EXT_TO_LANGUAGE:
        return EXT_TO_LANGUAGE[name]
    ext = Path(path).suffix
    language = EXT_TO_LANGUAGE.get(ext)
    if language is None:
        # Fall back to a case-insensitive extension match so files such as
        # "stats.R", "report.Rmd" or "README.MD" are still recognised.
        # Exact-case lookups above are unaffected, so this only adds
        # matches where the original lookup returned None.
        language = EXT_TO_LANGUAGE.get(ext.lower())
    return language
def is_generated_content(text: str, language: str | None) -> bool:
    """Heuristic check for auto-generated files.

    Scans the first five lines for known generator phrases, then falls
    back to enry's detector when that package happens to be installed.
    The *language* argument is accepted for interface compatibility but
    is not consulted by either check.
    """
    # Phrase scan over the lowercased head of the file.
    first_lines = text.splitlines()[:5]
    head = "\n".join(first_lines).lower()
    for phrase in AUTOGEN_PHRASES:
        if phrase in head:
            return True
    # Best effort: defer to enry when available; any failure — including
    # the import itself — is treated as "not generated".
    try:
        import enry  # type: ignore
    except Exception:
        return False
    try:
        return bool(enry.is_generated(text))
    except Exception:
        return False
def visible_text_from_html(text: str) -> str:
    """Extract a rough approximation of the human-visible text of an HTML page.

    Strips <script>/<style> elements, HTML comments, and all remaining
    tags, unescapes entities, and collapses whitespace.  Purely regex
    based — good enough for a keep/drop heuristic, not a real parser.
    """
    # HTML tag names are case-insensitive, so <SCRIPT>/<Style> must be
    # stripped too — otherwise their contents leak into the "visible" text.
    text = re.sub(r"<script[\s\S]*?</script>", " ", text, flags=re.IGNORECASE)
    text = re.sub(r"<style[\s\S]*?</style>", " ", text, flags=re.IGNORECASE)
    text = re.sub(r"<!--.*?-->", " ", text)
    text = re.sub(r"<[^>]+>", " ", text)
    text = unescape(text)
    return re.sub(r"\s+", " ", text).strip()
def compute_stats(text: str, path: str) -> FileStats:
    """Compute the per-file statistics consumed by the filter functions."""
    # Empty text still counts as one (empty) line.
    lines = text.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    line_count = len(lines)
    # Naive binary sniff: NUL bytes or control characters below \t.
    looks_binary = "\x00" in text or any(ord(ch) < 9 for ch in text)
    return FileStats(
        path=path,
        size=len(text.encode("utf-8", "ignore")),
        nlines=line_count,
        avg_line_len=sum(line_lengths) / max(1, line_count),
        max_line_len=max(line_lengths) if line_lengths else 0,
        alpha_ratio=sum(ch.isalpha() for ch in text) / max(1, len(text)),
        is_binary=looks_binary,
        language=detect_language(path),
    )
def encoded_data_fraction(text: str) -> tuple[int, int]:
    """Measure how much of *text* looks like encoded data.

    Returns ``(total_matched_chars, longest_single_match)`` summed over
    the base64, hex-sequence, and unicode-escape patterns.
    """
    total = 0
    longest = 0
    for pattern in (RE_BASE64, RE_HEXSEQ, RE_UNICODE):
        for match in pattern.finditer(text):
            run = len(match.group(0))
            total += run
            if run > longest:
                longest = run
    return total, longest
def _binary_or_size_check(stats: FileStats) -> bool:
    """Keep only non-empty, non-binary files of at most 10 MiB."""
    too_big = stats.size > 10 * 1024 * 1024
    return not (stats.is_binary or stats.size == 0 or too_big)
def _length_checks(stats: FileStats) -> bool:
    """Reject files with pathological line statistics.

    Markup/data formats routinely contain very long lines, so they only
    face the 100k max-line-length cap; every other language must also
    stay within 100k lines, an average line length of 100, and a maximum
    line length of 1000.
    """
    long_line_langs = {"HTML", "JSON", "Markdown", "Roff", "Roff Manpage", "SMT", "TeX", "Text", "XML"}
    if stats.language in long_line_langs:
        return stats.max_line_len <= 100_000
    return (
        stats.nlines <= 100_000
        and stats.avg_line_len <= 100
        and stats.max_line_len <= 1_000
    )
def _autogen_check(text: str, language: str | None) -> bool:
    """True when the file does NOT look auto-generated."""
    looks_generated = is_generated_content(text, language)
    return not looks_generated
def _alpha_check(stats: FileStats) -> bool:
    """Require at least 25% alphabetic characters.

    Assembly-like languages are exempt because low alpha ratios are
    normal there.
    """
    is_exempt = stats.language in {"Motorola 68K Assembly", "WebAssembly"}
    return is_exempt or stats.alpha_ratio >= 0.25
def _encoded_check(text: str) -> bool:
    """Reject files dominated by encoded blobs (base64 / hex / \\u escapes).

    Fails on a single encoded run longer than 1 KiB, or when encoded
    data makes up more than half of the file.
    """
    matched_total, longest_run = encoded_data_fraction(text)
    encoded_fraction = matched_total / max(1, len(text))
    return longest_run <= 1024 and encoded_fraction <= 0.5
def basic_filters(text: str, stats: FileStats) -> bool:
    """Run the language-agnostic keep/drop checks; True means keep.

    Checks run in increasing cost order and short-circuit on the first
    failure, matching the order used when the stats were designed.
    """
    if not _binary_or_size_check(stats):
        return False
    if not _length_checks(stats):
        return False
    if not _autogen_check(text, stats.language):
        return False
    if not _alpha_check(stats):
        return False
    return _encoded_check(text)
def language_specific_filters(text: str, stats: FileStats) -> bool:
    """Apply per-language keep/drop rules; True means keep.

    Only allowlisted languages survive.  Data-ish formats are capped at
    512 lines, HTML pages must carry enough visible text, and plain-text
    files are kept only when they look like requirements/readme-style
    files.
    """
    language = stats.language
    # Unknown or non-allowlisted languages are dropped outright.
    if not language or language not in ALLOWED_LANGUAGES:
        return False
    # Cap line count for data/config-like formats.
    capped_formats = {"Text", "JSON", "YAML", "Web Ontology Language", "Graphviz (DOT)"}
    if language in capped_formats and stats.nlines > 512:
        return False
    if language == "HTML":
        visible = visible_text_from_html(text)
        enough_text = len(visible) >= 100
        dense_enough = len(visible) / max(1, len(text)) >= 0.2
        if not (enough_text and dense_enough):
            return False
    if language == "Text":
        filename = Path(stats.path).name.lower()
        stem = Path(stats.path).stem.lower()
        allowed_stems = {"readme", "notes", "todo", "description", "cmakelists"}
        if "requirement" not in filename and stem not in allowed_stems:
            return False
    return True
def should_keep_file(path: str, text: str, *, languages: Iterable[str] | None = None) -> bool:
    """Decide whether a single file survives every filter.

    When *languages* is given, files whose detected language falls
    outside that set are dropped before any other check runs.
    """
    stats = compute_stats(text, path)
    if languages is not None and stats.language not in set(languages):
        return False
    if not basic_filters(text, stats):
        return False
    return language_specific_filters(text, stats)