Columns:
repo_id            string, length 15 to 89
file_path          string, length 27 to 180
content            string, length 1 to 2.23M
__index_level_0__  int64, values 0 to 0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Cl...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/metaspace.rs
use crate::tokenizer::{Decoder, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use serde::{Deserialize, Deserializer, Serialize}; /// Enum representing options for the metaspace prepending scheme. #[derive(Debug, Clone, PartialEq, Serialize, Eq, Deserialize, Copy)] #[serde(rename_all = "snake_case"...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/byte_level.rs
use std::collections::{HashMap, HashSet}; use crate::utils::SysRegex; use serde::{Deserialize, Serialize}; use crate::tokenizer::{ Decoder, Encoding, PostProcessor, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; use crate::utils::macro_rules_attribute; fn bytes_char() -> HashMap<u8, cha...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/mod.rs
pub mod bert; pub mod byte_level; pub mod delimiter; pub mod digits; pub mod metaspace; pub mod punctuation; pub mod sequence; pub mod split; pub mod unicode_scripts; pub mod whitespace; use serde::{Deserialize, Serialize}; use crate::pre_tokenizers::bert::BertPreTokenizer; use crate::pre_tokenizers::byte_level::Byte...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs
use regex::Regex; use crate::tokenizer::{ pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Whitespace; impl Default for Whitespace { fn de...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/delimiter.rs
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[non_exhaustive] #[macro_rules_attribute(impl_serde_type!)] pub struct CharDelimiterSplit { pub deli...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/digits.rs
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] /// Pre tokenizes the numbers into single tokens. If individual_digits is set /// to true, then all digits are ...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/bert.rs
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_bert_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[mac...
0
hf_public_repos/tokenizers/tokenizers/src
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/split.rs
use crate::utils::SysRegex; use serde::{Deserialize, Deserializer, Serialize}; use crate::tokenizer::{ pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; /// Represents the different patterns that `Split` can use #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub...
0
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs
mod pre_tokenizer; mod scripts; // Re-export the PreTokenizer pub use pre_tokenizer::UnicodeScripts;
0
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/scripts.rs
// Generated by modified Perl script at https://github.com/google/sentencepiece/blob/master/data/gen_unicode_scripts_code.pl // Unicode scripts : https://gist.github.com/Narsil/07556f26dc84a6baeff4d499e68d3cd2 // Rust adaptation : https://gist.github.com/Narsil/1df9fbbf5296a8d4d62de55dcb2fe700 #[derive(PartialEq, Debu...
0
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers
hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/pre_tokenizer.rs
use crate::pre_tokenizers::unicode_scripts::scripts::{get_script, Script}; use crate::tokenizer::{normalizer::Range, PreTokenizedString, PreTokenizer, Result}; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct UnicodeScripts; impl Uni...
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/docs/Makefile
# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for those with `?=` SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build BUILDDIR ?= build SOURCEDIR = source # Put it first so that "make" without argument is like "make html_all". h...
0
hf_public_repos/tokenizers
hf_public_repos/tokenizers/docs/README.md
## Requirements In order to generate the documentation, it is necessary to have a Python environment with the following: ```python pip install sphinx sphinx_rtd_theme setuptools_rust ``` It is also necessary to have the `tokenizers` library in this same environment, for Sphinx to generate all the API Reference and li...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source/index.rst
Tokenizers ==================================================================================================== Fast State-of-the-art tokenizers, optimized for both research and production `🤗 Tokenizers`_ provides an implementation of today's most used tokenizers, with a focus on performance and versatility. These t...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source/entities.inc
.. entities:: python :global: class class classmethod class method Tokenizer :class:`~tokenizers.Tokenizer` Tokenizer.train :meth:`~tokenizers.Tokenizer.train` Tokenizer.save :meth:`~tokenizers.Tokenizer.save` Tokenizer.from_file :meth:`~toke...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If ex...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source/quicktour.rst
Quicktour ==================================================================================================== Let's have a quick look at the 🤗 Tokenizers library features. The library provides an implementation of today's most used tokenizers that is both easy to use and blazing fast. .. only:: python It can b...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source/pipeline.rst
The tokenization pipeline ==================================================================================================== When calling :entity:`Tokenizer.encode` or :entity:`Tokenizer.encode_batch`, the input text(s) go through the following pipeline: - :ref:`normalization` - :ref:`pre-tokenization` - :ref:`mode...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source/components.rst
Components ==================================================================================================== When building a Tokenizer, you can attach various types of components to this Tokenizer in order to customize its behavior. This page lists most provided components. .. _normalizers: .. entities:: python ...
0
hf_public_repos/tokenizers/docs/source/_static
hf_public_repos/tokenizers/docs/source/_static/css/huggingface.css
/* Our DOM objects */ /* Version control */ .selectors { margin-bottom: 10px; } .dropdown-button { display: inline-block; width: 50%; background-color: #6670FF; color: white; border: none; padding: 5px; font-size: 15px; cursor: pointer; } .dropdown-button:hover, .dropdown-button:...
0
hf_public_repos/tokenizers/docs/source/_static
hf_public_repos/tokenizers/docs/source/_static/css/code-snippets.css
.highlight .c1, .highlight .sd{ color: #999 } .highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc, .highlight .kt { color: #FB8D68; } .highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow, .highlight .kd, .highlight .kr, .highlight .s { color: #6670FF; }...
0
hf_public_repos/tokenizers/docs/source/_static
hf_public_repos/tokenizers/docs/source/_static/js/custom.js
// These three variables below need to be updated at each release for the selectors. const languages = [ "rust", "python", "node" ]; // Last stable version for each language const stableVersion = { "rust": "master", "python": "v0.10.0", "node": "master" } // Dictionary doc folder to Label for each language...
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/installation/main.rst
Installation ==================================================================================================== .. only:: python .. include:: python.inc .. only:: rust .. include:: rust.inc .. only:: node .. include:: node.inc
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/installation/rust.inc
Crates.io ---------------------------------------------------------------------------------------------------- 🤗 Tokenizers is available on `crates.io <https://crates.io/crates/tokenizers>`__. You just need to add it to your :obj:`Cargo.toml`:: tokenizers = "0.10"
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/installation/python.inc
🤗 Tokenizers is tested on Python 3.5+. You should install 🤗 Tokenizers in a `virtual environment <https://docs.python.org/3/library/venv.html>`_. If you're unfamiliar with Python virtual environments, check out the `user guide <https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/>`__. C...
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/installation/node.inc
Installation with npm ---------------------------------------------------------------------------------------------------- You can simply install 🤗 Tokenizers with npm using:: npm install tokenizers
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/_ext/rust_doc.py
from docutils import nodes import sphinx from sphinx.locale import _ from conf import rust_version logger = sphinx.util.logging.getLogger(__name__) class RustRef: def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]): doctype = name.split("_")[1] parts = text.split(":...
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/_ext/toctree_tags.py
import re from sphinx.directives.other import TocTree class TocTreeTags(TocTree): hasPat = re.compile("^\s*:(.+):(.+)$") def filter_entries(self, entries): filtered = [] for e in entries: m = self.hasPat.match(e) if m != None: if self.env.app.tags.has(m...
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/_ext/entities.py
from collections import defaultdict, abc from typing import cast from docutils import nodes from docutils.parsers.rst import Directive import sphinx from sphinx.locale import _ from sphinx.util.docutils import SphinxDirective from sphinx.errors import ExtensionError from conf import languages as LANGUAGES logger = ...
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/api/rust.inc
Documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Rust API Reference is available directly on the `Docs.rs <https://docs.rs/tokenizers>`__ website.
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/api/reference.rst
.. only:: python .. include:: python.inc .. only:: rust .. include:: rust.inc .. only:: node .. include:: node.inc
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/api/python.inc
Input sequences ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These types represent all the different kinds of sequence that can be used as input of a Tokenizer. Globally, any sequence can be either a string or a list of strings, according to the operating mode of...
0
hf_public_repos/tokenizers/docs/source
hf_public_repos/tokenizers/docs/source/api/node.inc
Documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The node API has not been documented yet.
0
hf_public_repos/tokenizers/docs/source/tutorials
hf_public_repos/tokenizers/docs/source/tutorials/python/training_from_memory.rst
Training from memory ---------------------------------------------------------------------------------------------------- In the `Quicktour <quicktour>`__, we saw how to build and train a tokenizer using text files, but we can actually use any Python Iterator. In this section we'll see a few different ways of training...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source-doc-builder/index.mdx
<!-- DISABLE-FRONTMATTER-SECTIONS --> # Tokenizers Fast State-of-the-art tokenizers, optimized for both research and production [🤗 Tokenizers](https://github.com/huggingface/tokenizers) provides an implementation of today's most used tokenizers, with a focus on performance and versatility. These tokenizers are also...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source-doc-builder/_toctree.yml
- sections: - local: index title: 🤗 Tokenizers - local: quicktour title: Quicktour - local: installation title: Installation - local: pipeline title: The tokenization pipeline - local: components title: Components - local: training_from_memory title: Training from memory title: G...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source-doc-builder/training_from_memory.mdx
# Training from memory In the [Quicktour](quicktour), we saw how to build and train a tokenizer using text files, but we can actually use any Python Iterator. In this section we'll see a few different ways of training our tokenizer. For all the examples listed below, we'll use the same [`~tokenizers.Tokenizer`] and [...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source-doc-builder/pipeline.mdx
# The tokenization pipeline When calling `Tokenizer.encode` or `Tokenizer.encode_batch`, the input text(s) go through the following pipeline: - `normalization` - `pre-tokenization` - `model` - `post-processing` We'll see in details what happens during each of those steps in detail, as well as when you want t...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source-doc-builder/quicktour.mdx
# Quicktour Let's have a quick look at the 🤗 Tokenizers library features. The library provides an implementation of today's most used tokenizers that is both easy to use and blazing fast. ## Build a tokenizer from scratch To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on [wikitext-...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source-doc-builder/components.mdx
# Components When building a Tokenizer, you can attach various types of components to this Tokenizer in order to customize its behavior. This page lists most provided components. ## Normalizers A `Normalizer` is in charge of pre-processing the input string in order to normalize it as relevant for a given use case. S...
0
hf_public_repos/tokenizers/docs
hf_public_repos/tokenizers/docs/source-doc-builder/installation.mdx
# Installation <tokenizerslangcontent> <python> 🤗 Tokenizers is tested on Python 3.5+. You should install 🤗 Tokenizers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/instal...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/pre-tokenizers.mdx
# Pre-tokenizers <tokenizerslangcontent> <python> ## BertPreTokenizer [[autodoc]] tokenizers.pre_tokenizers.BertPreTokenizer ## ByteLevel [[autodoc]] tokenizers.pre_tokenizers.ByteLevel ## CharDelimiterSplit [[autodoc]] tokenizers.pre_tokenizers.CharDelimiterSplit ## Digits [[autodoc]] tokenizers.pre_tokenizers...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/input-sequences.mdx
# Input Sequences <tokenizerslangcontent> <python> These types represent all the different kinds of sequence that can be used as input of a Tokenizer. Globally, any sequence can be either a string or a list of strings, according to the operating mode of the tokenizer: `raw text` vs `pre-tokenized`. ## TextInputSequen...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/normalizers.mdx
# Normalizers <tokenizerslangcontent> <python> ## BertNormalizer [[autodoc]] tokenizers.normalizers.BertNormalizer ## Lowercase [[autodoc]] tokenizers.normalizers.Lowercase ## NFC [[autodoc]] tokenizers.normalizers.NFC ## NFD [[autodoc]] tokenizers.normalizers.NFD ## NFKC [[autodoc]] tokenizers.normalizers.NF...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/encoding.mdx
# Encoding <tokenizerslangcontent> <python> ## Encoding [[autodoc]] tokenizers.Encoding - all - attention_mask - ids - n_sequences - offsets - overflowing - sequence_ids - special_tokens_mask - tokens - type_ids - word_ids - words </python> <rust> The Rust API Reference...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/added-tokens.mdx
# Added Tokens <tokenizerslangcontent> <python> ## AddedToken [[autodoc]] tokenizers.AddedToken - content - lstrip - normalized - rstrip - single_word </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <nod...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/models.mdx
# Models <tokenizerslangcontent> <python> ## BPE [[autodoc]] tokenizers.models.BPE ## Model [[autodoc]] tokenizers.models.Model ## Unigram [[autodoc]] tokenizers.models.Unigram ## WordLevel [[autodoc]] tokenizers.models.WordLevel ## WordPiece [[autodoc]] tokenizers.models.WordPiece </python> <rust> The Rust A...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/visualizer.mdx
# Visualizer <tokenizerslangcontent> <python> ## Annotation [[autodoc]] tokenizers.tools.Annotation ## EncodingVisualizer [[autodoc]] tokenizers.tools.EncodingVisualizer - __call__ </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) webs...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/encode-inputs.mdx
# Encode Inputs <tokenizerslangcontent> <python> These types represent all the different kinds of input that a [`~tokenizers.Tokenizer`] accepts when using [`~tokenizers.Tokenizer.encode_batch`]. ## TextEncodeInput[[[[tokenizers.TextEncodeInput]]]] <code>tokenizers.TextEncodeInput</code> Represents a textual input ...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/post-processors.mdx
# Post-processors <tokenizerslangcontent> <python> ## BertProcessing [[autodoc]] tokenizers.processors.BertProcessing ## ByteLevel [[autodoc]] tokenizers.processors.ByteLevel ## RobertaProcessing [[autodoc]] tokenizers.processors.RobertaProcessing ## TemplateProcessing [[autodoc]] tokenizers.processors.Template...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/decoders.mdx
# Decoders <tokenizerslangcontent> <python> ## BPEDecoder [[autodoc]] tokenizers.decoders.BPEDecoder ## ByteLevel [[autodoc]] tokenizers.decoders.ByteLevel ## CTC [[autodoc]] tokenizers.decoders.CTC ## Metaspace [[autodoc]] tokenizers.decoders.Metaspace ## WordPiece [[autodoc]] tokenizers.decoders.WordPiece <...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/tokenizer.mdx
# Tokenizer <tokenizerslangcontent> <python> ## Tokenizer [[autodoc]] tokenizers.Tokenizer - all - decoder - model - normalizer - padding - post_processor - pre_tokenizer - truncation </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokeniz...
0
hf_public_repos/tokenizers/docs/source-doc-builder
hf_public_repos/tokenizers/docs/source-doc-builder/api/trainers.mdx
# Trainers <tokenizerslangcontent> <python> ## BpeTrainer [[autodoc]] tokenizers.trainers.BpeTrainer ## UnigramTrainer [[autodoc]] tokenizers.trainers.UnigramTrainer ## WordLevelTrainer [[autodoc]] tokenizers.trainers.WordLevelTrainer ## WordPieceTrainer [[autodoc]] tokenizers.trainers.WordPieceTrainer </python...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/Cargo.toml
[package] name = "tokenizers-python" version = "0.15.1-dev.0" authors = ["Anthony MOI <m.anthony.moi@gmail.com>"] edition = "2021" [lib] name = "tokenizers" crate-type = ["cdylib"] [dependencies] rayon = "1.8" serde = { version = "1.0", features = [ "rc", "derive" ]} serde_json = "1.0" libc = "0.2" env_logger = "0.10...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/conftest.py
import pytest def pytest_addoption(parser): parser.addoption("--runslow", action="store_true", default=False, help="run slow tests") def pytest_configure(config): config.addinivalue_line("markers", "slow: mark test as slow to run") def pytest_collection_modifyitems(config, items): if config.getoption(...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/rust-toolchain
stable
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/CHANGELOG.md
# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.13.2] - [#1096] Python 3.11 support ## [0.13.1] - [#1072]...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/Makefile
.PHONY: style check-style test DATA_DIR = data dir_guard=@mkdir -p $(@D) check_dirs := examples py_src/tokenizers tests # Format source code automatically style: python stub.py black --line-length 119 --target-version py35 $(check_dirs) # Check the source code is formatted correctly check-style: python stub.py -...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/pyproject.toml
[project] name = 'tokenizers' requires-python = '>=3.7' authors = [ {name = 'Nicolas Patry', email = 'patry.nicolas@protonmail.com'}, {name = 'Anthony Moi', email = 'anthony@huggingface.co'} ] classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audie...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/MANIFEST.in
include Cargo.toml include pyproject.toml include rust-toolchain include ../../LICENSE recursive-include src * recursive-include tokenizers-lib * recursive-exclude tokenizers-lib/target *
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/README.md
<p align="center"> <br> <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/> <br> <p> <p align="center"> <a href="https://badge.fury.io/py/tokenizers"> <img alt="Build" src="https://badge.fury.io/py/tokenizers.svg"> </a> <a href="https://github.c...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/setup.cfg
[isort] default_section = FIRSTPARTY ensure_newline_before_comments = True force_grid_wrap = 0 include_trailing_comma = True known_first_party = transformers known_third_party = absl conllu datasets elasticsearch fairseq faiss-cpu fastprogress fire fugashi git h5py matplo...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/test.txt
<DOCUMENT> \test{bla} thisisatest </DOCUMENT> <DOCUMENT> \test{bla} thisisatest </DOCUMENT> <DOCUMENT> \test{bla} thisisatest </DOCUMENT> <DOCUMENT> \test{bla} thisisatest </DOCUMENT> <DOCUMENT> \test{bla} thisisatest </DOCUMENT> <DOCUMENT> \test{bla} thisisatest </DOCUMENT> <DOCUMENT> \test{bla} thisisatest </DOCUMENT...
0
hf_public_repos/tokenizers/bindings
hf_public_repos/tokenizers/bindings/python/stub.py
import argparse import inspect import os from pathlib import Path import black INDENT = " " * 4 GENERATED_COMMENT = "# Generated content DO NOT EDIT\n" def do_indent(text: str, indent: str): return text.replace("\n", f"\n{indent}") def function(obj, indent, text_signature=None): if text_signature is None...
0
hf_public_repos/tokenizers/bindings/python/py_src
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.pyi
# Generated content DO NOT EDIT class AddedToken: """ Represents a token that can be be added to a :class:`~tokenizers.Tokenizer`. It can have special options that defines the way it should behave. Args: content (:obj:`str`): The content of the token single_word (:obj:`bool`, defaults ...
0
hf_public_repos/tokenizers/bindings/python/py_src
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/__init__.py
from enum import Enum from typing import List, Tuple, Union Offsets = Tuple[int, int] TextInputSequence = str """A :obj:`str` that represents an input sequence """ PreTokenizedInputSequence = Union[List[str], Tuple[str]] """A pre-tokenized input sequence. Can be one of: - A :obj:`List` of :obj:`str` - A :o...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.pyi
# Generated content DO NOT EDIT class Normalizer: """ Base class for all normalizers This class is not supposed to be instantiated directly. Instead, any implementation of a Normalizer will return an instance of this class when instantiated. """ def normalize(self, normalized): """ ...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py
from .. import normalizers Normalizer = normalizers.Normalizer BertNormalizer = normalizers.BertNormalizer NFD = normalizers.NFD NFKD = normalizers.NFKD NFC = normalizers.NFC NFKC = normalizers.NFKC Sequence = normalizers.Sequence Lowercase = normalizers.Lowercase Prepend = normalizers.Prepend Strip = normalizers.Str...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css
.tokenized-text { width:100%; padding:2rem; max-height: 400px; overflow-y: auto; box-sizing:border-box; line-height:4rem; /* Lots of space between lines */ font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; box-shadow: 2px 2px 2px rgba(0,0,0,0.2); background-color: rgb...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/__init__.py
from .visualizer import Annotation, EncodingVisualizer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/tools/visualizer.py
import itertools import os import re from string import Template from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple from tokenizers import Encoding, Tokenizer dirname = os.path.dirname(__file__) css_filename = os.path.join(dirname, "visualizer-styles.css") with open(css_filename) as f: css...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi
# Generated content DO NOT EDIT class Model: """ Base class for all models The model represents the actual tokenization algorithm. This is the part that will contain and manage the learned vocabulary. This class cannot be constructed directly. Please use one of the concrete models. """ de...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/models/__init__.py
# Generated content DO NOT EDIT from .. import models Model = models.Model BPE = models.BPE Unigram = models.Unigram WordLevel = models.WordLevel WordPiece = models.WordPiece
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.pyi
# Generated content DO NOT EDIT class Trainer: """ Base class for all trainers This class is not supposed to be instantiated directly. Instead, any implementation of a Trainer will return an instance of this class when instantiated. """ class BpeTrainer(Trainer): """ Trainer capable of tra...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/trainers/__init__.py
# Generated content DO NOT EDIT from .. import trainers Trainer = trainers.Trainer BpeTrainer = trainers.BpeTrainer UnigramTrainer = trainers.UnigramTrainer WordLevelTrainer = trainers.WordLevelTrainer WordPieceTrainer = trainers.WordPieceTrainer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi
# Generated content DO NOT EDIT class PostProcessor: """ Base class for all post-processors This class is not supposed to be instantiated directly. Instead, any implementation of a PostProcessor will return an instance of this class when instantiated. """ def num_special_tokens_to_add(self, is...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/processors/__init__.py
# Generated content DO NOT EDIT from .. import processors PostProcessor = processors.PostProcessor BertProcessing = processors.BertProcessing ByteLevel = processors.ByteLevel RobertaProcessing = processors.RobertaProcessing Sequence = processors.Sequence TemplateProcessing = processors.TemplateProcessing
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.pyi
# Generated content DO NOT EDIT class Decoder: """ Base class for all decoders This class is not supposed to be instantiated directly. Instead, any implementation of a Decoder will return an instance of this class when instantiated. """ def decode(self, tokens): """ Decode the ...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py
from .. import decoders Decoder = decoders.Decoder ByteLevel = decoders.ByteLevel Replace = decoders.Replace WordPiece = decoders.WordPiece ByteFallback = decoders.ByteFallback Fuse = decoders.Fuse Strip = decoders.Strip Metaspace = decoders.Metaspace BPEDecoder = decoders.BPEDecoder CTC = decoders.CTC Sequence = dec...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers from tokenizers.models import BPE from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str from .base_tokenizer import BaseTokenizer ...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py
import json import os from typing import Iterator, List, Optional, Union, Tuple from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.models import Unigram from .base_tokenizer import BaseTokenizer class SentencePieceUnigramTokenizer(BaseTokenizer): ...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/sentencepiece_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from tokenizers.models import BPE from tokenizers.normalizers import NFKC from .base_tokenizer import BaseTokenizer class SentencePieceBPETokenizer(BaseTokenizer): """...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/base_tokenizer.py
from typing import Dict, List, Optional, Tuple, Union from tokenizers import AddedToken, EncodeInput, Encoding, InputSequence, Tokenizer from tokenizers.decoders import Decoder from tokenizers.models import Model from tokenizers.normalizers import Normalizer from tokenizers.pre_tokenizers import PreTokenizer from toke...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py
from .base_tokenizer import BaseTokenizer from .bert_wordpiece import BertWordPieceTokenizer from .byte_level_bpe import ByteLevelBPETokenizer from .char_level_bpe import CharBPETokenizer from .sentencepiece_bpe import SentencePieceBPETokenizer from .sentencepiece_unigram import SentencePieceUnigramTokenizer
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/char_level_bpe.py
from typing import Dict, Iterator, List, Optional, Tuple, Union from .. import AddedToken, Tokenizer, decoders, pre_tokenizers, trainers from ..models import BPE from ..normalizers import BertNormalizer, Lowercase, Sequence, unicode_normalizer_from_str from .base_tokenizer import BaseTokenizer class CharBPETokenizer...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/implementations/bert_wordpiece.py
from typing import Dict, Iterator, List, Optional, Union from tokenizers import AddedToken, Tokenizer, decoders, trainers from tokenizers.models import WordPiece from tokenizers.normalizers import BertNormalizer from tokenizers.pre_tokenizers import BertPreTokenizer from tokenizers.processors import BertProcessing fr...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.pyi
# Generated content DO NOT EDIT class PreTokenizer: """ Base class for all pre-tokenizers This class is not supposed to be instantiated directly. Instead, any implementation of a PreTokenizer will return an instance of this class when instantiated. """ def pre_tokenize(self, pretok): "...
0
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers
hf_public_repos/tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py
# Generated content DO NOT EDIT from .. import pre_tokenizers PreTokenizer = pre_tokenizers.PreTokenizer BertPreTokenizer = pre_tokenizers.BertPreTokenizer ByteLevel = pre_tokenizers.ByteLevel CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit Digits = pre_tokenizers.Digits Metaspace = pre_tokenizers.Metaspace Pun...
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/tests/utils.py
import multiprocessing as mp import os import pytest import requests DATA_PATH = os.path.join("tests", "data") def download(url, with_filename=None): filename = with_filename if with_filename is not None else url.rsplit("/")[-1] filepath = os.path.join(DATA_PATH, filename) if not os.path.exists(filepa...
0
hf_public_repos/tokenizers/bindings/python
hf_public_repos/tokenizers/bindings/python/tests/test_serialization.py
import json import os import unittest import tqdm from huggingface_hub import HfApi, cached_download, hf_hub_url from tokenizers import Tokenizer from .utils import albert_base, data_dir class TestSerialization: def test_full_serialization_albert(self, albert_base): # Check we can read this file. ...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py
import gzip import os import datasets import pytest from ..utils import data_dir, train_files class TestTrainFromIterators: @staticmethod def get_tokenizer_trainer(): # START init_tokenizer_trainer from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers ...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_quicktour.py
from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.pre_tokenizers import Whitespace from tokenizers.trainers import BpeTrainer from ..utils import data_dir, doc_wiki_tokenizer disable_printing = True original_print = print def print(*args, **kwargs): if not disable_printing: ...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/documentation/test_pipeline.py
from tokenizers import Tokenizer from ..utils import data_dir, doc_pipeline_bert_tokenizer, doc_wiki_tokenizer disable_printing = True original_print = print def print(*args, **kwargs): if not disable_printing: original_print(*args, **kwargs) class TestPipeline: def test_pipeline(self, doc_wiki_t...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_bert_wordpiece.py
import pytest from tokenizers import BertWordPieceTokenizer from ..utils import bert_files, data_dir, multiprocessing_with_parallelism class TestBertWordPieceTokenizer: def test_basic_encode(self, bert_files): tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"]) # Encode with speci...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py
import pytest from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors from tokenizers.implementations import BaseTokenizer class TestBaseTokenizer: def test_get_set_components(self): toki = Tokenizer(models.BPE()) toki.normalizer = normalizers.NFC() tok...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_char_bpe.py
import pytest from tokenizers import CharBPETokenizer from ..utils import data_dir, multiprocessing_with_parallelism, openai_files class TestCharBPETokenizer: def test_basic_encode(self, openai_files): tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"]) output ...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_sentencepiece.py
import os import pytest from tokenizers import SentencePieceBPETokenizer, SentencePieceUnigramTokenizer class TestSentencePieceBPE: def test_train_from_iterator(self): text = ["A first sentence", "Another sentence", "And a last one"] tokenizer = SentencePieceBPETokenizer() tokenizer.trai...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/implementations/test_byte_level_bpe.py
import pytest from tokenizers import ByteLevelBPETokenizer from ..utils import data_dir, multiprocessing_with_parallelism, roberta_files class TestByteLevelBPE: def test_basic_encode(self, roberta_files): tokenizer = ByteLevelBPETokenizer.from_file(roberta_files["vocab"], roberta_files["merges"]) ...
0
hf_public_repos/tokenizers/bindings/python/tests
hf_public_repos/tokenizers/bindings/python/tests/bindings/test_processors.py
import json import pickle import pytest from tokenizers import Tokenizer from tokenizers.models import BPE from tokenizers.pre_tokenizers import ByteLevel as ByteLevelPreTokenizer from tokenizers.processors import ( BertProcessing, ByteLevel, PostProcessor, RobertaProcessing, Sequence, Templat...
0
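The rows above all follow the four-column schema listed at the top (repo_id, file_path, content, __index_level_0__). As a minimal sketch, assuming the table were exported to a Parquet file (the name `code_files.parquet` below is hypothetical), records like these could be loaded and summarized per directory with pandas:

```python
import pandas as pd

# Hypothetical export of the table above; the actual file name and location are assumptions.
df = pd.read_parquet(
    "code_files.parquet",
    columns=["repo_id", "file_path", "content", "__index_level_0__"],
)

# Count files and total content size per top-level directory (repo_id).
summary = (
    df.assign(content_chars=df["content"].str.len())
      .groupby("repo_id")
      .agg(n_files=("file_path", "count"), total_chars=("content_chars", "sum"))
      .sort_values("total_chars", ascending=False)
)
print(summary.head())
```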