---
# Hugging Face dataset card front matter (auto-generated `dataset_info` schema).
# NOTE(review): indentation was stripped in the source and has been restored
# per the standard HF `dataset_info` layout; the original first line read
# `metadata` (likely a copied UI tab label) and was replaced with the YAML
# document-start marker. Field order is preserved exactly.
dataset_info:
  features:
  - name: _id
    dtype: large_string
  - name: hash
    dtype: large_string
  - name: vocab_hash
    dtype: large_string
  - name: vocab_size
    dtype: int64
  - name: model_type
    dtype: large_string
  - name: num_merges
    dtype: float64
  - name: has_normalizer
    dtype: bool
  - name: has_pre_tokenizer
    dtype: bool
  - name: has_post_processor
    dtype: bool
  - name: has_decoder
    dtype: bool
  - name: num_added_tokens
    dtype: int64
  - name: normalizer_type
    dtype: large_string
  - name: pre_tokenizer_type
    dtype: large_string
  - name: decoder_type
    dtype: large_string
  - name: normalizer_types
    list: string
  - name: pre_tokenizer_types
    list: string
  - name: decoder_types
    list: string
  - name: tokenizer_id
    dtype: int64
  - name: id
    dtype: large_string
  - name: author
    dtype: string
  # base_models is a struct holding a list of model references
  # (_id / id / relation triples); the fields that follow it are
  # top-level per-row features, not struct members.
  - name: base_models
    struct:
    - name: models
      list:
      - name: _id
        dtype: string
      - name: id
        dtype: string
      - name: relation
        dtype: string
  - name: downloads
    dtype: int64
  - name: downloads_all_time
    dtype: int64
  - name: gated
    dtype: string
  - name: created_at
    dtype: timestamp[us, tz=UTC]
  - name: last_modified
    dtype: timestamp[us, tz=UTC]
  - name: library_name
    dtype: string
  - name: likes
    dtype: int64
  - name: trending_score
    dtype: float64
  - name: model_index
    dtype: string
  - name: pipeline_tag
    dtype: string
  - name: safetensors
    dtype: string
  - name: siblings
    list: string
  - name: sha
    dtype: string
  - name: tags
    list: string
  - name: gguf
    dtype: string
  - name: config
    dtype: string
  - name: transformers_info
    struct:
    - name: auto_model
      dtype: string
    - name: custom_class
      dtype: string
    - name: pipeline_tag
      dtype: string
    - name: processor
      dtype: string
  - name: card_data
    dtype: string
  - name: card
    dtype: string
  # 'null' is quoted deliberately: it names the Arrow null dtype as a string,
  # not the YAML null value.
  - name: spaces
    dtype: 'null'
  - name: licenses
    list: string
  - name: datasets
    list: string
  - name: languages
    list: string
  - name: safetensors_params
    dtype: float64
  - name: gguf_params
    dtype: float64
  - name: metrics
    list: string
  - name: architectures
    list: string
  - name: tasks
    list: string
  - name: modalities
    list: string
  - name: input_modalities
    list: string
  - name: output_modalities
    list: string
  splits:
  - name: train
    num_bytes: 2184348884
    num_examples: 423650
  download_size: 802260300
  dataset_size: 2184348884
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*